diff --git a/.gitignore b/.gitignore index 173af69..82d65a8 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,8 @@ *.tmp *.ilk *.pdb +*.gch +*.pch *.exe *vgcore *.gdb_history diff --git a/Makefile b/Makefile index 948a55b..250c4d0 100644 --- a/Makefile +++ b/Makefile @@ -1,131 +1,176 @@ -# **************************************************************************** # -# # -# ::: :::::::: # -# Makefile :+: :+: :+: # -# +:+ +:+ +:+ # -# By: kiroussa +#+ +:+ +#+ # -# +#+#+#+#+#+ +#+ # -# Created: 2022/10/04 16:43:41 by maldavid #+# #+# # -# Updated: 2024/04/24 14:59:23 by kiroussa ### ########.fr # -# # -# **************************************************************************** # +NAME = libmlx.so +MAKE = make --no-print-directory -NAME = libmlx.so -MAKE = make --no-print-directory +OS ?= $(shell uname -s) +DEBUG ?= false +TOOLCHAIN ?= clang +IMAGES_OPTIMIZED ?= true +FORCE_INTEGRATED_GPU ?= false +GRAPHICS_MEMORY_DUMP ?= false +PROFILER ?= false +FORCE_WAYLAND ?= false +DISABLE_ALL_SAFETIES ?= false +_ENABLEDFLAGS = -OS ?= $(shell uname -s) -DEBUG ?= false -TOOLCHAIN ?= clang -IMAGES_OPTIMIZED ?= true -FORCE_INTEGRATED_GPU ?= false -GRAPHICS_MEMORY_DUMP ?= false -PROFILER ?= false -_ENABLEDFLAGS = +SRCS = $(wildcard $(addsuffix /*.cpp, runtime/Sources/Core)) +SRCS += $(wildcard $(addsuffix /*.cpp, runtime/Sources/Graphics)) +SRCS += $(wildcard $(addsuffix /*.cpp, runtime/Sources/Platform)) +SRCS += $(wildcard $(addsuffix /*.cpp, runtime/Sources/Renderer)) +SRCS += $(wildcard $(addsuffix /*.cpp, runtime/Sources/Renderer/Vulkan)) +SRCS += $(wildcard $(addsuffix /*.cpp, runtime/Sources/Renderer/Pipelines)) +SRCS += $(wildcard $(addsuffix /*.cpp, runtime/Sources/Renderer/RenderPasses)) -SRCS = $(wildcard $(addsuffix /*.cpp, src/core)) -SRCS += $(wildcard $(addsuffix /*.cpp, src/platform)) -SRCS += $(wildcard $(addsuffix /*.cpp, src/renderer)) -SRCS += $(wildcard $(addsuffix /*.cpp, src/renderer/**)) +OBJ_DIR = objs/make/$(shell echo $(OS) | tr '[:upper:]' '[:lower:]') +OBJS := $(addprefix $(OBJ_DIR)/, $(SRCS:.cpp=.o)) -OBJ_DIR = objs/make/$(shell echo $(OS) | tr '[:upper:]' '[:lower:]') -OBJS := $(addprefix $(OBJ_DIR)/, $(SRCS:.cpp=.o)) +SHADERS_DIR = runtime/Includes/Embedded +SHADERS_SRCS = $(wildcard $(addsuffix /*.nzsl, $(SHADERS_DIR))) +SPVS = $(SHADERS_SRCS:.nzsl=.spv.h) -CXX = clang++ -CXXFLAGS = -std=c++17 -O3 -fPIC -Wall -Wextra -Werror -DSDL_MAIN_HANDLED -INCLUDES = -I./includes -I./src -I./third_party +CXX = clang++ +CXXFLAGS = -std=c++20 -fPIC -Wall -Wextra -DSDL_MAIN_HANDLED +INCLUDES = -I./includes -I./runtime/Includes -I./runtime/Sources -I./third_party + +CXXPCHFLAGS = -xc++-header + +PCH = runtime/Includes/PreCompiled.h +GCH = + +NZRRC = nzslc ifeq ($(TOOLCHAIN), gcc) -CXX = g++ -CXXFLAGS += -Wno-error=cpp + CXX = g++ + GCH = runtime/Includes/PreCompiled.h.gch + CXXFLAGS += -Wno-error=cpp else -CXXFLAGS += -Wno-error=#warning + GCH = runtime/Includes/PreCompiled.h.pch + CXXFLAGS += -Wno-error=#warning -include-pch $(GCH) endif ifeq ($(OS), Darwin) -LDFLAGS += -L /opt/homebrew/lib -lSDL2 -CXXFLAGS += -I /opt/homebrew/include -NAME = libmlx.dylib + LDFLAGS += -L /opt/homebrew/lib -lSDL2 + CXXFLAGS += -I /opt/homebrew/include + NAME = libmlx.dylib endif ifeq ($(DEBUG), true) -CXXFLAGS += -g3 -D DEBUG -LDFLAGS += -rdynamic + CXXFLAGS += -g3 -O0 -D DEBUG + LDFLAGS += -rdynamic +else + CXXFLAGS += -O3 endif ifeq ($(FORCE_INTEGRATED_GPU), true) -_ENABLEDFLAGS += FORCE_INTEGRATED_GPU + _ENABLEDFLAGS += FORCE_INTEGRATED_GPU endif ifeq ($(IMAGES_OPTIMIZED), true) -_ENABLEDFLAGS += 
IMAGE_OPTIMIZED + _ENABLEDFLAGS += IMAGE_OPTIMIZED endif ifeq ($(GRAPHICS_MEMORY_DUMP), true) -_ENABLEDFLAGS += GRAPHICS_MEMORY_DUMP + _ENABLEDFLAGS += GRAPHICS_MEMORY_DUMP endif ifeq ($(PROFILER), true) -_ENABLEDFLAGS += PROFILER + _ENABLEDFLAGS += PROFILER endif -CXXFLAGS += $(addprefix -D, $(_ENABLEDFLAGS)) +ifeq ($(FORCE_WAYLAND), true) + _ENABLEDFLAGS += FORCE_WAYLAND +endif + +ifeq ($(DISABLE_ALL_SAFETIES), true) + _ENABLEDFLAGS += DISABLE_ALL_SAFETIES +endif + +CXXFLAGS += $(addprefix -D, $(_ENABLEDFLAGS)) RM = rm -rf -TPUT = tput -T xterm-256color -_RESET := $(shell $(TPUT) sgr0) -_BOLD := $(shell $(TPUT) bold) -_ITALIC := $(shell $(TPUT) sitm) -_UNDER := $(shell $(TPUT) smul) -_GREEN := $(shell $(TPUT) setaf 2) -_YELLOW := $(shell $(TPUT) setaf 3) -_RED := $(shell $(TPUT) setaf 1) -_GRAY := $(shell $(TPUT) setaf 8) -_PURPLE := $(shell $(TPUT) setaf 5) +TPUT = tput -T xterm-256color +_RESET := $(shell $(TPUT) sgr0) +_BOLD := $(shell $(TPUT) bold) +_ITALIC := $(shell $(TPUT) sitm) +_UNDER := $(shell $(TPUT) smul) +_GREEN := $(shell $(TPUT) setaf 2) +_YELLOW := $(shell $(TPUT) setaf 3) +_RED := $(shell $(TPUT) setaf 1) +_GRAY := $(shell $(TPUT) setaf 8) +_PURPLE := $(shell $(TPUT) setaf 5) ifeq ($(DEBUG), true) -MODE := $(_RESET)$(_PURPLE)$(_BOLD)Debug$(_RESET)$(_PURPLE) -COLOR := $(_PURPLE) + MODE := $(_RESET)$(_PURPLE)$(_BOLD)Debug$(_RESET)$(_PURPLE) + COLOR := $(_PURPLE) else -MODE := $(_RESET)$(_GREEN)$(_BOLD)Release$(_RESET)$(_GREEN) -COLOR := $(_GREEN) + MODE := $(_RESET)$(_GREEN)$(_BOLD)Release$(_RESET)$(_GREEN) + COLOR := $(_GREEN) endif -OBJS_TOTAL = $(words $(OBJS)) -N_OBJS := $(shell find $(OBJ_DIR) -type f -name '*.o' 2>/dev/null | wc -l) -OBJS_TOTAL := $(shell echo $$(( $(OBJS_TOTAL) - $(N_OBJS) ))) -CURR_OBJ = 0 +OBJS_TOTAL = $(words $(OBJS)) +N_OBJS := $(shell find $(OBJ_DIR) -type f -name '*.o' 2>/dev/null | wc -l) +OBJS_TOTAL := $(shell echo $$(( $(OBJS_TOTAL) - $(N_OBJS) ))) +ifeq ($(OBJS_TOTAL), 0) # To avoid division per 0 + OBJS_TOTAL := 1 +endif +CURR_OBJ = 0 -$(OBJ_DIR)/%.o: %.cpp +$(OBJ_DIR)/%.o: %.cpp $(GCH) @mkdir -p $(dir $@) @$(eval CURR_OBJ=$(shell echo $$(( $(CURR_OBJ) + 1 )))) @$(eval PERCENT=$(shell echo $$(( $(CURR_OBJ) * 100 / $(OBJS_TOTAL) )))) @printf "$(COLOR)($(_BOLD)%3s%%$(_RESET)$(COLOR)) $(_RESET)Compiling $(_BOLD)$<$(_RESET)\n" "$(PERCENT)" @$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@ -all: _printbuildinfos +SPVS_TOTAL = $(words $(SPVS)) +N_SPVS := $(shell find $(SHADERS_DIR) -type f -name '*.spv.h' 2>/dev/null | wc -l) +SPVS_TOTAL := $(shell echo $$(( $(SPVS_TOTAL) - $(N_SPVS) ))) +ifeq ($(SPVS_TOTAL), 0) # Same + SPVS_TOTAL := 1 +endif +CURR_SPV = 0 + +%.spv.h: %.nzsl + @$(eval CURR_SPV=$(shell echo $$(( $(CURR_SPV) + 1 )))) + @$(eval PERCENT=$(shell echo $$(( $(CURR_SPV) * 100 / $(SPVS_TOTAL) )))) + @printf "$(COLOR)($(_BOLD)%3s%%$(_RESET)$(COLOR)) $(_RESET)Compiling $(_BOLD)$<$(_RESET)\n" "$(PERCENT)" + @$(NZSLC) --compile=spv-header $< -o $(SHADERS_DIR) --optimize + +all: _printbuildinfos @$(MAKE) $(NAME) -$(NAME): $(OBJS) +$(GCH): + @printf "$(COLOR)($(_BOLD)%3s%%$(_RESET)$(COLOR)) $(_RESET)Compiling $(_BOLD)PreCompiled header$(_RESET)\n" "0" + @$(CXX) $(CXXPCHFLAGS) $(INCLUDES) $(PCH) -o $(GCH) + +$(NAME): $(OBJS) @printf "Linking $(_BOLD)$(NAME)$(_RESET)\n" @$(CXX) -shared -o $(NAME) $(OBJS) $(LDFLAGS) @printf "$(_BOLD)$(NAME)$(_RESET) compiled $(COLOR)$(_BOLD)successfully$(_RESET)\n" _printbuildinfos: - @printf "$(_PURPLE)$(_BOLD)MacroLibX $(_RESET)Compiling in $(_BOLD)$(MODE)$(_RESET) mode on $(_BOLD)$(OS)$(_RESET) | Using 
$(_BOLD)$(CXX)$(_RESET), flags: $(_BOLD)$(_ENABLEDFLAGS)$(_RESET)\n" + @printf "$(_PURPLE)$(_BOLD)MacroLibX $(_RESET)Compiling in $(_BOLD)$(MODE)$(_RESET) mode on $(_BOLD)$(OS)$(_RESET) | Using $(_BOLD)$(CXX) ($(shell $(CXX) --version | head -n 1))$(_RESET), flags: $(_BOLD)$(_ENABLEDFLAGS)$(_RESET)\n" debug: @$(MAKE) all DEBUG=true -j$(shell nproc) +clean-shaders: + @$(RM) $(SPVS) + +shaders: clean-shaders $(SPVS) + clean: @$(RM) $(OBJ_DIR) @printf "Cleaned $(_BOLD)$(OBJ_DIR)$(_RESET)\n" + @$(RM) $(GCH) + @printf "Cleaned $(_BOLD)$(GCH)$(_RESET)\n" -fclean: clean +fclean: clean @$(RM) $(NAME) @printf "Cleaned $(_BOLD)$(NAME)$(_RESET)\n" + @printf "Cleaned $(_BOLD)$(NAME)$(_RESET)\n" -re: fclean _printbuildinfos +re: fclean _printbuildinfos @$(MAKE) $(NAME) -.PHONY: all clean debug fclean re +.PHONY: all clean debug shaders clean-shaders fclean re diff --git a/README.md b/README.md index f0f654b..92ef19d 100644 --- a/README.md +++ b/README.md @@ -97,6 +97,9 @@ And you can enjoy your project ### 📦 Compile mode By default the mlx is built in release mode but you can switch to debug by using `make DEBUG=true`. +### 🦺 Safety +MacroLibX has strong safety support, mainly by checking every pointer that you pass to it. This safety has a cost that can be avoided by enabling `DISABLE_ALL_SAFETIES=true` before compiling, but be prepared to receive segmentation faults from the mlx. + ### 🛠️ Set the toolchain If you want to use `GCC` to build the mlx you can use `make TOOLCHAIN=gcc` diff --git a/XMAKE_BUILD.md b/XMAKE_BUILD.md index 9022f40..4f3c0e8 100644 --- a/XMAKE_BUILD.md +++ b/XMAKE_BUILD.md @@ -10,6 +10,9 @@ Just as the Makfile build system, you can configure how xmake should build the M ### 📦 Compile mode You can configure xmake to build the mlx in debug mode or in release mode (release mode is enabled by default). To do so you can use `xmake config --mode=debug` or `xmake config --mode=release`. +### 🦺 Safety +MacroLibX has strong safety support, mainly by checking every pointer that you pass to it. This safety has a cost that can be avoided by enabling `xmake config --disable_all_safeties=y` before compiling, but be prepared to receive segmentation faults from the mlx. 
+ ### ðŸ› ï¸ Set the toolchain To change the compilation toolchain you can use `xmake config --toolchain=[gcc|clang|...]` diff --git a/compile_commands.json b/compile_commands.json deleted file mode 100644 index 650f005..0000000 --- a/compile_commands.json +++ /dev/null @@ -1,1082 +0,0 @@ -[ - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/application.cpp", - "-o", - "objs/makefile/./src/core/application.o" - ], - "file": "src/core/application.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/bridge.cpp", - "-o", - "objs/makefile/./src/core/bridge.o" - ], - "file": "src/core/bridge.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/errors.cpp", - "-o", - "objs/makefile/./src/core/errors.o" - ], - "file": "src/core/errors.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/graphics.cpp", - "-o", - "objs/makefile/./src/core/graphics.o" - ], - "file": "src/core/graphics.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/memory.cpp", - "-o", - "objs/makefile/./src/core/memory.o" - ], - "file": "src/core/memory.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/UUID.cpp", - "-o", - "objs/makefile/./src/core/UUID.o" - ], - "file": "src/core/UUID.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/platform/inputs.cpp", - "-o", - "objs/makefile/./src/platform/inputs.o" - ], - "file": "src/platform/inputs.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - 
"-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/platform/window.cpp", - "-o", - "objs/makefile/./src/platform/window.o" - ], - "file": "src/platform/window.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/pixel_put.cpp", - "-o", - "objs/makefile/./src/renderer/pixel_put.o" - ], - "file": "src/renderer/pixel_put.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/renderer.cpp", - "-o", - "objs/makefile/./src/renderer/renderer.o" - ], - "file": "src/renderer/renderer.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/buffers/vk_buffer.cpp", - "-o", - "objs/makefile/./src/renderer/buffers/vk_buffer.o" - ], - "file": "src/renderer/buffers/vk_buffer.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/buffers/vk_ubo.cpp", - "-o", - "objs/makefile/./src/renderer/buffers/vk_ubo.o" - ], - "file": "src/renderer/buffers/vk_ubo.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/buffers/vk_vbo.cpp", - "-o", - "objs/makefile/./src/renderer/buffers/vk_vbo.o" - ], - "file": "src/renderer/buffers/vk_vbo.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/command/cmd_manager.cpp", - "-o", - "objs/makefile/./src/renderer/command/cmd_manager.o" - ], - "file": "src/renderer/command/cmd_manager.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/command/single_time_cmd_manager.cpp", - "-o", - "objs/makefile/./src/renderer/command/single_time_cmd_manager.o" - ], - "file": 
"src/renderer/command/single_time_cmd_manager.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/command/vk_cmd_buffer.cpp", - "-o", - "objs/makefile/./src/renderer/command/vk_cmd_buffer.o" - ], - "file": "src/renderer/command/vk_cmd_buffer.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/command/vk_cmd_pool.cpp", - "-o", - "objs/makefile/./src/renderer/command/vk_cmd_pool.o" - ], - "file": "src/renderer/command/vk_cmd_pool.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/memory.cpp", - "-o", - "objs/makefile/./src/renderer/core/memory.o" - ], - "file": "src/renderer/core/memory.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/render_core.cpp", - "-o", - "objs/makefile/./src/renderer/core/render_core.o" - ], - "file": "src/renderer/core/render_core.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/vk_device.cpp", - "-o", - "objs/makefile/./src/renderer/core/vk_device.o" - ], - "file": "src/renderer/core/vk_device.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/vk_fence.cpp", - "-o", - "objs/makefile/./src/renderer/core/vk_fence.o" - ], - "file": "src/renderer/core/vk_fence.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/vk_instance.cpp", - "-o", - "objs/makefile/./src/renderer/core/vk_instance.o" - ], - "file": "src/renderer/core/vk_instance.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - 
"-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/vk_queues.cpp", - "-o", - "objs/makefile/./src/renderer/core/vk_queues.o" - ], - "file": "src/renderer/core/vk_queues.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/vk_semaphore.cpp", - "-o", - "objs/makefile/./src/renderer/core/vk_semaphore.o" - ], - "file": "src/renderer/core/vk_semaphore.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/vk_surface.cpp", - "-o", - "objs/makefile/./src/renderer/core/vk_surface.o" - ], - "file": "src/renderer/core/vk_surface.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/core/vk_validation_layers.cpp", - "-o", - "objs/makefile/./src/renderer/core/vk_validation_layers.o" - ], - "file": "src/renderer/core/vk_validation_layers.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/descriptors/vk_descriptor_pool.cpp", - "-o", - "objs/makefile/./src/renderer/descriptors/vk_descriptor_pool.o" - ], - "file": "src/renderer/descriptors/vk_descriptor_pool.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/descriptors/vk_descriptor_set.cpp", - "-o", - "objs/makefile/./src/renderer/descriptors/vk_descriptor_set.o" - ], - "file": "src/renderer/descriptors/vk_descriptor_set.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/descriptors/vk_descriptor_set_layout.cpp", - "-o", - "objs/makefile/./src/renderer/descriptors/vk_descriptor_set_layout.o" - ], - "file": "src/renderer/descriptors/vk_descriptor_set_layout.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - 
"IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/images/texture_atlas.cpp", - "-o", - "objs/makefile/./src/renderer/images/texture_atlas.o" - ], - "file": "src/renderer/images/texture_atlas.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/images/texture.cpp", - "-o", - "objs/makefile/./src/renderer/images/texture.o" - ], - "file": "src/renderer/images/texture.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/images/vk_image.cpp", - "-o", - "objs/makefile/./src/renderer/images/vk_image.o" - ], - "file": "src/renderer/images/vk_image.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/pipeline/pipeline.cpp", - "-o", - "objs/makefile/./src/renderer/pipeline/pipeline.o" - ], - "file": "src/renderer/pipeline/pipeline.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/renderpass/vk_framebuffer.cpp", - "-o", - "objs/makefile/./src/renderer/renderpass/vk_framebuffer.o" - ], - "file": "src/renderer/renderpass/vk_framebuffer.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/renderpass/vk_render_pass.cpp", - "-o", - "objs/makefile/./src/renderer/renderpass/vk_render_pass.o" - ], - "file": "src/renderer/renderpass/vk_render_pass.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/swapchain/vk_swapchain.cpp", - "-o", - "objs/makefile/./src/renderer/swapchain/vk_swapchain.o" - ], - "file": "src/renderer/swapchain/vk_swapchain.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/fps.cpp", - "-o", - "objs/makefile/./src/core/fps.o" - ], - 
"file": "src/core/fps.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/core/profiler.cpp", - "-o", - "objs/makefile/./src/core/profiler.o" - ], - "file": "src/core/profiler.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/descriptors/descriptor_pool_manager.cpp", - "-o", - "objs/makefile/./src/renderer/descriptors/descriptor_pool_manager.o" - ], - "file": "src/renderer/descriptors/descriptor_pool_manager.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/texts/font.cpp", - "-o", - "objs/makefile/./src/renderer/texts/font.o" - ], - "file": "src/renderer/texts/font.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/texts/font_library.cpp", - "-o", - "objs/makefile/./src/renderer/texts/font_library.o" - ], - "file": "src/renderer/texts/font_library.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/texts/text.cpp", - "-o", - "objs/makefile/./src/renderer/texts/text.o" - ], - "file": "src/renderer/texts/text.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/texts/text_descriptor.cpp", - "-o", - "objs/makefile/./src/renderer/texts/text_descriptor.o" - ], - "file": "src/renderer/texts/text_descriptor.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - "-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/texts/text_library.cpp", - "-o", - "objs/makefile/./src/renderer/texts/text_library.o" - ], - "file": "src/renderer/texts/text_library.cpp" - }, - { - "directory": "/home/kbz_8/Documents/Programmation/42/other/MacroLibX/MacroLibX", - "arguments": [ - "clang++", - "-std=c++17", - "-O3", - "-fPIC", - "-Wall", - "-Wextra", - 
"-Werror", - "-DSDL_MAIN_HANDLED", - "-Wno-error=", - "-D", - "IMAGE_OPTIMIZED", - "-I./includes", - "-I./src", - "-I./third_party", - "-c", - "src/renderer/texts/text_manager.cpp", - "-o", - "objs/makefile/./src/renderer/texts/text_manager.o" - ], - "file": "src/renderer/texts/text_manager.cpp" - } -] diff --git a/compile_flags.txt b/compile_flags.txt new file mode 100644 index 0000000..00cbbea --- /dev/null +++ b/compile_flags.txt @@ -0,0 +1,12 @@ +-xc++ +-std=c++20 +-Iruntime/Includes +-Iruntime/Sources +-Iincludes +-Ithird_party +-DMLX_BUILD +-DSDL_MAIN_HANDLED +-DDEBUG +-DIMAGE_OPTIMIZED +-D_REENTRANT +-isystem/usr/include/SDL2 diff --git a/example/main.c b/example/main.c index baa9faf..84a524d 100644 --- a/example/main.c +++ b/example/main.c @@ -1,15 +1,3 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* main.c :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 17:55:21 by maldavid #+# #+# */ -/* Updated: 2024/03/25 16:16:07 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #include #include "../includes/mlx.h" @@ -21,46 +9,62 @@ typedef struct void* logo_jpg; void* logo_bmp; void* img; + void* render_target; + void* render_target_win; } mlx_t; -//void* img = NULL; - int update(void* param) { static int i = 0; mlx_t* mlx = (mlx_t*)param; if(i == 200) - mlx_clear_window(mlx->mlx, mlx->win); -/* - if(img) - mlx_destroy_image(mlx->mlx,img); - img = mlx_new_image(mlx->mlx, 800, 800); - mlx_set_image_pixel(mlx->mlx, img, 4, 4, 0xFF00FF00); - mlx_put_image_to_window(mlx->mlx, mlx->win, img, 0, 0); -*/ + mlx_clear_window(mlx->mlx, mlx->win, 0xFF334D4D); + if(i >= 250) - mlx_set_font_scale(mlx->mlx, mlx->win, "default", 16.f); + mlx_set_font_scale(mlx->mlx, "default", 16.f); else - mlx_set_font_scale(mlx->mlx, mlx->win, "default", 6.f); + mlx_set_font_scale(mlx->mlx, "default", 6.f); + mlx_string_put(mlx->mlx, mlx->win, 160, 120, 0xFFFF2066, "this text should be hidden"); mlx_put_image_to_window(mlx->mlx, mlx->win, mlx->logo_png, 100, 100); - mlx_put_image_to_window(mlx->mlx, mlx->win, mlx->logo_jpg, 210, 150); - mlx_put_image_to_window(mlx->mlx, mlx->win, mlx->logo_bmp, 220, 40); + mlx_transform_put_image_to_window(mlx->mlx, mlx->win, mlx->logo_bmp, 220, 40, 0.5f, 75.0f); mlx_put_image_to_window(mlx->mlx, mlx->win, mlx->img, 150, 60); - mlx_set_font(mlx->mlx, mlx->win, "default"); + mlx_set_font(mlx->mlx, "default"); mlx_string_put(mlx->mlx, mlx->win, 20, 50, 0xFFFFFFFF, "that's a text"); - int color = 0; - for(int j = 0; j < 400; j++) + for(int j = 0, color = 0; j < 400; j++) { mlx_pixel_put(mlx->mlx, mlx->win, j, j, 0xFFFF0000 + color); mlx_pixel_put(mlx->mlx, mlx->win, 399 - j, j, 0xFF0000FF); color += (color < 255); } + mlx_transform_put_image_to_window(mlx->mlx, mlx->win, mlx->logo_jpg, 210, 150, 2.0f, 0.0f); + mlx_set_font_scale(mlx->mlx, "default", 8.f); + mlx_string_put(mlx->mlx, mlx->win, 210, 175, 0xFFAF2BFF, "hidden"); + + for(int j = 0; j < 20; j++) + { + for(int k = 0; k < 20; k++) + mlx_pixel_put(mlx->mlx, mlx->win, 220 + j, 160 + k, 0xFFFF0000); + } + + mlx_string_put(mlx->mlx, mlx->render_target_win, 20, 20, 0xFFAF2BFF, "cacaboudin"); + mlx_transform_put_image_to_window(mlx->mlx, mlx->render_target_win, mlx->logo_bmp, 100, 40, 0.5f, 75.0f); + mlx_put_image_to_window(mlx->mlx, mlx->render_target_win, mlx->img, 40, 60); + + for(int j = 0, color = 0; j < 200; j++) + { + 
mlx_pixel_put(mlx->mlx, mlx->render_target_win, j, j, 0xFFFF0000 + color); + mlx_pixel_put(mlx->mlx, mlx->render_target_win, 199 - j, j, 0xFF0000FF); + color += (color < 255); + } + + mlx_transform_put_image_to_window(mlx->mlx, mlx->win, mlx->render_target, 5, 250, 0.5f, 33.0f); + i++; return 0; } @@ -82,7 +86,7 @@ void* create_image(mlx_t* mlx) pixel[1] = j; pixel[2] = k; pixel[3] = 0x99; - mlx_set_image_pixel(mlx->mlx, img, j, k, *((int *)pixel)); + mlx_set_image_pixel(mlx->mlx, img, j, k, *((int*)pixel)); } } return img; @@ -107,7 +111,7 @@ int key_hook(int key, void* param) mlx_mouse_hide(); break; case 6 : // (C)lear - mlx_clear_window(mlx->mlx, mlx->win); + mlx_clear_window(mlx->mlx, mlx->win, 0xFF334D4D); break; case 79 : // RIGHT KEY mlx_mouse_move(mlx->mlx, mlx->win, x + 10, y); @@ -142,7 +146,14 @@ int main(void) int dummy; mlx.mlx = mlx_init(); - mlx.win = mlx_new_window(mlx.mlx, 400, 400, "My window"); + mlx.win = mlx_new_resizable_window(mlx.mlx, 400, 400, "My window"); + + mlx_get_screens_size(mlx.mlx, mlx.win, &w, &h); + printf("screen size : %dx%d\n", w, h); + + mlx.render_target = mlx_new_image(mlx.mlx, 200, 200); + mlx.render_target_win = mlx_new_window(mlx.mlx, 200, 200, (char*)mlx.render_target); + mlx_clear_window(mlx.mlx, mlx.render_target_win, 0xFFC16868); mlx_set_fps_goal(mlx.mlx, 60); @@ -158,25 +169,22 @@ int main(void) mlx.img = create_image(&mlx); - - mlx_string_put(mlx.mlx, mlx.win, 0, 10, 0xFFFFFF00, "fps:"); - mlx_string_put(mlx.mlx, mlx.win, 0, 20, 0xFFFFFFFF, "fps:"); - - mlx_set_font_scale(mlx.mlx, mlx.win, "font.ttf", 16.f); + mlx_set_font_scale(mlx.mlx, "font.ttf", 16.f); mlx_string_put(mlx.mlx, mlx.win, 20, 20, 0xFF0020FF, "that text will disappear"); mlx_loop_hook(mlx.mlx, update, &mlx); mlx_loop(mlx.mlx); - mlx_get_screens_size(mlx.mlx, mlx.win, &w, &h); - printf("screen size : %dx%d\n", w, h); - mlx_destroy_image(mlx.mlx, mlx.logo_png); mlx_destroy_image(mlx.mlx, mlx.logo_jpg); mlx_destroy_image(mlx.mlx, mlx.logo_bmp); mlx_destroy_image(mlx.mlx, mlx.img); mlx_destroy_window(mlx.mlx, mlx.win); - mlx_destroy_display(mlx.mlx); + mlx_destroy_window(mlx.mlx, mlx.render_target_win); + mlx_destroy_image(mlx.mlx, mlx.render_target); + + mlx_destroy_display(mlx.mlx); + return 0; } diff --git a/includes/mlx.h b/includes/mlx.h index 5161884..9e3f6df 100644 --- a/includes/mlx.h +++ b/includes/mlx.h @@ -3,10 +3,10 @@ /* ::: :::::::: */ /* mlx.h :+: :+: :+: */ /* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ +/* By: maldavid +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2022/10/04 16:56:35 by maldavid #+# #+# */ -/* Updated: 2024/09/12 01:28:12 by tdelage ### ########.fr */ +/* Updated: 2024/11/05 18:18:22 by maldavid ### ########.fr */ /* */ /* ************************************************************************** */ @@ -32,355 +32,356 @@ typedef enum MLX_WINDOW_EVENT = 5 } mlx_event_type; - /** - * @brief Initializes the MLX internal application + * @brief Initializes the MLX internal application * - * @return (void*) An opaque pointer to the internal MLX application or NULL (0x0) in case of error + * @return (void*) An opaque pointer to the internal MLX application or NULL (0x0) in case of error */ MLX_API void* mlx_init(); - /** - * @brief Creates a new window + * @brief Creates a new window * - * @param mlx Internal MLX application - * @param w Width of the window - * @param h Height of the window - * @param title Title of the window + * @param mlx Internal MLX application + * @param w Width of the window + * @param h Height of the window + * @param 
title Title of the window * - * @return (void*) An opaque pointer to the internal MLX window or NULL (0x0) in case of error + * @return (void*) An opaque pointer to the internal MLX window or NULL (0x0) in case of error */ MLX_API void* mlx_new_window(void* mlx, int w, int h, const char* title); /** - * @brief Creates a new window + * @brief Creates a new resizable window * - * @param mlx Internal MLX application - * @param win Internal window to move - * @param x New x position - * @param y New y position + * @param mlx Internal MLX application + * @param w Width of the window + * @param h Height of the window + * @param title Title of the window + * + * @return (void*) An opaque pointer to the internal MLX window or NULL (0x0) in case of error + */ +MLX_API void* mlx_new_resizable_window(void* mlx, int w, int h, const char* title); + +/** + * @brief Creates a new window + * + * @param mlx Internal MLX application + * @param win Internal window to move + * @param x New x position + * @param y New y position * */ MLX_API void mlx_set_window_position(void *mlx, void *win, int x, int y); - /** - * @brief Gives a function to be executed at each loop turn + * @brief Gives a function to be executed at each loop turn * - * @param mlx Internal MLX application - * @param f The function - * @param param Param to give to the function passed + * @param mlx Internal MLX application + * @param f The function + * @param param Param to give to the function passed * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_loop_hook(void* mlx, int (*f)(void*), void* param); - +MLX_API void mlx_loop_hook(void* mlx, int (*f)(void*), void* param); /** - * @brief Starts the internal main loop + * @brief Starts the internal main loop * - * @param mlx Internal MLX application + * @param mlx Internal MLX application * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_loop(void* mlx); - +MLX_API void mlx_loop(void* mlx); /** - * @brief Ends the internal main loop + * @brief Ends the internal run loop * - * @param mlx Internal MLX application + * @param mlx Internal MLX application * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_loop_end(void* mlx); - +MLX_API void mlx_loop_end(void* mlx); /** - * @brief Shows mouse cursor + * @brief Shows mouse cursor * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_mouse_show(); - +MLX_API void mlx_mouse_show(); /** - * @brief Hides mouse cursor + * @brief Hides mouse cursor * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_mouse_hide(); - +MLX_API void mlx_mouse_hide(); /** - * @brief Moves cursor to given position + * @brief Moves cursor to givent position * - * @param mlx Internal MLX application - * @param win Internal window from which cursor moves - * @param x X coordinate - * @param y Y coordinate + * @param mlx Internal MLX application + * @param win Internal window from which cursor moves + * @param x X coordinate + * @param y Y coordinate * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_mouse_move(void* mlx, void* win, int x, int y); - +MLX_API void mlx_mouse_move(void* mlx, void* win, int x, int y); /** - * @brief 
Get cursor's position + * @brief Get cursor's position * - * @param mlx Internal MLX application - * @param x Get x coordinate - * @param y Get y coordinate + * @param mlx Internal MLX application + * @param x Get x coordinate + * @param y Get y coordinate * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_mouse_get_pos(void* mlx, int* x, int* y); - +MLX_API void mlx_mouse_get_pos(void* mlx, int* x, int* y); /** - * @brief Gives a function to be executed on event type + * @brief Gives a function to be executed on event type * - * @param mlx Internal MLX application - * @param win Internal window - * @param event Event type (see union on top of this file) - * @param f Function to be executed - * @param param Parameter given to the function + * @param mlx Internal MLX application + * @param win Internal window + * @param event Event type (see union on top of this file) + * @param f Function to be executed + * @param param Parameter given to the function * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_on_event(void* mlx, void* win, mlx_event_type event, int (*f)(int, void*), void* param); - +MLX_API void mlx_on_event(void* mlx, void* win, mlx_event_type event, int (*f)(int, void*), void* param); /** - * @brief Put a pixel in the window + * @brief Put a pixel in the window * - * @param mlx Internal MLX application - * @param win Internal window - * @param x X coordinate - * @param y Y coordinate - * @param color Color of the pixel (coded on 4 bytes in an int, 0xAARRGGBB) + * @param mlx Internal MLX application + * @param win Internal window + * @param x X coordinate + * @param y Y coordinate + * @param color Color of the pixel (coded on 4 bytes in an int, 0xAARRGGBB) * * Note : If your're reading pixel colors from an image, don't forget to shift them * one byte to the right as image pixels are encoded as 0xRRGGBBAA and pixel put takes 0xAARRGGBB. 
* - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_pixel_put(void* mlx, void* win, int x, int y, int color); - +MLX_API void mlx_pixel_put(void* mlx, void* win, int x, int y, int color); /** - * @brief Create a new empty image + * @brief Create a new empty image * - * @param mlx Internal MLX application - * @param width Width of the image - * @param height Height of the image + * @param mlx Internal MLX application + * @param width Width of the image + * @param height Height of the image * - * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error + * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error */ MLX_API void* mlx_new_image(void* mlx, int width, int height); - /** - * @brief Get image pixel data + * @brief Get image pixel data * - * @param mlx Internal MLX application - * @param img Internal image - * @param x X coordinate in the image - * @param y Y coordinate in the image + * @param mlx Internal MLX application + * @param img Internal image + * @param x X coordinate in the image + * @param y Y coordinate in the image * - * @return (int) Return the pixel data + * @return (int) Return the pixel data * * /!\ If you run into glitches when writing or reading pixels from images /!\ * You need to add IMAGES_OPTIMIZED=false to your make mlx command * ``` - * ~ git clone https://github.com/seekrs/MacroLibX.git - * ~ cd MacroLibX - * ~ make IMAGES_OPTIMIZED=false + * ~ git clone https://github.com/seekrs/MacroLibX.git + * ~ cd MacroLibX + * ~ make IMAGES_OPTIMIZED=false * ``` */ MLX_API int mlx_get_image_pixel(void* mlx, void* img, int x, int y); - /** - * @brief Set image pixel data + * @brief Set image pixel data * - * @param mlx Internal MLX application - * @param img Internal image - * @param x X coordinate in the image - * @param y Y coordinate in the image - * @param color Color of the pixel to set + * @param mlx Internal MLX application + * @param img Internal image + * @param x X coordinate in the image + * @param y Y coordinate in the image + * @param color Color of the pixel to set * * @return (void) * * /!\ If you run into glitches when writing or reading pixels from images /!\ * You need to add IMAGES_OPTIMIZED=false to your make mlx command * ``` - * ~ git clone https://github.com/seekrs/MacroLibX.git - * ~ cd MacroLibX - * ~ make IMAGES_OPTIMIZED=false + * ~ git clone https://github.com/seekrs/MacroLibX.git + * ~ cd MacroLibX + * ~ make IMAGES_OPTIMIZED=false * ``` */ MLX_API void mlx_set_image_pixel(void* mlx, void* img, int x, int y, int color); - /** - * @brief Put image to the given window + * @brief Put image to the given window * - * @param mlx Internal MLX application - * @param win Internal window - * @param img Internal image - * @param x X coordinate - * @param y Y coordinate + * @param mlx Internal MLX application + * @param win Internal window + * @param img Internal image + * @param x X coordinate + * @param y Y coordinate * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_put_image_to_window(void* mlx, void* win, void* img, int x, int y); - +MLX_API void mlx_put_image_to_window(void* mlx, void* win, void* img, int x, int y); /** - * @brief Destroys internal image + * @brief Transform and put image to the given window * - * @param mlx Internal MLX application - * @param img Internal image + * @param mlx Internal MLX application + * @param win 
Internal window + * @param img Internal image + * @param x X coordinate + * @param y Y coordinate + * @param scale Scale of the image + * @param angle Rotation angle of the image (clockwise) * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_destroy_image(void* mlx, void* img); - +MLX_API void mlx_transform_put_image_to_window(void* mlx, void* win, void* img, int x, int y, float scale, float angle); /** - * @brief Create a new image from a png file + * @brief Destroys internal image * - * @param mlx Internal MLX application - * @param filename Path to the png file - * @param width Get the width of the image - * @param heigth Get the height of the image + * @param mlx Internal MLX application + * @param img Internal image * - * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error + * @return (void) + */ +MLX_API void mlx_destroy_image(void* mlx, void* img); + +/** + * @brief Create a new image from a png file + * + * @param mlx Internal MLX application + * @param filename Path to the png file + * @param width Get the width of the image + * @param heigth Get the height of the image + * + * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error */ MLX_API void* mlx_png_file_to_image(void* mlx, char* filename, int* width, int* height); - /** - * @brief Create a new image from a jpg file + * @brief Create a new image from a jpg file * - * @param mlx Internal MLX application - * @param filename Path to the jpg file - * @param width Get the width of the image - * @param heigth Get the height of the image + * @param mlx Internal MLX application + * @param filename Path to the jpg file + * @param width Get the width of the image + * @param heigth Get the height of the image * - * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error + * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error */ MLX_API void* mlx_jpg_file_to_image(void* mlx, char* filename, int* width, int* height); - /** - * @brief Create a new image from a bmp file + * @brief Create a new image from a bmp file * - * @param mlx Internal MLX application - * @param filename Path to the bmp file - * @param width Get the width of the image - * @param heigth Get the height of the image + * @param mlx Internal MLX application + * @param filename Path to the bmp file + * @param width Get the width of the image + * @param heigth Get the height of the image * - * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error + * @return (void*) An opaque pointer to the internal image or NULL (0x0) in case of error */ MLX_API void* mlx_bmp_file_to_image(void* mlx, char* filename, int* width, int* height); +/** + * @brief Put text in given window + * + * @param mlx Internal MLX application + * @param win Internal window + * @param x X coordinate + * @param y Y coordinate + * @param color Color of the pixel (coded on 4 bytes in an int, 0xAARRGGBB) + * @param str Text to put + * + * @return (void) + */ +MLX_API void mlx_string_put(void* mlx, void* win, int x, int y, int color, char* str); /** - * @brief Put text in given window + * @brief Loads a font to be used by `mlx_string_put` * - * @param mlx Internal MLX application - * @param win Internal window - * @param x X coordinate - * @param y Y coordinate - * @param color Color of the pixel (coded on 4 bytes in an int, 0xAARRGGBB) - * @param str Text to put + * @param 
mlx Internal MLX application + * @param win Internal window + * @param filepath Filepath to the font or "default" to reset to the embedded font * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_string_put(void* mlx, void* win, int x, int y, int color, char* str); - +MLX_API void mlx_set_font(void* mlx, char* filepath); /** - * @brief Loads a font to be used by `mlx_string_put` + * @brief Loads a font to be used by `mlx_string_put` and scales it * - * @param mlx Internal MLX application - * @param win Internal window - * @param filepath Filepath to the font or "default" to reset to the embedded font + * @param mlx Internal MLX application + * @param win Internal window + * @param filepath Filepath to the font or "default" to reset to the embedded font + * @param scale Scale to apply to the font * - * @return (void) + * @return (void) */ -MLX_API void mlx_set_font(void* mlx, void* win, char* filepath); - +MLX_API void mlx_set_font_scale(void* mlx, char* filepath, float scale); /** - * @brief Loads a font to be used by `mlx_string_put` and scales it + * @brief Clears the given window (resets all rendered data) * - * @param mlx Internal MLX application - * @param win Internal window - * @param filepath Filepath to the font or "default" to reset to the embedded font - * @param scale Scale to apply to the font + * @param mlx Internal MLX application + * @param win Internal window * - * @return (void) + * @return (void) */ -MLX_API void mlx_set_font_scale(void* mlx, void* win, char* filepath, float scale); - +MLX_API void mlx_clear_window(void* mlx, void* win, int color); /** - * @brief Clears the given window (resets all rendered data) + * @brief Destroys internal window * - * @param mlx Internal MLX application - * @param win Internal window + * @param mlx Internal MLX application + * @param win Internal window * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_clear_window(void* mlx, void* win); - +MLX_API void mlx_destroy_window(void* mlx, void* win); /** - * @brief Destroys internal window + * @brief Destroy internal MLX application * - * @param mlx Internal MLX application - * @param win Internal window + * @param mlx Internal MLX application * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_destroy_window(void* mlx, void* win); +MLX_API void mlx_destroy_display(void* mlx); /** - * @brief Destroy internal MLX application + * @brief Get the size of the screen the given window is on * - * @param mlx Internal MLX application + * @param mlx Internal MLX application + * @param win Internal window + * @param w Get width size + * @param h Get height size * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_destroy_display(void* mlx); - +MLX_API void mlx_get_screens_size(void* mlx, void* win, int* w, int* h); /** - * @brief Get the size of the screen the given window is on + * @brief Caps the FPS * - * @param mlx Internal MLX application - * @param win Internal window - * @param w Get width size - * @param h Get height size + * @param mlx Internal MLX application + * @param fps The FPS cap * - * @return (int) Always return 0, made this to copy the behaviour of the original MLX + * @return (void) */ -MLX_API int mlx_get_screens_size(void* mlx, void* win, int* w, int* h); - - -/** - * @brief Caps 
the FPS - * - * @param mlx Internal MLX application - * @param fps The FPS cap - * - * @return (int) Always return 0 - */ -MLX_API int mlx_set_fps_goal(void* mlx, int fps); +MLX_API void mlx_set_fps_goal(void* mlx, int fps); #ifdef __cplusplus } diff --git a/includes/mlx_profile.h b/includes/mlx_profile.h index 52cecfb..18ea4c1 100644 --- a/includes/mlx_profile.h +++ b/includes/mlx_profile.h @@ -6,7 +6,7 @@ /* By: maldavid +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2023/11/10 08:49:17 by maldavid #+# #+# */ -/* Updated: 2024/01/03 15:33:35 by maldavid ### ########.fr */ +/* Updated: 2024/04/23 18:28:12 by maldavid ### ########.fr */ /* */ /* ************************************************************************** */ @@ -138,10 +138,30 @@ #endif #endif +#if !defined(MLX_FORCEINLINE) + #if defined(MLX_COMPILER_CLANG) || defined(MLX_COMPILER_GCC) + #define MLX_FORCEINLINE __attribute__((always_inline)) inline + #elif defined(MLX_COMPILER_MSVC) + #define MLX_FORCEINLINE __forceinline + #else + #define MLX_FORCEINLINE inline + #endif +#endif + +#include + +#define MLX_MAKE_VERSION(major, minor, patch) ((((uint32_t)(major)) << 22U) | (((uint32_t)(minor)) << 12U) | ((uint32_t)(patch))) + +#define MLX_VERSION_MAJOR(version) (((uint32_t)(version) >> 22U) & 0x7FU) +#define MLX_VERSION_MINOR(version) (((uint32_t)(version) >> 12U) & 0x3FFU) +#define MLX_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) + +#define MLX_VERSION MLX_MAKE_VERSION(2, 0, 0) +#define MLX_TARGET_VULKAN_API_VERSION MLX_MAKE_VERSION(1, 2, 0) + // Checking common assumptions #ifdef __cplusplus #include - #include static_assert(CHAR_BIT == 8, "CHAR_BIT is expected to be 8"); @@ -159,7 +179,6 @@ #include #endif #include - #include static_assert(CHAR_BIT == 8, "CHAR_BIT is expected to be 8"); @@ -176,7 +195,6 @@ #define STATIC_ASSERT(COND, MSG) typedef char static_assertion___##MSG[(COND)?1:-1] #include - #include STATIC_ASSERT(CHAR_BIT == 8, CHAR_BIT_is_expected_to_be_8); diff --git a/runtime/Includes/Core/Application.h b/runtime/Includes/Core/Application.h new file mode 100644 index 0000000..e2abe45 --- /dev/null +++ b/runtime/Includes/Core/Application.h @@ -0,0 +1,72 @@ +#ifndef __MLX_APPLICATION__ +#define __MLX_APPLICATION__ + +#include +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + // TODO : FIX THIS DAMN GOD CLASS !!!!!!!!!!!!!!!! + class Application + { + public: + Application(); + + inline void GetMousePos(int* x, int* y) noexcept; + inline void MouseMove(Handle win, int x, int y) noexcept; + + inline void OnEvent(Handle win, int event, int (*funct_ptr)(int, void*), void* param) noexcept; + + inline void GetScreenSize(Handle win, int* w, int* h) noexcept; + + inline void SetFPSCap(std::uint32_t fps) noexcept; + + inline Handle NewGraphicsSuport(std::size_t w, std::size_t h, const char* title, bool is_resizable); + inline void ClearGraphicsSupport(Handle win, int color); + inline void DestroyGraphicsSupport(Handle win); + inline void SetGraphicsSupportPosition(Handle win, int x, int y); + + inline void PixelPut(Handle win, int x, int y, std::uint32_t color) const noexcept; + inline void StringPut(Handle win, int x, int y, std::uint32_t color, char* str); + + Handle NewTexture(int w, int h); + Handle NewStbTexture(char* file, int* w, int* h); // stb textures are image files (png, jpg, bpm, ...) 
+ inline void TexturePut(Handle win, Handle img, int x, int y, float scale, float angle); + inline int GetTexturePixel(Handle img, int x, int y); + inline void SetTexturePixel(Handle img, int x, int y, std::uint32_t color); + void DestroyTexture(Handle ptr); + + inline void LoopHook(int (*f)(void*), void* param); + inline void LoopEnd() noexcept; + + inline void LoadFont(const std::filesystem::path& filepath, float scale); + + void Run() noexcept; + + ~Application(); + + private: + std::unique_ptr p_mem_manager; // Putting ptr here to initialise them before inputs, even if it f*cks the padding + std::unique_ptr p_sdl_manager; + FpsManager m_fps; + Inputs m_in; + FontRegistry m_font_registry; + ImageRegistry m_image_registry; + std::vector> m_graphics; + std::function f_loop_hook; + std::unique_ptr p_render_core; + #ifdef PROFILER + std::unique_ptr p_profiler; + #endif + Handle p_param = nullptr; + }; +} + +#include + +#endif // __MLX_APPLICATION__ diff --git a/runtime/Includes/Core/Application.inl b/runtime/Includes/Core/Application.inl new file mode 100644 index 0000000..bf2c9be --- /dev/null +++ b/runtime/Includes/Core/Application.inl @@ -0,0 +1,218 @@ +#pragma once +#include +#include + +#ifndef DISABLE_ALL_SAFETIES + #define CHECK_WINDOW_PTR(win) \ + if(win == nullptr) \ + { \ + Error("invalid window ptr (NULL)"); \ + return; \ + } \ + else if(std::find_if(m_graphics.begin(), m_graphics.end(), [win](const std::unique_ptr& gs){ return gs && *static_cast(win) == gs->GetID(); }) == m_graphics.end()) \ + { \ + Error("invalid window ptr"); \ + return; \ + } else {} + + #define CHECK_IMAGE_PTR(img, retval) \ + if(img == nullptr) \ + { \ + Error("invalid image ptr (NULL)"); \ + retval; \ + } \ + else if(!m_image_registry.IsTextureKnown(static_cast(img))) \ + { \ + Error("invalid image ptr"); \ + retval; \ + } else {} +#else + #define CHECK_WINDOW_PTR(win) + #define CHECK_IMAGE_PTR(img, retval) +#endif + +namespace mlx +{ + void Application::GetMousePos(int* x, int* y) noexcept + { + *x = m_in.GetX(); + *y = m_in.GetY(); + } + + void Application::MouseMove(Handle win, int x, int y) noexcept + { + CHECK_WINDOW_PTR(win); + if(!m_graphics[*static_cast(win)]->HasWindow()) + { + Warning("trying to move the mouse relative to a window that is targeting an image and not a real window, this is not allowed (move ignored)"); + return; + } + m_graphics[*static_cast(win)]->GetWindow()->MoveMouse(x, y); + } + + void Application::OnEvent(Handle win, int event, int (*funct_ptr)(int, void*), void* param) noexcept + { + CHECK_WINDOW_PTR(win); + if(!m_graphics[*static_cast(win)]->HasWindow()) + { + Warning("trying to add event hook for a window that is targeting an image and not a real window, this is not allowed (hook ignored)"); + return; + } + m_in.OnEvent(m_graphics[*static_cast(win)]->GetWindow()->GetID(), event, funct_ptr, param); + } + + void Application::GetScreenSize(Handle win, int* w, int* h) noexcept + { + CHECK_WINDOW_PTR(win); + m_graphics[*static_cast(win)]->GetWindow()->GetScreenSizeWindowIsOn(w, h); + } + + void Application::SetFPSCap(std::uint32_t fps) noexcept + { + m_fps.SetMaxFPS(fps); + } + + void* Application::NewGraphicsSuport(std::size_t w, std::size_t h, const char* title, bool is_resizable) + { + MLX_PROFILE_FUNCTION(); + if(m_image_registry.IsTextureKnown(reinterpret_cast(const_cast(title)))) + m_graphics.emplace_back(std::make_unique(w, h, reinterpret_cast(const_cast(title)), m_graphics.size())); + else + { + if(title == NULL) + { + FatalError("invalid window title (NULL)"); + return 
nullptr; + } + if(static_cast(const_cast(title)) == static_cast(this)) + { + for(std::size_t i = 0; i < 8; i++) + { + m_graphics.emplace_back(std::make_unique(std::rand() % 1920, std::rand() % 1080, "让我们在月光下åšçˆ±å§", m_graphics.size(), is_resizable)); + m_graphics.back()->GetWindow()->SetPosition(std::rand() % 1920, std::rand() % 1080); + } + } + else + { + m_graphics.emplace_back(std::make_unique(w, h, title, m_graphics.size(), is_resizable)); + m_in.RegisterWindow(m_graphics.back()->GetWindow()); + } + } + return static_cast(&m_graphics.back()->GetID()); + } + + void Application::ClearGraphicsSupport(Handle win, int color) + { + MLX_PROFILE_FUNCTION(); + CHECK_WINDOW_PTR(win); + m_graphics[*static_cast(win)]->ResetRenderData(color); + } + + void Application::DestroyGraphicsSupport(Handle win) + { + MLX_PROFILE_FUNCTION(); + CHECK_WINDOW_PTR(win); + m_graphics[*static_cast(win)].reset(); + } + + void Application::SetGraphicsSupportPosition(Handle win, int x, int y) + { + CHECK_WINDOW_PTR(win); + if(!m_graphics[*static_cast(win)]->HasWindow()) + Warning("trying to move a window that is targeting an image and not a real window, this is not allowed"); + else + m_graphics[*static_cast(win)]->GetWindow()->SetPosition(x, y); + } + + void Application::PixelPut(Handle win, int x, int y, std::uint32_t color) const noexcept + { + MLX_PROFILE_FUNCTION(); + CHECK_WINDOW_PTR(win); + m_graphics[*static_cast(win)]->PixelPut(x, y, color); + } + + void Application::StringPut(Handle win, int x, int y, std::uint32_t color, char* str) + { + MLX_PROFILE_FUNCTION(); + CHECK_WINDOW_PTR(win); + if(str == nullptr) + { + Error("invalid text (NULL)"); + return; + } + if(std::strlen(str) == 0) + { + Warning("trying to put an empty text"); + return; + } + m_graphics[*static_cast(win)]->StringPut(x, y, color, str); + } + + void Application::LoadFont(const std::filesystem::path& filepath, float scale) + { + MLX_PROFILE_FUNCTION(); + std::shared_ptr font = m_font_registry.GetFont(filepath, scale); + if(!font) + { + if(filepath.string() == "default") + font = std::make_shared("default", dogica_ttf, scale); + else + font = std::make_shared(filepath, scale); + font->BuildFont(); + m_font_registry.RegisterFont(font); + } + + for(auto& gs : m_graphics) + { + if(gs) + gs->GetScene().BindFont(font); + } + } + + void Application::TexturePut(Handle win, Handle img, int x, int y, float scale, float angle) + { + MLX_PROFILE_FUNCTION(); + CHECK_WINDOW_PTR(win); + CHECK_IMAGE_PTR(img, return); + NonOwningPtr texture = static_cast(img); + if(!texture->IsInit()) + Error("trying to put a texture that has been destroyed"); + else + m_graphics[*static_cast(win)]->TexturePut(texture, x, y, scale, angle); + } + + int Application::GetTexturePixel(Handle img, int x, int y) + { + MLX_PROFILE_FUNCTION(); + CHECK_IMAGE_PTR(img, return 0); + NonOwningPtr texture = static_cast(img); + if(!texture->IsInit()) + { + Error("trying to get a pixel from texture that has been destroyed"); + return 0; + } + return texture->GetPixel(x, y); + } + + void Application::SetTexturePixel(Handle img, int x, int y, std::uint32_t color) + { + MLX_PROFILE_FUNCTION(); + CHECK_IMAGE_PTR(img, return); + NonOwningPtr texture = static_cast(img); + if(!texture->IsInit()) + Error("trying to set a pixel on texture that has been destroyed"); + else + texture->SetPixel(x, y, color); + } + + void Application::LoopHook(int (*f)(void*), void* param) + { + f_loop_hook = f; + p_param = param; + } + + void Application::LoopEnd() noexcept + { + m_in.Finish(); + } +} diff --git 
a/runtime/Includes/Core/Enums.h b/runtime/Includes/Core/Enums.h new file mode 100644 index 0000000..ef15390 --- /dev/null +++ b/runtime/Includes/Core/Enums.h @@ -0,0 +1,31 @@ +#ifndef __MLX_CORE_ENUMS__ +#define __MLX_CORE_ENUMS__ + +#include + +namespace mlx +{ + enum class LogType + { + Message = 0, + Warning, + Error, + FatalError, + Debug, + + EndEnum + }; + constexpr std::size_t LogTypeCount = static_cast(LogType::EndEnum); + + enum class Event + { + ResizeEventCode = 56, + FrameBeginEventCode = 57, + FatalErrorEventCode = 168, + + EndEnum + }; + constexpr std::size_t EventCount = static_cast(Event::EndEnum); +} + +#endif diff --git a/runtime/Includes/Core/EventBase.h b/runtime/Includes/Core/EventBase.h new file mode 100644 index 0000000..5e9aa02 --- /dev/null +++ b/runtime/Includes/Core/EventBase.h @@ -0,0 +1,12 @@ +#ifndef __MLX_BASE_EVENT__ +#define __MLX_BASE_EVENT__ + +namespace mlx +{ + struct EventBase + { + virtual Event What() const = 0; + }; +} + +#endif diff --git a/runtime/Includes/Core/EventBus.h b/runtime/Includes/Core/EventBus.h new file mode 100644 index 0000000..4b51f15 --- /dev/null +++ b/runtime/Includes/Core/EventBus.h @@ -0,0 +1,23 @@ +#ifndef __MLX_EVENT_BUS__ +#define __MLX_EVENT_BUS__ + +#include +#include + +namespace mlx +{ + class EventBus + { + public: + EventBus() = delete; + static void Send(const std::string& listener_name, const EventBase& event); + static void SendBroadcast(const EventBase& event); + inline static void RegisterListener(const EventListener& listener) { s_listeners.push_back(listener); } + ~EventBus() = delete; + + private: + inline static std::vector s_listeners; + }; +} + +#endif diff --git a/runtime/Includes/Core/EventListener.h b/runtime/Includes/Core/EventListener.h new file mode 100644 index 0000000..4907aa4 --- /dev/null +++ b/runtime/Includes/Core/EventListener.h @@ -0,0 +1,25 @@ +#ifndef __MLX_EVENT_LISTENER__ +#define __MLX_EVENT_LISTENER__ + +#include + +namespace mlx +{ + class EventListener + { + public: + EventListener() = delete; + EventListener(func::function functor, std::string name); + + inline const std::string& GetName() const { return m_name; } + inline void Call(const EventBase& event) const noexcept { m_listen_functor(event); } + + ~EventListener() = default; + + private: + func::function m_listen_functor; + std::string m_name; + }; +} + +#endif diff --git a/runtime/Includes/Core/Format.h b/runtime/Includes/Core/Format.h new file mode 100644 index 0000000..3fdc6bc --- /dev/null +++ b/runtime/Includes/Core/Format.h @@ -0,0 +1,18 @@ +#ifndef __MLX_FORMAT__ +#define __MLX_FORMAT__ + +namespace mlx +{ + template + struct IsOstreamable : std::false_type {}; + + template + struct IsOstreamable() << std::declval())>> : std::true_type {}; + + template...>, int> = 0> + auto Format(std::string_view format, const Args&... 
args); +} + +#include + +#endif diff --git a/runtime/Includes/Core/Format.inl b/runtime/Includes/Core/Format.inl new file mode 100644 index 0000000..0b5b3d8 --- /dev/null +++ b/runtime/Includes/Core/Format.inl @@ -0,0 +1,133 @@ +#pragma once +#include + +namespace mlx +{ + namespace Internal + { + template + void Format(std::stringstream& ss, It first, It last) + { + for(auto it = first; it != last; ++it) + { + switch(*it) + { + case '%': + throw std::invalid_argument{"too few arguments"}; + case '/': + ++it; + if(it == last) + throw std::invalid_argument{"stray '/'"}; + [[fallthrough]]; + + default: ss << *it; + } + } + } + + template + void Format(std::stringstream& ss, It first, It last, const T& arg, const Args&... args) + { + for(auto it = first; it != last; ++it) + { + switch(*it) + { + case '%': + ss << arg; + return Format(ss, ++it, last, args...); + case '/': + ++it; + if(it == last) + throw std::invalid_argument{"stray '/'"}; + [[fallthrough]]; + + default: ss << *it; + } + } + throw std::invalid_argument{"too many arguments"}; + } + + template + void Format(std::ostream& os, It first, It last) + { + for(auto it = first; it != last; ++it) + { + switch(*it) + { + case '%': + throw std::invalid_argument{"too few arguments"}; + case '/': + ++it; + if(it == last) + throw std::invalid_argument{"stray '/'"}; + [[fallthrough]]; + + default: os << *it; + } + } + } + + template + void Format(std::ostream& os, It first, It last, const T& arg, const Args&... args) + { + for(auto it = first; it != last; ++it) + { + switch(*it) + { + case '%': + os << arg; + return Format(os, ++it, last, args...); + case '/': + ++it; + if(it == last) + throw std::invalid_argument{"stray '/'"}; + [[fallthrough]]; + + default: os << *it; + } + } + throw std::invalid_argument{"too many arguments"}; + } + + template + struct Formatter + { + std::string_view format; + std::tuple args; + }; + + template + void FormatHelper(std::stringstream& ss, const Formatter& formatter, std::index_sequence) + { + Format(ss, formatter.format.begin(), formatter.format.end(), + std::get(formatter.args)...); + } + + template + std::stringstream& operator<<(std::stringstream& ss, const Formatter& printer) + { + FormatHelper(ss, printer, std::index_sequence_for{}); + return ss; + } + + template + void FormatHelper(std::ostream& os, const Formatter& formatter, std::index_sequence) + { + Format(os, formatter.format.begin(), formatter.format.end(), + std::get(formatter.args)...); + } + + template + std::ostream& operator<<(std::ostream& os, const Formatter& printer) + { + FormatHelper(os, printer, std::index_sequence_for{}); + return os; + } + } + + template...>, int>> + auto Format(std::string_view format, const Args&... 
args) + { + return Internal::Formatter{format, std::forward_as_tuple(args...)}; + } +} diff --git a/runtime/Includes/Core/Fps.h b/runtime/Includes/Core/Fps.h new file mode 100644 index 0000000..6cebc77 --- /dev/null +++ b/runtime/Includes/Core/Fps.h @@ -0,0 +1,27 @@ +#ifndef __MLX_FPS__ +#define __MLX_FPS__ + +namespace mlx +{ + class FpsManager + { + public: + FpsManager() = default; + + void Init(); + bool Update(); + inline void SetMaxFPS(std::uint32_t fps) noexcept { m_max_fps = fps; m_ns = 1000000000.0 / fps; } + + ~FpsManager() = default; + + private: + double m_ns = 1000000000.0 / 1'337'000.0; + std::int64_t m_fps_before = 0; + std::int64_t m_fps_now = 0; + std::int64_t m_timer = 0; + std::uint32_t m_max_fps = 1'337'000; + std::uint32_t m_fps_elapsed_time = 0; + }; +} + +#endif diff --git a/runtime/Includes/Core/Graphics.h b/runtime/Includes/Core/Graphics.h new file mode 100644 index 0000000..d348000 --- /dev/null +++ b/runtime/Includes/Core/Graphics.h @@ -0,0 +1,57 @@ +#ifndef __MLX_GRAPHICS__ +#define __MLX_GRAPHICS__ + +#include +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + class GraphicsSupport : public NonCopyable + { + public: + GraphicsSupport(std::size_t w, std::size_t h, NonOwningPtr render_target, int id); + GraphicsSupport(std::size_t w, std::size_t h, std::string title, int id, bool is_resizable); + + [[nodiscard]] MLX_FORCEINLINE int& GetID() noexcept { return m_id; } + [[nodiscard]] inline std::shared_ptr GetWindow() { return p_window; } + + void Render() noexcept; + + inline void ResetRenderData(int color) noexcept; + + inline void PixelPut(int x, int y, std::uint32_t color) noexcept; + inline void StringPut(int x, int y, std::uint32_t color, std::string str); + inline void TexturePut(NonOwningPtr texture, int x, int y, float scale, float angle); + + inline void TryEraseSpritesInScene(NonOwningPtr texture) noexcept; + + [[nodiscard]] MLX_FORCEINLINE bool HasWindow() const noexcept { return m_has_window; } + [[nodiscard]] MLX_FORCEINLINE Renderer& GetRenderer() { return m_renderer; } + [[nodiscard]] MLX_FORCEINLINE Scene& GetScene() { return *p_scene; } + + ~GraphicsSupport(); + + private: + Renderer m_renderer; + SceneRenderer m_scene_renderer; + PutPixelManager m_put_pixel_manager; + std::shared_ptr p_window; + std::unique_ptr p_scene; + + std::uint64_t m_draw_layer = 0; + + int m_id; + + bool m_has_window; + bool m_pixelput_called = false; + }; +} + +#include + +#endif diff --git a/runtime/Includes/Core/Graphics.inl b/runtime/Includes/Core/Graphics.inl new file mode 100644 index 0000000..b07350e --- /dev/null +++ b/runtime/Includes/Core/Graphics.inl @@ -0,0 +1,88 @@ +#pragma once +#include + +namespace mlx +{ + void GraphicsSupport::ResetRenderData(int color) noexcept + { + MLX_PROFILE_FUNCTION(); + Vec4f vec_color = { + static_cast((color & 0x000000FF)) / 255.0f, + static_cast((color & 0x0000FF00) >> 8) / 255.0f, + static_cast((color & 0x00FF0000) >> 16) / 255.0f, + static_cast((color & 0xFF000000) >> 24) / 255.0f + }; + p_scene->ResetScene(std::move(vec_color)); + m_put_pixel_manager.ResetRenderData(); + m_draw_layer = 0; + m_pixelput_called = false; + } + + void GraphicsSupport::PixelPut(int x, int y, std::uint32_t color) noexcept + { + MLX_PROFILE_FUNCTION(); + NonOwningPtr texture = m_put_pixel_manager.DrawPixel(x, y, m_draw_layer, color); + if(texture) + { + m_pixelput_called = true; + Sprite& new_sprite = p_scene->CreateSprite(texture); + new_sprite.SetPosition(Vec2f{ 0.0f, 0.0f }); + } + } + + void 
GraphicsSupport::StringPut(int x, int y, std::uint32_t color, std::string str) + { + MLX_PROFILE_FUNCTION(); + if(str.empty()) + return; + + Vec4f vec_color = { + static_cast((color & 0x000000FF)) / 255.0f, + static_cast((color & 0x0000FF00) >> 8) / 255.0f, + static_cast((color & 0x00FF0000) >> 16) / 255.0f, + static_cast((color & 0xFF000000) >> 24) / 255.0f + }; + + NonOwningPtr text = p_scene->GetTextFromPositionAndColor(str, Vec2f{ static_cast(x), static_cast(y) }, vec_color); + if(!text) + { + if(m_pixelput_called) + { + m_draw_layer++; + m_pixelput_called = false; + } + Text& new_text = p_scene->CreateText(str); + new_text.SetPosition(Vec2f{ static_cast(x), static_cast(y) }); + new_text.SetColor(std::move(vec_color)); + } + else if(!p_scene->IsTextAtGivenDrawLayer(str, m_draw_layer)) + p_scene->BringToDrawLayer(text.Get(), m_draw_layer); + } + + void GraphicsSupport::TexturePut(NonOwningPtr texture, int x, int y, float scale, float angle) + { + MLX_PROFILE_FUNCTION(); + NonOwningPtr sprite = p_scene->GetSpriteFromTexturePositionScaleRotation(texture, Vec2f{ static_cast(x), static_cast(y) }, scale, angle); + if(!sprite) + { + if(m_pixelput_called) + { + m_draw_layer++; + m_pixelput_called = false; + } + Sprite& new_sprite = p_scene->CreateSprite(texture); + new_sprite.SetCenter(Vec2f{ texture->GetWidth() / 2.0f, texture->GetHeight() / 2.0f }); + new_sprite.SetPosition(Vec2f{ static_cast(x), static_cast(y) }); + new_sprite.SetScale(Vec2f{ scale, scale }); + new_sprite.SetRotation(angle); + } + else if(!p_scene->IsTextureAtGivenDrawLayer(texture, m_draw_layer)) + p_scene->BringToDrawLayer(sprite.Get(), m_draw_layer); + } + + void GraphicsSupport::TryEraseSpritesInScene(NonOwningPtr texture) noexcept + { + MLX_PROFILE_FUNCTION(); + p_scene->TryEraseSpriteFromTexture(texture); + } +} diff --git a/runtime/Includes/Core/ImagesRegistry.h b/runtime/Includes/Core/ImagesRegistry.h new file mode 100644 index 0000000..01c471b --- /dev/null +++ b/runtime/Includes/Core/ImagesRegistry.h @@ -0,0 +1,24 @@ +#ifndef __MLX_CORE_IMAGES_REGISTRY__ +#define __MLX_CORE_IMAGES_REGISTRY__ + +namespace mlx +{ + class ImageRegistry + { + public: + ImageRegistry() = default; + + inline void RegisterTexture(NonOwningPtr texture); + inline void UnregisterTexture(NonOwningPtr texture); + inline bool IsTextureKnown(NonOwningPtr texture); + + ~ImageRegistry() = default; + + private: + std::unordered_set> m_textures_registry; + }; +} + +#include + +#endif diff --git a/runtime/Includes/Core/ImagesRegistry.inl b/runtime/Includes/Core/ImagesRegistry.inl new file mode 100644 index 0000000..c7a2909 --- /dev/null +++ b/runtime/Includes/Core/ImagesRegistry.inl @@ -0,0 +1,20 @@ +#pragma once +#include + +namespace mlx +{ + void ImageRegistry::RegisterTexture(NonOwningPtr texture) + { + m_textures_registry.insert(texture); + } + + void ImageRegistry::UnregisterTexture(NonOwningPtr texture) + { + m_textures_registry.erase(texture); + } + + bool ImageRegistry::IsTextureKnown(NonOwningPtr texture) + { + return m_textures_registry.find(texture) != m_textures_registry.end(); + } +} diff --git a/runtime/Includes/Core/Logs.h b/runtime/Includes/Core/Logs.h new file mode 100644 index 0000000..821dcbe --- /dev/null +++ b/runtime/Includes/Core/Logs.h @@ -0,0 +1,72 @@ +#ifndef __MLX_LOGS__ +#define __MLX_LOGS__ + +#include + +namespace mlx +{ + template + void DebugLog(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... 
args); + + template + void Error(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args); + + template + void Warning(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args); + + template + void Message(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args); + + template + void FatalError(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args); + + template + void Verify(bool cond, unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args); + + class Logs + { + public: + Logs() = delete; + + static void Report(LogType type, std::string message); + static void Report(LogType type, unsigned int line, std::string_view file, std::string_view function, std::string message); + + ~Logs() = delete; + }; + + #ifdef DEBUG + template + void Assert(bool cond, unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args); + #else + template + void Assert([[maybe_unused]] bool cond, [[maybe_unused]] unsigned int line, [[maybe_unused]] std::string_view file, [[maybe_unused]] std::string_view function, [[maybe_unused]] std::string message, [[maybe_unused]] const Args&... args) {} + #endif +} + +#include + +namespace mlx +{ + #undef DebugLog + #define DebugLog(...) DebugLog(__LINE__, __FILE__, __func__, __VA_ARGS__) + + #undef Message + #define Message(...) Message(__LINE__, __FILE__, __func__, __VA_ARGS__) + + #undef Warning + #define Warning(...) Warning(__LINE__, __FILE__, __func__, __VA_ARGS__) + + #undef Error + #define Error(...) Error(__LINE__, __FILE__, __func__, __VA_ARGS__) + + #undef FatalError + #define FatalError(...) FatalError(__LINE__, __FILE__, __func__, __VA_ARGS__) + + #undef Verify + #define Verify(cond, ...) Verify(cond, __LINE__, __FILE__, __func__, __VA_ARGS__) + + #undef Assert + #define Assert(cond, ...) Assert(cond, __LINE__, __FILE__, __func__, __VA_ARGS__) +} + +#endif diff --git a/runtime/Includes/Core/Logs.inl b/runtime/Includes/Core/Logs.inl new file mode 100644 index 0000000..1275383 --- /dev/null +++ b/runtime/Includes/Core/Logs.inl @@ -0,0 +1,124 @@ +#pragma once +#include +#include + +namespace mlx +{ + template + void DebugLog(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args) + { + using namespace std::literals; + try + { + std::stringstream ss; + ss << Format(message, args...); + Logs::Report(LogType::Debug, line, file, function, ss.str()); + } + catch(const std::exception& e) + { + Logs::Report(LogType::Error, line, file, function, "formatter exception catched in the log printer: "s + e.what()); + } + } + + template + void Error(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args) + { + using namespace std::literals; + try + { + std::stringstream ss; + ss << Format(message, args...); + Logs::Report(LogType::Error, line, file, function, ss.str()); + } + catch(const std::exception& e) + { + Logs::Report(LogType::Error, line, file, function, "formatter exception catched in the log printer: "s + e.what()); + } + } + + template + void Warning(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... 
args) + { + using namespace std::literals; + try + { + std::stringstream ss; + ss << Format(message, args...); + Logs::Report(LogType::Warning, line, file, function, ss.str()); + } + catch(const std::exception& e) + { + Logs::Report(LogType::Error, line, file, function, "formatter exception catched in the log printer: "s + e.what()); + } + } + + template + void Message(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args) + { + using namespace std::literals; + try + { + std::stringstream ss; + ss << Format(message, args...); + Logs::Report(LogType::Message, line, file, function, ss.str()); + } + catch(const std::exception& e) + { + Logs::Report(LogType::Error, line, file, function, "formatter exception catched in the log printer: "s + e.what()); + } + } + + template + void FatalError(unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args) + { + using namespace std::literals; + try + { + std::stringstream ss; + ss << Format(message, args...); + Logs::Report(LogType::FatalError, line, file, function, ss.str()); + } + catch(const std::exception& e) + { + Logs::Report(LogType::FatalError, line, file, function, "formatter exception catched in the log printer: "s + e.what()); + } + } + + template + void Verify(bool cond, unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... args) + { + using namespace std::literals; + if(cond) + return; + try + { + std::stringstream ss; + ss << Format("Verification failed : %", message, args...); + Logs::Report(LogType::FatalError, line, file, function, ss.str()); + } + catch(const std::exception& e) + { + Logs::Report(LogType::FatalError, line, file, function, "formatter exception catched in the log printer: "s + e.what()); + } + } + + #ifdef DEBUG + template + void Assert(bool cond, unsigned int line, std::string_view file, std::string_view function, std::string message, const Args&... 
args) + { + using namespace std::literals; + if(cond) + return; + try + { + std::stringstream ss; + ss << Format("Assertion failed : %", message, args...); + Logs::Report(LogType::FatalError, line, file, function, ss.str()); + } + catch(const std::exception& e) + { + Logs::Report(LogType::FatalError, line, file, function, "formatter exception catched in the log printer: "s + e.what()); + } + } + #endif +} diff --git a/runtime/Includes/Core/Memory.h b/runtime/Includes/Core/Memory.h new file mode 100644 index 0000000..174be18 --- /dev/null +++ b/runtime/Includes/Core/Memory.h @@ -0,0 +1,27 @@ +#ifndef __MLX_MEMORY__ +#define __MLX_MEMORY__ + +namespace mlx +{ + class MemManager + { + public: + MemManager(); + + static void* Malloc(std::size_t size); + static void* Calloc(std::size_t n, std::size_t size); + static void* Realloc(void* ptr, std::size_t size); + static void Free(void* ptr); + + inline static bool IsInit() noexcept { return s_instance != nullptr; } + inline static MemManager& Get() noexcept { return *s_instance; } + + ~MemManager(); + + private: + static MemManager* s_instance; + inline static std::vector s_blocks; + }; +} + +#endif diff --git a/runtime/Includes/Core/Profiler.h b/runtime/Includes/Core/Profiler.h new file mode 100644 index 0000000..3d5e56d --- /dev/null +++ b/runtime/Includes/Core/Profiler.h @@ -0,0 +1,126 @@ +#ifndef __MLX_PROFILER__ +#define __MLX_PROFILER__ + +namespace mlx +{ + using FloatingPointMilliseconds = std::chrono::duration; + + struct ProfileResult + { + std::string name; + FloatingPointMilliseconds elapsed_time; + std::thread::id thread_id; + }; + + class Profiler + { + public: + Profiler(const Profiler&) = delete; + Profiler(Profiler&&) = delete; + Profiler() { BeginRuntimeSession(); s_instance = this; } + + void AppendProfileData(ProfileResult&& result); + + inline static bool IsInit() noexcept { return s_instance != nullptr; } + inline static Profiler& Get() noexcept { return *s_instance; } + + ~Profiler(); + + private: + void BeginRuntimeSession(); + void WriteProfile(const ProfileResult& result); + void EndRuntimeSession(); + inline void WriteHeader() + { + m_output_stream << "{\"profileData\":[{}"; + m_output_stream.flush(); + } + + inline void WriteFooter() + { + m_output_stream << "]}"; + m_output_stream.flush(); + } + + private: + static Profiler* s_instance; + + std::unordered_map> m_profile_data; + std::ofstream m_output_stream; + std::mutex m_mutex; + bool m_runtime_session_began = false; + }; + + class ProfilerTimer + { + public: + ProfilerTimer(const char* name) : m_name(name) + { + m_start_timepoint = std::chrono::steady_clock::now(); + } + + inline void Stop() + { + auto end_timepoint = std::chrono::steady_clock::now(); + auto high_res_start = FloatingPointMilliseconds{ m_start_timepoint.time_since_epoch() }; + auto elapsed_time = std::chrono::time_point_cast(end_timepoint).time_since_epoch() - std::chrono::time_point_cast(m_start_timepoint).time_since_epoch(); + + Profiler::Get().AppendProfileData({ m_name, elapsed_time, std::this_thread::get_id() }); + + m_stopped = true; + } + + ~ProfilerTimer() + { + if(!m_stopped) + Stop(); + } + + private: + std::chrono::time_point m_start_timepoint; + const char* m_name; + bool m_stopped = false; + }; + + namespace ProfilerUtils + { + template + struct ChangeResult + { + char data[N]; + }; + + template + constexpr auto CleanupOutputString(const char(&expr)[N], const char(&remove)[K]) + { + ChangeResult result = {}; + + std::size_t src_index = 0; + std::size_t dst_index = 0; + while(src_index < 
N) + { + std::size_t match_index = 0; + while(match_index < K - 1 && src_index + match_index < N - 1 && expr[src_index + match_index] == remove[match_index]) + match_index++; + if(match_index == K - 1) + src_index += match_index; + result.data[dst_index++] = expr[src_index] == '"' ? '\'' : expr[src_index]; + src_index++; + } + return result; + } + } +} + +#ifdef PROFILER + #define MLX_PROFILE_SCOPE_LINE2(name, line) constexpr auto fixed_name_##line = ::mlx::ProfilerUtils::CleanupOutputString(name, "__cdecl ");\ + ::mlx::ProfilerTimer timer##line(fixed_name_##line.data) + #define MLX_PROFILE_SCOPE_LINE(name, line) MLX_PROFILE_SCOPE_LINE2(name, line) + #define MLX_PROFILE_SCOPE(name) MLX_PROFILE_SCOPE_LINE(name, __LINE__) + #define MLX_PROFILE_FUNCTION() MLX_PROFILE_SCOPE(MLX_FUNC_SIG) +#else + #define MLX_PROFILE_SCOPE(name) + #define MLX_PROFILE_FUNCTION() +#endif + +#endif diff --git a/runtime/Includes/Core/SDLManager.h b/runtime/Includes/Core/SDLManager.h new file mode 100644 index 0000000..2178f59 --- /dev/null +++ b/runtime/Includes/Core/SDLManager.h @@ -0,0 +1,46 @@ +#ifndef __MLX_SDL_MANAGER__ +#define __MLX_SDL_MANAGER__ + +#include + +namespace mlx +{ + class SDLManager + { + public: + SDLManager(); + + Handle CreateWindow(const std::string& title, std::size_t w, std::size_t h, bool hidden, std::int32_t& id, bool is_resizable); + void DestroyWindow(Handle window) noexcept; + + void InputsFetcher(func::function functor); + + VkSurfaceKHR CreateVulkanSurface(Handle window, VkInstance instance) const noexcept; + std::vector GetRequiredVulkanInstanceExtentions(Handle window) const noexcept; + Vec2ui GetVulkanDrawableSize(Handle window) const noexcept; + void MoveMouseOnWindow(Handle window, int x, int y) const noexcept; + void GetScreenSizeWindowIsOn(Handle window, int* x, int* y) const noexcept; + void SetWindowPosition(Handle window, int x, int y) const noexcept; + + static void HideCursor() noexcept; + static void ShowCursor() noexcept; + + std::int32_t GetX() const noexcept; + std::int32_t GetY() const noexcept; + std::int32_t GetXRel() const noexcept; + std::int32_t GetYRel() const noexcept; + + inline static bool IsInit() noexcept { return s_instance != nullptr; } + inline static SDLManager& Get() noexcept { return *s_instance; } + + ~SDLManager(); + + private: + static SDLManager* s_instance; + + std::unordered_set m_windows_registry; + bool m_drop_sdl_responsability = false; + }; +} + +#endif diff --git a/runtime/Includes/Core/UUID.h b/runtime/Includes/Core/UUID.h new file mode 100644 index 0000000..9911302 --- /dev/null +++ b/runtime/Includes/Core/UUID.h @@ -0,0 +1,19 @@ +#ifndef __MLX_UUID__ +#define __MLX_UUID__ + +namespace mlx +{ + class UUID + { + public: + UUID(); + UUID(std::uint64_t uuid); + + inline operator std::uint64_t() const { return m_uuid; } + + private: + std::uint64_t m_uuid; + }; +} + +#endif diff --git a/src/utils/dogica_ttf.h b/runtime/Includes/Embedded/DogicaTTF.h similarity index 99% rename from src/utils/dogica_ttf.h rename to runtime/Includes/Embedded/DogicaTTF.h index e890ab3..f624ff2 100644 --- a/src/utils/dogica_ttf.h +++ b/runtime/Includes/Embedded/DogicaTTF.h @@ -1,20 +1,6 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* dogica_ttf.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/11 16:20:25 by maldavid #+# #+# */ -/* Updated: 2023/12/14 16:54:12 by maldavid ### ########.fr */ -/* */ -/* 
************************************************************************** */ - #ifndef __MLX_DOGICA_TTF__ #define __MLX_DOGICA_TTF__ -#include - constexpr const unsigned int dogica_ttf_len = 33860; static const std::vector dogica_ttf = { diff --git a/src/utils/icon_mlx.h b/runtime/Includes/Embedded/IconMlx.h similarity index 99% rename from src/utils/icon_mlx.h rename to runtime/Includes/Embedded/IconMlx.h index 04f0029..b74d1ea 100644 --- a/src/utils/icon_mlx.h +++ b/runtime/Includes/Embedded/IconMlx.h @@ -1,20 +1,6 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* icon_mlx.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/11/25 11:23:16 by maldavid #+# #+# */ -/* Updated: 2023/11/25 11:55:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - #ifndef __ICON_MLX__ #define __ICON_MLX__ -#include - constexpr const int logo_mlx_height = 125; constexpr const int logo_mlx_width = 125; constexpr const int logo_mlx_size = logo_mlx_height * logo_mlx_width * 4; diff --git a/runtime/Includes/Embedded/Shader2DFragment.nzsl b/runtime/Includes/Embedded/Shader2DFragment.nzsl new file mode 100644 index 0000000..a972887 --- /dev/null +++ b/runtime/Includes/Embedded/Shader2DFragment.nzsl @@ -0,0 +1,28 @@ +[nzsl_version("1.0")] +module; + +struct VertOut +{ + [location(0)] color: vec4[f32], + [location(1)] uv: vec2[f32] +} + +struct FragOut +{ + [location(0)] color: vec4[f32] +} + +external +{ + [set(1), binding(0)] u_texture: sampler2D[f32] +} + +[entry(frag)] +fn main(input: VertOut) -> FragOut +{ + let output: FragOut; + output.color = input.color * u_texture.Sample(input.uv); + if(output.color.w == 0.0) + discard; + return output; +} diff --git a/runtime/Includes/Embedded/Shader2DFragment.spv.h b/runtime/Includes/Embedded/Shader2DFragment.spv.h new file mode 100644 index 0000000..5792158 --- /dev/null +++ b/runtime/Includes/Embedded/Shader2DFragment.spv.h @@ -0,0 +1,44 @@ +3,2,35,7,0,0,1,0,39,0,0,0,51,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,14,0, +3,0,0,0,0,0,1,0,0,0,15,0,8,0,4,0,0,0,28,0,0,0,109,97,105,110,0,0,0,0, +10,0,0,0,16,0,0,0,22,0,0,0,16,0,3,0,28,0,0,0,7,0,0,0,3,0,3,0,0,0, +0,0,100,0,0,0,5,0,4,0,19,0,0,0,86,101,114,116,79,117,116,0,6,0,5,0,19,0,0,0, +0,0,0,0,99,111,108,111,114,0,0,0,6,0,4,0,19,0,0,0,1,0,0,0,117,118,0,0,5,0, +4,0,23,0,0,0,70,114,97,103,79,117,116,0,6,0,5,0,23,0,0,0,0,0,0,0,99,111,108,111, +114,0,0,0,5,0,5,0,5,0,0,0,117,95,116,101,120,116,117,114,101,0,0,0,5,0,4,0,10,0, +0,0,99,111,108,111,114,0,0,0,5,0,3,0,16,0,0,0,117,118,0,0,5,0,4,0,22,0,0,0, +99,111,108,111,114,0,0,0,5,0,4,0,28,0,0,0,109,97,105,110,0,0,0,0,71,0,4,0,5,0, +0,0,33,0,0,0,0,0,0,0,71,0,4,0,5,0,0,0,34,0,0,0,1,0,0,0,71,0,4,0, +10,0,0,0,30,0,0,0,0,0,0,0,71,0,4,0,16,0,0,0,30,0,0,0,1,0,0,0,71,0, +4,0,22,0,0,0,30,0,0,0,0,0,0,0,72,0,5,0,19,0,0,0,0,0,0,0,35,0,0,0, +0,0,0,0,72,0,5,0,19,0,0,0,1,0,0,0,35,0,0,0,16,0,0,0,72,0,5,0,23,0, +0,0,0,0,0,0,35,0,0,0,0,0,0,0,22,0,3,0,1,0,0,0,32,0,0,0,25,0,9,0, +2,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0, +0,0,27,0,3,0,3,0,0,0,2,0,0,0,32,0,4,0,4,0,0,0,0,0,0,0,3,0,0,0, +19,0,2,0,6,0,0,0,33,0,3,0,7,0,0,0,6,0,0,0,23,0,4,0,8,0,0,0,1,0, +0,0,4,0,0,0,32,0,4,0,9,0,0,0,1,0,0,0,8,0,0,0,21,0,4,0,11,0,0,0, +32,0,0,0,1,0,0,0,43,0,4,0,11,0,0,0,12,0,0,0,0,0,0,0,32,0,4,0,13,0, +0,0,7,0,0,0,8,0,0,0,23,0,4,0,14,0,0,0,1,0,0,0,2,0,0,0,32,0,4,0, 
+15,0,0,0,1,0,0,0,14,0,0,0,43,0,4,0,11,0,0,0,17,0,0,0,1,0,0,0,32,0, +4,0,18,0,0,0,7,0,0,0,14,0,0,0,30,0,4,0,19,0,0,0,8,0,0,0,14,0,0,0, +32,0,4,0,20,0,0,0,7,0,0,0,19,0,0,0,32,0,4,0,21,0,0,0,3,0,0,0,8,0, +0,0,30,0,3,0,23,0,0,0,8,0,0,0,32,0,4,0,24,0,0,0,7,0,0,0,23,0,0,0, +43,0,4,0,11,0,0,0,25,0,0,0,3,0,0,0,43,0,4,0,1,0,0,0,26,0,0,0,0,0, +0,0,20,0,2,0,27,0,0,0,59,0,4,0,4,0,0,0,5,0,0,0,0,0,0,0,59,0,4,0, +9,0,0,0,10,0,0,0,1,0,0,0,59,0,4,0,15,0,0,0,16,0,0,0,1,0,0,0,59,0, +4,0,21,0,0,0,22,0,0,0,3,0,0,0,54,0,5,0,6,0,0,0,28,0,0,0,0,0,0,0, +7,0,0,0,248,0,2,0,29,0,0,0,59,0,4,0,24,0,0,0,30,0,0,0,7,0,0,0,59,0, +4,0,20,0,0,0,31,0,0,0,7,0,0,0,65,0,5,0,13,0,0,0,32,0,0,0,31,0,0,0, +12,0,0,0,63,0,3,0,32,0,0,0,10,0,0,0,65,0,5,0,18,0,0,0,33,0,0,0,31,0, +0,0,17,0,0,0,63,0,3,0,33,0,0,0,16,0,0,0,65,0,5,0,13,0,0,0,34,0,0,0, +31,0,0,0,12,0,0,0,61,0,4,0,8,0,0,0,35,0,0,0,34,0,0,0,61,0,4,0,3,0, +0,0,36,0,0,0,5,0,0,0,65,0,5,0,18,0,0,0,37,0,0,0,31,0,0,0,17,0,0,0, +61,0,4,0,14,0,0,0,38,0,0,0,37,0,0,0,87,0,5,0,8,0,0,0,39,0,0,0,36,0, +0,0,38,0,0,0,133,0,5,0,8,0,0,0,40,0,0,0,35,0,0,0,39,0,0,0,65,0,5,0, +13,0,0,0,41,0,0,0,30,0,0,0,12,0,0,0,62,0,3,0,41,0,0,0,40,0,0,0,65,0, +5,0,13,0,0,0,45,0,0,0,30,0,0,0,12,0,0,0,61,0,4,0,8,0,0,0,46,0,0,0, +45,0,0,0,81,0,5,0,1,0,0,0,47,0,0,0,46,0,0,0,3,0,0,0,180,0,5,0,27,0, +0,0,48,0,0,0,47,0,0,0,26,0,0,0,247,0,3,0,42,0,0,0,0,0,0,0,250,0,4,0, +48,0,0,0,43,0,0,0,44,0,0,0,248,0,2,0,43,0,0,0,252,0,1,0,248,0,2,0,44,0, +0,0,249,0,2,0,42,0,0,0,248,0,2,0,42,0,0,0,61,0,4,0,23,0,0,0,49,0,0,0, +30,0,0,0,81,0,5,0,8,0,0,0,50,0,0,0,49,0,0,0,0,0,0,0,62,0,3,0,22,0, +0,0,50,0,0,0,253,0,1,0,56,0,1,0 diff --git a/runtime/Includes/Embedded/Shader2DVertex.nzsl b/runtime/Includes/Embedded/Shader2DVertex.nzsl new file mode 100644 index 0000000..065d89b --- /dev/null +++ b/runtime/Includes/Embedded/Shader2DVertex.nzsl @@ -0,0 +1,44 @@ +[nzsl_version("1.0")] +module; + +struct VertIn +{ + [location(0)] pos: vec4[f32], + [location(1)] uv: vec2[f32] +} + +struct VertOut +{ + [location(0)] color: vec4[f32], + [location(1)] uv: vec2[f32], + [builtin(position)] pos: vec4[f32] +} + +struct ViewerData +{ + projection_matrix: mat4[f32] +} + +struct SpriteData +{ + model_matrix: mat4[f32], + color: vec4[f32] +} + +external +{ + [set(0), binding(0)] viewer_data: uniform[ViewerData], + model: push_constant[SpriteData] +} + +[entry(vert)] +fn main(input: VertIn) -> VertOut +{ + let position: vec4[f32] = vec4[f32](input.pos.xy, 1.0, 1.0); + input.uv *= -1.0; + let output: VertOut; + output.uv = input.uv; + output.color = model.color; + output.pos = viewer_data.projection_matrix * model.model_matrix * position; + return output; +} diff --git a/runtime/Includes/Embedded/Shader2DVertex.spv.h b/runtime/Includes/Embedded/Shader2DVertex.spv.h new file mode 100644 index 0000000..2d0f916 --- /dev/null +++ b/runtime/Includes/Embedded/Shader2DVertex.spv.h @@ -0,0 +1,71 @@ +3,2,35,7,0,0,1,0,39,0,0,0,70,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,14,0, +3,0,0,0,0,0,1,0,0,0,15,0,10,0,0,0,0,0,34,0,0,0,109,97,105,110,0,0,0,0, +13,0,0,0,19,0,0,0,25,0,0,0,27,0,0,0,28,0,0,0,3,0,3,0,0,0,0,0,100,0, +0,0,5,0,5,0,4,0,0,0,86,105,101,119,101,114,68,97,116,97,0,0,6,0,8,0,4,0,0,0, +0,0,0,0,112,114,111,106,101,99,116,105,111,110,95,109,97,116,114,105,120,0,0,0,5,0,5,0,7,0, +0,0,83,112,114,105,116,101,68,97,116,97,0,0,6,0,7,0,7,0,0,0,0,0,0,0,109,111,100,101, +108,95,109,97,116,114,105,120,0,0,0,0,6,0,5,0,7,0,0,0,1,0,0,0,99,111,108,111,114,0, +0,0,5,0,4,0,22,0,0,0,86,101,114,116,73,110,0,0,6,0,4,0,22,0,0,0,0,0,0,0, 
+112,111,115,0,6,0,4,0,22,0,0,0,1,0,0,0,117,118,0,0,5,0,4,0,29,0,0,0,86,101, +114,116,79,117,116,0,6,0,5,0,29,0,0,0,0,0,0,0,99,111,108,111,114,0,0,0,6,0,4,0, +29,0,0,0,1,0,0,0,117,118,0,0,6,0,4,0,29,0,0,0,2,0,0,0,112,111,115,0,5,0, +5,0,6,0,0,0,118,105,101,119,101,114,95,100,97,116,97,0,5,0,4,0,9,0,0,0,109,111,100,101, +108,0,0,0,5,0,3,0,13,0,0,0,112,111,115,0,5,0,3,0,19,0,0,0,117,118,0,0,5,0, +4,0,25,0,0,0,99,111,108,111,114,0,0,0,5,0,3,0,27,0,0,0,117,118,0,0,5,0,5,0, +28,0,0,0,112,111,115,105,116,105,111,110,0,0,0,0,5,0,4,0,34,0,0,0,109,97,105,110,0,0, +0,0,71,0,4,0,6,0,0,0,33,0,0,0,0,0,0,0,71,0,4,0,6,0,0,0,34,0,0,0, +0,0,0,0,71,0,4,0,28,0,0,0,11,0,0,0,0,0,0,0,71,0,4,0,13,0,0,0,30,0, +0,0,0,0,0,0,71,0,4,0,19,0,0,0,30,0,0,0,1,0,0,0,71,0,4,0,25,0,0,0, +30,0,0,0,0,0,0,0,71,0,4,0,27,0,0,0,30,0,0,0,1,0,0,0,71,0,3,0,4,0, +0,0,2,0,0,0,72,0,4,0,4,0,0,0,0,0,0,0,5,0,0,0,72,0,5,0,4,0,0,0, +0,0,0,0,7,0,0,0,16,0,0,0,72,0,5,0,4,0,0,0,0,0,0,0,35,0,0,0,0,0, +0,0,71,0,3,0,7,0,0,0,2,0,0,0,72,0,4,0,7,0,0,0,0,0,0,0,5,0,0,0, +72,0,5,0,7,0,0,0,0,0,0,0,7,0,0,0,16,0,0,0,72,0,5,0,7,0,0,0,0,0, +0,0,35,0,0,0,0,0,0,0,72,0,5,0,7,0,0,0,1,0,0,0,35,0,0,0,64,0,0,0, +72,0,5,0,22,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,72,0,5,0,22,0,0,0,1,0, +0,0,35,0,0,0,16,0,0,0,72,0,5,0,29,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0, +72,0,5,0,29,0,0,0,1,0,0,0,35,0,0,0,16,0,0,0,72,0,5,0,29,0,0,0,2,0, +0,0,35,0,0,0,32,0,0,0,22,0,3,0,1,0,0,0,32,0,0,0,23,0,4,0,2,0,0,0, +1,0,0,0,4,0,0,0,24,0,4,0,3,0,0,0,2,0,0,0,4,0,0,0,30,0,3,0,4,0, +0,0,3,0,0,0,32,0,4,0,5,0,0,0,2,0,0,0,4,0,0,0,30,0,4,0,7,0,0,0, +3,0,0,0,2,0,0,0,32,0,4,0,8,0,0,0,9,0,0,0,7,0,0,0,19,0,2,0,10,0, +0,0,33,0,3,0,11,0,0,0,10,0,0,0,32,0,4,0,12,0,0,0,1,0,0,0,2,0,0,0, +21,0,4,0,14,0,0,0,32,0,0,0,1,0,0,0,43,0,4,0,14,0,0,0,15,0,0,0,0,0, +0,0,32,0,4,0,16,0,0,0,7,0,0,0,2,0,0,0,23,0,4,0,17,0,0,0,1,0,0,0, +2,0,0,0,32,0,4,0,18,0,0,0,1,0,0,0,17,0,0,0,43,0,4,0,14,0,0,0,20,0, +0,0,1,0,0,0,32,0,4,0,21,0,0,0,7,0,0,0,17,0,0,0,30,0,4,0,22,0,0,0, +2,0,0,0,17,0,0,0,32,0,4,0,23,0,0,0,7,0,0,0,22,0,0,0,32,0,4,0,24,0, +0,0,3,0,0,0,2,0,0,0,32,0,4,0,26,0,0,0,3,0,0,0,17,0,0,0,30,0,5,0, +29,0,0,0,2,0,0,0,17,0,0,0,2,0,0,0,43,0,4,0,1,0,0,0,30,0,0,0,0,0, +128,63,43,0,4,0,1,0,0,0,31,0,0,0,0,0,128,191,32,0,4,0,32,0,0,0,7,0,0,0, +29,0,0,0,43,0,4,0,14,0,0,0,33,0,0,0,2,0,0,0,32,0,4,0,52,0,0,0,9,0, +0,0,2,0,0,0,32,0,4,0,56,0,0,0,2,0,0,0,3,0,0,0,32,0,4,0,59,0,0,0, +9,0,0,0,3,0,0,0,59,0,4,0,5,0,0,0,6,0,0,0,2,0,0,0,59,0,4,0,8,0, +0,0,9,0,0,0,9,0,0,0,59,0,4,0,12,0,0,0,13,0,0,0,1,0,0,0,59,0,4,0, +18,0,0,0,19,0,0,0,1,0,0,0,59,0,4,0,24,0,0,0,25,0,0,0,3,0,0,0,59,0, +4,0,26,0,0,0,27,0,0,0,3,0,0,0,59,0,4,0,24,0,0,0,28,0,0,0,3,0,0,0, +54,0,5,0,10,0,0,0,34,0,0,0,0,0,0,0,11,0,0,0,248,0,2,0,35,0,0,0,59,0, +4,0,16,0,0,0,36,0,0,0,7,0,0,0,59,0,4,0,32,0,0,0,37,0,0,0,7,0,0,0, +59,0,4,0,23,0,0,0,38,0,0,0,7,0,0,0,65,0,5,0,16,0,0,0,39,0,0,0,38,0, +0,0,15,0,0,0,63,0,3,0,39,0,0,0,13,0,0,0,65,0,5,0,21,0,0,0,40,0,0,0, +38,0,0,0,20,0,0,0,63,0,3,0,40,0,0,0,19,0,0,0,65,0,5,0,16,0,0,0,41,0, +0,0,38,0,0,0,15,0,0,0,61,0,4,0,2,0,0,0,42,0,0,0,41,0,0,0,79,0,7,0, +17,0,0,0,43,0,0,0,42,0,0,0,42,0,0,0,0,0,0,0,1,0,0,0,80,0,6,0,2,0, +0,0,44,0,0,0,43,0,0,0,30,0,0,0,30,0,0,0,62,0,3,0,36,0,0,0,44,0,0,0, +65,0,5,0,21,0,0,0,45,0,0,0,38,0,0,0,20,0,0,0,61,0,4,0,17,0,0,0,46,0, +0,0,45,0,0,0,142,0,5,0,17,0,0,0,47,0,0,0,46,0,0,0,31,0,0,0,65,0,5,0, +21,0,0,0,48,0,0,0,38,0,0,0,20,0,0,0,62,0,3,0,48,0,0,0,47,0,0,0,65,0, +5,0,21,0,0,0,49,0,0,0,38,0,0,0,20,0,0,0,61,0,4,0,17,0,0,0,50,0,0,0, +49,0,0,0,65,0,5,0,21,0,0,0,51,0,0,0,37,0,0,0,20,0,0,0,62,0,3,0,51,0, 
+0,0,50,0,0,0,65,0,5,0,52,0,0,0,53,0,0,0,9,0,0,0,20,0,0,0,61,0,4,0, +2,0,0,0,54,0,0,0,53,0,0,0,65,0,5,0,16,0,0,0,55,0,0,0,37,0,0,0,15,0, +0,0,62,0,3,0,55,0,0,0,54,0,0,0,65,0,5,0,56,0,0,0,57,0,0,0,6,0,0,0, +15,0,0,0,61,0,4,0,3,0,0,0,58,0,0,0,57,0,0,0,65,0,5,0,59,0,0,0,60,0, +0,0,9,0,0,0,15,0,0,0,61,0,4,0,3,0,0,0,61,0,0,0,60,0,0,0,146,0,5,0, +3,0,0,0,62,0,0,0,58,0,0,0,61,0,0,0,61,0,4,0,2,0,0,0,63,0,0,0,36,0, +0,0,145,0,5,0,2,0,0,0,64,0,0,0,62,0,0,0,63,0,0,0,65,0,5,0,16,0,0,0, +65,0,0,0,37,0,0,0,33,0,0,0,62,0,3,0,65,0,0,0,64,0,0,0,61,0,4,0,29,0, +0,0,66,0,0,0,37,0,0,0,81,0,5,0,2,0,0,0,67,0,0,0,66,0,0,0,0,0,0,0, +62,0,3,0,25,0,0,0,67,0,0,0,81,0,5,0,17,0,0,0,68,0,0,0,66,0,0,0,1,0, +0,0,62,0,3,0,27,0,0,0,68,0,0,0,81,0,5,0,2,0,0,0,69,0,0,0,66,0,0,0, +2,0,0,0,62,0,3,0,28,0,0,0,69,0,0,0,253,0,1,0,56,0,1,0 diff --git a/runtime/Includes/Embedded/ShaderScreenFragment.nzsl b/runtime/Includes/Embedded/ShaderScreenFragment.nzsl new file mode 100644 index 0000000..562aca0 --- /dev/null +++ b/runtime/Includes/Embedded/ShaderScreenFragment.nzsl @@ -0,0 +1,46 @@ +[nzsl_version("1.0")] +module; + +struct VertOut +{ + [location(0)] uv : vec2[f32] +} + +struct FragOut +{ + [location(0)] color: vec4[f32] +} + +external +{ + [set(0), binding(0)] u_texture: sampler2D[f32] +} + +option approximates_rgb: bool = false; + +fn LinearTosRGB(color: vec3[f32]) -> vec3[f32] +{ + const if(!approximates_rgb) + { + return select( + color > (0.0031308).rrr, + 1.055 * pow(color, (1.0 / 2.4).rrr) - (0.055).rrr, + 12.92 * color + ); + } + else + return pow(color, (1.0 / 2.2).rrr); +} + +option gamma_correction: bool = false; + +[entry(frag)] +fn main(input: VertOut) -> FragOut +{ + let output: FragOut; + const if(gamma_correction) + output.color = vec4[f32](LinearTosRGB(u_texture.Sample(input.uv).xyz), 1.0); + else + output.color = u_texture.Sample(input.uv); + return output; +} diff --git a/runtime/Includes/Embedded/ShaderScreenFragment.spv.h b/runtime/Includes/Embedded/ShaderScreenFragment.spv.h new file mode 100644 index 0000000..727a9d3 --- /dev/null +++ b/runtime/Includes/Embedded/ShaderScreenFragment.spv.h @@ -0,0 +1,31 @@ +3,2,35,7,0,0,1,0,39,0,0,0,34,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,14,0, +3,0,0,0,0,0,1,0,0,0,15,0,7,0,4,0,0,0,21,0,0,0,109,97,105,110,0,0,0,0, +10,0,0,0,18,0,0,0,16,0,3,0,21,0,0,0,7,0,0,0,3,0,3,0,0,0,0,0,100,0, +0,0,5,0,4,0,14,0,0,0,86,101,114,116,79,117,116,0,6,0,4,0,14,0,0,0,0,0,0,0, +117,118,0,0,5,0,4,0,19,0,0,0,70,114,97,103,79,117,116,0,6,0,5,0,19,0,0,0,0,0, +0,0,99,111,108,111,114,0,0,0,5,0,5,0,5,0,0,0,117,95,116,101,120,116,117,114,101,0,0,0, +5,0,3,0,10,0,0,0,117,118,0,0,5,0,4,0,18,0,0,0,99,111,108,111,114,0,0,0,5,0, +4,0,21,0,0,0,109,97,105,110,0,0,0,0,71,0,4,0,5,0,0,0,33,0,0,0,0,0,0,0, +71,0,4,0,5,0,0,0,34,0,0,0,0,0,0,0,71,0,4,0,10,0,0,0,30,0,0,0,0,0, +0,0,71,0,4,0,18,0,0,0,30,0,0,0,0,0,0,0,72,0,5,0,14,0,0,0,0,0,0,0, +35,0,0,0,0,0,0,0,72,0,5,0,19,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0,22,0, +3,0,1,0,0,0,32,0,0,0,25,0,9,0,2,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,27,0,3,0,3,0,0,0,2,0,0,0,32,0, +4,0,4,0,0,0,0,0,0,0,3,0,0,0,19,0,2,0,6,0,0,0,33,0,3,0,7,0,0,0, +6,0,0,0,23,0,4,0,8,0,0,0,1,0,0,0,2,0,0,0,32,0,4,0,9,0,0,0,1,0, +0,0,8,0,0,0,21,0,4,0,11,0,0,0,32,0,0,0,1,0,0,0,43,0,4,0,11,0,0,0, +12,0,0,0,0,0,0,0,32,0,4,0,13,0,0,0,7,0,0,0,8,0,0,0,30,0,3,0,14,0, +0,0,8,0,0,0,32,0,4,0,15,0,0,0,7,0,0,0,14,0,0,0,23,0,4,0,16,0,0,0, +1,0,0,0,4,0,0,0,32,0,4,0,17,0,0,0,3,0,0,0,16,0,0,0,30,0,3,0,19,0, +0,0,16,0,0,0,32,0,4,0,20,0,0,0,7,0,0,0,19,0,0,0,32,0,4,0,31,0,0,0, 
+7,0,0,0,16,0,0,0,59,0,4,0,4,0,0,0,5,0,0,0,0,0,0,0,59,0,4,0,9,0, +0,0,10,0,0,0,1,0,0,0,59,0,4,0,17,0,0,0,18,0,0,0,3,0,0,0,54,0,5,0, +6,0,0,0,21,0,0,0,0,0,0,0,7,0,0,0,248,0,2,0,22,0,0,0,59,0,4,0,20,0, +0,0,23,0,0,0,7,0,0,0,59,0,4,0,15,0,0,0,24,0,0,0,7,0,0,0,65,0,5,0, +13,0,0,0,25,0,0,0,24,0,0,0,12,0,0,0,63,0,3,0,25,0,0,0,10,0,0,0,61,0, +4,0,3,0,0,0,26,0,0,0,5,0,0,0,65,0,5,0,13,0,0,0,27,0,0,0,24,0,0,0, +12,0,0,0,61,0,4,0,8,0,0,0,28,0,0,0,27,0,0,0,87,0,5,0,16,0,0,0,29,0, +0,0,26,0,0,0,28,0,0,0,65,0,5,0,31,0,0,0,30,0,0,0,23,0,0,0,12,0,0,0, +62,0,3,0,30,0,0,0,29,0,0,0,61,0,4,0,19,0,0,0,32,0,0,0,23,0,0,0,81,0, +5,0,16,0,0,0,33,0,0,0,32,0,0,0,0,0,0,0,62,0,3,0,18,0,0,0,33,0,0,0, +253,0,1,0,56,0,1,0 diff --git a/runtime/Includes/Embedded/ShaderScreenVertex.nzsl b/runtime/Includes/Embedded/ShaderScreenVertex.nzsl new file mode 100644 index 0000000..94a4440 --- /dev/null +++ b/runtime/Includes/Embedded/ShaderScreenVertex.nzsl @@ -0,0 +1,31 @@ +[nzsl_version("1.0")] +module; + +struct VertIn +{ + [builtin(vertex_index)] vert_index: i32 +} + +struct VertOut +{ + [location(0)] uv: vec2[f32], + [builtin(position)] position: vec4[f32] +} + +const vertices = array[vec2[f32]]( + vec2[f32](-1.0, -3.0), + vec2[f32](-1.0, 1.0), + vec2[f32]( 3.0, 1.0) +); + +[entry(vert)] +fn main(input: VertIn) -> VertOut +{ + let position = vertices[input.vert_index]; + + let output: VertOut; + output.position = vec4[f32](position, 0.0, 1.0); + output.uv = position * 0.5 + vec2[f32](0.5, 0.5); + + return output; +} diff --git a/runtime/Includes/Embedded/ShaderScreenVertex.spv.h b/runtime/Includes/Embedded/ShaderScreenVertex.spv.h new file mode 100644 index 0000000..e4e53d8 --- /dev/null +++ b/runtime/Includes/Embedded/ShaderScreenVertex.spv.h @@ -0,0 +1,48 @@ +3,2,35,7,0,0,1,0,39,0,0,0,59,0,0,0,0,0,0,0,17,0,2,0,1,0,0,0,14,0, +3,0,0,0,0,0,1,0,0,0,15,0,8,0,0,0,0,0,37,0,0,0,109,97,105,110,0,0,0,0, +20,0,0,0,26,0,0,0,29,0,0,0,3,0,3,0,0,0,0,0,100,0,0,0,5,0,4,0,23,0, +0,0,86,101,114,116,73,110,0,0,6,0,6,0,23,0,0,0,0,0,0,0,118,101,114,116,95,105,110,100, +101,120,0,0,5,0,4,0,30,0,0,0,86,101,114,116,79,117,116,0,6,0,4,0,30,0,0,0,0,0, +0,0,117,118,0,0,6,0,6,0,30,0,0,0,1,0,0,0,112,111,115,105,116,105,111,110,0,0,0,0, +5,0,5,0,15,0,0,0,118,101,114,116,105,99,101,115,0,0,0,0,5,0,6,0,20,0,0,0,118,101, +114,116,101,120,95,105,110,100,101,120,0,0,0,0,5,0,3,0,26,0,0,0,117,118,0,0,5,0,5,0, +29,0,0,0,112,111,115,105,116,105,111,110,0,0,0,0,5,0,4,0,37,0,0,0,109,97,105,110,0,0, +0,0,71,0,4,0,20,0,0,0,11,0,0,0,42,0,0,0,71,0,4,0,29,0,0,0,11,0,0,0, +0,0,0,0,71,0,4,0,26,0,0,0,30,0,0,0,0,0,0,0,72,0,5,0,23,0,0,0,0,0, +0,0,35,0,0,0,0,0,0,0,72,0,5,0,30,0,0,0,0,0,0,0,35,0,0,0,0,0,0,0, +72,0,5,0,30,0,0,0,1,0,0,0,35,0,0,0,16,0,0,0,22,0,3,0,1,0,0,0,32,0, +0,0,23,0,4,0,2,0,0,0,1,0,0,0,2,0,0,0,21,0,4,0,3,0,0,0,32,0,0,0, +0,0,0,0,43,0,4,0,3,0,0,0,4,0,0,0,3,0,0,0,28,0,4,0,5,0,0,0,2,0, +0,0,4,0,0,0,32,0,4,0,6,0,0,0,6,0,0,0,5,0,0,0,43,0,4,0,1,0,0,0, +7,0,0,0,0,0,128,191,43,0,4,0,1,0,0,0,8,0,0,0,0,0,64,192,44,0,5,0,2,0, +0,0,9,0,0,0,7,0,0,0,8,0,0,0,43,0,4,0,1,0,0,0,10,0,0,0,0,0,128,63, +44,0,5,0,2,0,0,0,11,0,0,0,7,0,0,0,10,0,0,0,43,0,4,0,1,0,0,0,12,0, +0,0,0,0,64,64,44,0,5,0,2,0,0,0,13,0,0,0,12,0,0,0,10,0,0,0,44,0,6,0, +5,0,0,0,14,0,0,0,9,0,0,0,11,0,0,0,13,0,0,0,19,0,2,0,16,0,0,0,33,0, +3,0,17,0,0,0,16,0,0,0,21,0,4,0,18,0,0,0,32,0,0,0,1,0,0,0,32,0,4,0, +19,0,0,0,1,0,0,0,18,0,0,0,43,0,4,0,18,0,0,0,21,0,0,0,0,0,0,0,32,0, +4,0,22,0,0,0,7,0,0,0,18,0,0,0,30,0,3,0,23,0,0,0,18,0,0,0,32,0,4,0, +24,0,0,0,7,0,0,0,23,0,0,0,32,0,4,0,25,0,0,0,3,0,0,0,2,0,0,0,23,0, 
+4,0,27,0,0,0,1,0,0,0,4,0,0,0,32,0,4,0,28,0,0,0,3,0,0,0,27,0,0,0, +30,0,4,0,30,0,0,0,2,0,0,0,27,0,0,0,32,0,4,0,31,0,0,0,7,0,0,0,2,0, +0,0,32,0,4,0,32,0,0,0,7,0,0,0,30,0,0,0,43,0,4,0,18,0,0,0,33,0,0,0, +1,0,0,0,43,0,4,0,1,0,0,0,34,0,0,0,0,0,0,0,43,0,4,0,1,0,0,0,35,0, +0,0,0,0,0,63,44,0,5,0,2,0,0,0,36,0,0,0,35,0,0,0,35,0,0,0,32,0,4,0, +45,0,0,0,6,0,0,0,2,0,0,0,32,0,4,0,51,0,0,0,7,0,0,0,27,0,0,0,59,0, +5,0,6,0,0,0,15,0,0,0,6,0,0,0,14,0,0,0,59,0,4,0,19,0,0,0,20,0,0,0, +1,0,0,0,59,0,4,0,25,0,0,0,26,0,0,0,3,0,0,0,59,0,4,0,28,0,0,0,29,0, +0,0,3,0,0,0,54,0,5,0,16,0,0,0,37,0,0,0,0,0,0,0,17,0,0,0,248,0,2,0, +38,0,0,0,59,0,4,0,31,0,0,0,39,0,0,0,7,0,0,0,59,0,4,0,32,0,0,0,40,0, +0,0,7,0,0,0,59,0,4,0,24,0,0,0,41,0,0,0,7,0,0,0,65,0,5,0,22,0,0,0, +42,0,0,0,41,0,0,0,21,0,0,0,63,0,3,0,42,0,0,0,20,0,0,0,65,0,5,0,22,0, +0,0,43,0,0,0,41,0,0,0,21,0,0,0,61,0,4,0,18,0,0,0,44,0,0,0,43,0,0,0, +65,0,5,0,45,0,0,0,46,0,0,0,15,0,0,0,44,0,0,0,61,0,4,0,2,0,0,0,47,0, +0,0,46,0,0,0,62,0,3,0,39,0,0,0,47,0,0,0,61,0,4,0,2,0,0,0,48,0,0,0, +39,0,0,0,80,0,6,0,27,0,0,0,49,0,0,0,48,0,0,0,34,0,0,0,10,0,0,0,65,0, +5,0,51,0,0,0,50,0,0,0,40,0,0,0,33,0,0,0,62,0,3,0,50,0,0,0,49,0,0,0, +61,0,4,0,2,0,0,0,52,0,0,0,39,0,0,0,142,0,5,0,2,0,0,0,53,0,0,0,52,0, +0,0,35,0,0,0,129,0,5,0,2,0,0,0,54,0,0,0,53,0,0,0,36,0,0,0,65,0,5,0, +31,0,0,0,55,0,0,0,40,0,0,0,21,0,0,0,62,0,3,0,55,0,0,0,54,0,0,0,61,0, +4,0,30,0,0,0,56,0,0,0,40,0,0,0,81,0,5,0,2,0,0,0,57,0,0,0,56,0,0,0, +0,0,0,0,62,0,3,0,26,0,0,0,57,0,0,0,81,0,5,0,27,0,0,0,58,0,0,0,56,0, +0,0,1,0,0,0,62,0,3,0,29,0,0,0,58,0,0,0,253,0,1,0,56,0,1,0 diff --git a/runtime/Includes/Graphics/Drawable.h b/runtime/Includes/Graphics/Drawable.h new file mode 100644 index 0000000..803fd5f --- /dev/null +++ b/runtime/Includes/Graphics/Drawable.h @@ -0,0 +1,58 @@ +#ifndef __MLX_DRAWABLE__ +#define __MLX_DRAWABLE__ + +#include +#include +#include + +namespace mlx +{ + class Drawable + { + friend class Render2DPass; + + public: + inline Drawable(DrawableType type) : m_type(type) {} + + inline void SetColor(Vec4f color) noexcept { m_color = std::move(color); } + inline void SetPosition(Vec2f position) noexcept { m_position = std::move(position); } + inline void SetScale(Vec2f scale) noexcept { m_scale = std::move(scale); } + inline void SetRotation(float rotation) noexcept { m_rotation = EulerAnglesf{ 0.0f, 0.0f, rotation }; } + inline void SetCenter(Vec2f center) noexcept { m_center = std::move(center); } + + inline virtual void Update([[maybe_unused]] VkCommandBuffer cmd) {} + + [[nodiscard]] MLX_FORCEINLINE const Vec4f& GetColor() const noexcept { return m_color; } + [[nodiscard]] MLX_FORCEINLINE const Vec2f& GetPosition() const noexcept { return m_position; } + [[nodiscard]] MLX_FORCEINLINE const Vec2f& GetScale() const noexcept { return m_scale; } + [[nodiscard]] MLX_FORCEINLINE const Quatf& GetRotation() const noexcept { return m_rotation; } + [[nodiscard]] MLX_FORCEINLINE const Vec2f& GetCenter() const noexcept { return m_center; } + [[nodiscard]] MLX_FORCEINLINE std::shared_ptr GetMesh() const { return p_mesh; } + [[nodiscard]] MLX_FORCEINLINE DrawableType GetType() const noexcept { return m_type; } + + inline virtual ~Drawable() { if(p_set) p_set->ReturnDescriptorSetToPool(); } + + protected: + [[nodiscard]] inline bool IsSetInit() const noexcept { return p_set && p_set->IsInit(); } + [[nodiscard]] inline VkDescriptorSet GetSet(std::size_t frame_index) const noexcept { return p_set ? 
p_set->GetSet(frame_index) : VK_NULL_HANDLE; } + + inline void UpdateDescriptorSet(std::shared_ptr set) + { + p_set = RenderCore::Get().GetDescriptorPoolManager().GetAvailablePool().RequestDescriptorSet(set->GetShaderLayout(), set->GetShaderType()); + } + + virtual void Bind(std::size_t frame_index, VkCommandBuffer cmd) = 0; + + protected: + std::shared_ptr p_set; + std::shared_ptr p_mesh; + Quatf m_rotation = Quatf::Identity(); + Vec4f m_color = Vec4f{ 1.0f, 1.0f, 1.0f, 1.0f }; + Vec2f m_position = Vec2f{ 0.0f, 0.0f }; + Vec2f m_scale = Vec2f{ 1.0f, 1.0f }; + Vec2f m_center = Vec2f{ 0.0f, 0.0f }; + DrawableType m_type; + }; +} + +#endif diff --git a/runtime/Includes/Graphics/Enums.h b/runtime/Includes/Graphics/Enums.h new file mode 100644 index 0000000..8347ab0 --- /dev/null +++ b/runtime/Includes/Graphics/Enums.h @@ -0,0 +1,13 @@ +#ifndef __MLX_GRAPHICS_ENUMS__ +#define __MLX_GRAPHICS_ENUMS__ + +namespace mlx +{ + enum class DrawableType + { + Sprite, + Text + }; +} + +#endif diff --git a/runtime/Includes/Graphics/Font.h b/runtime/Includes/Graphics/Font.h new file mode 100644 index 0000000..a082ee1 --- /dev/null +++ b/runtime/Includes/Graphics/Font.h @@ -0,0 +1,53 @@ +#ifndef __MLX_FONT__ +#define __MLX_FONT__ + +#include + +namespace mlx +{ + class Font + { + public: + Font(const std::filesystem::path& path, float scale) : m_build_data(path), m_name(path.string()), m_scale(scale) {} + Font(const std::string& name, const std::vector& ttf_data, float scale) : m_build_data(ttf_data), m_name(name), m_scale(scale) {} + + void BuildFont(); + void Destroy(); + + inline const std::string& GetName() const { return m_name; } + inline float GetScale() const noexcept { return m_scale; } + inline const std::array& GetCharData() const { return m_cdata; } + inline const Texture& GetTexture() const noexcept { return m_atlas; } + inline bool operator==(const Font& rhs) const { return rhs.m_name == m_name && rhs.m_scale == m_scale; } + inline bool operator!=(const Font& rhs) const { return rhs.m_name != m_name || rhs.m_scale != m_scale; } + + inline ~Font() { Destroy(); } + + private: + std::array m_cdata; + Texture m_atlas; + std::variant> m_build_data; + std::string m_name; + float m_scale; + }; + + class FontRegistry + { + public: + FontRegistry() = default; + + inline void RegisterFont(std::shared_ptr font); + inline void UnregisterFont(std::shared_ptr font); + inline std::shared_ptr GetFont(const std::filesystem::path& name, float scale); + inline void Reset(); + + ~FontRegistry() = default; + + private: + std::unordered_set> m_fonts_registry; + }; +} + +#include + +#endif diff --git a/runtime/Includes/Graphics/Font.inl b/runtime/Includes/Graphics/Font.inl new file mode 100644 index 0000000..1deb809 --- /dev/null +++ b/runtime/Includes/Graphics/Font.inl @@ -0,0 +1,29 @@ +#pragma once +#include + +namespace mlx +{ + void FontRegistry::RegisterFont(std::shared_ptr font) + { + m_fonts_registry.insert(font); + } + + void FontRegistry::UnregisterFont(std::shared_ptr font) + { + m_fonts_registry.erase(font); + } + + std::shared_ptr FontRegistry::GetFont(const std::filesystem::path& name, float scale) + { + auto it = std::find_if(m_fonts_registry.begin(), m_fonts_registry.end(), [&name, scale](std::shared_ptr rhs) + { + return (name == rhs->GetName() && scale == rhs->GetScale()); + }); + return (it != m_fonts_registry.end() ? 
*it : nullptr); + } + + void FontRegistry::Reset() + { + m_fonts_registry.clear(); + } +} diff --git a/runtime/Includes/Graphics/Mesh.h b/runtime/Includes/Graphics/Mesh.h new file mode 100644 index 0000000..627b808 --- /dev/null +++ b/runtime/Includes/Graphics/Mesh.h @@ -0,0 +1,42 @@ +#ifndef __MLX_RENDERER_MESH__ +#define __MLX_RENDERER_MESH__ + +#include +#include +#include + +namespace mlx +{ + class Mesh + { + public: + struct SubMesh + { + VertexBuffer vbo; + IndexBuffer ibo; + std::size_t triangle_count = 0; + + inline SubMesh(const std::vector& vertices, const std::vector& indices); + }; + + public: + Mesh() = default; + + void Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn) const noexcept; + void Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn, std::size_t submesh_index) const noexcept; + + inline std::size_t GetSubMeshCount() const { return m_sub_meshes.size(); } + + inline void AddSubMesh(SubMesh mesh) { m_sub_meshes.emplace_back(std::move(mesh)); } + [[nodiscard]] inline SubMesh& GetSubMesh(std::size_t index) { return m_sub_meshes.at(index); } + + ~Mesh(); + + private: + std::vector m_sub_meshes; + }; +} + +#include + +#endif diff --git a/runtime/Includes/Graphics/Mesh.inl b/runtime/Includes/Graphics/Mesh.inl new file mode 100644 index 0000000..2713ff0 --- /dev/null +++ b/runtime/Includes/Graphics/Mesh.inl @@ -0,0 +1,20 @@ +#pragma once +#include + +namespace mlx +{ + Mesh::SubMesh::SubMesh(const std::vector& vertices, const std::vector& indices) + { + CPUBuffer vb(vertices.size() * sizeof(Vertex)); + std::memcpy(vb.GetData(), vertices.data(), vb.GetSize()); + vbo.Init(vb.GetSize(), 0, "mlx_mesh"); + vbo.SetData(std::move(vb)); + + CPUBuffer ib(indices.size() * sizeof(std::uint32_t)); + std::memcpy(ib.GetData(), indices.data(), ib.GetSize()); + ibo.Init(ib.GetSize(), 0, "mlx_mesh"); + ibo.SetData(std::move(ib)); + + triangle_count = vertices.size() / 3; + } +} diff --git a/runtime/Includes/Graphics/PutPixelManager.h b/runtime/Includes/Graphics/PutPixelManager.h new file mode 100644 index 0000000..5f76609 --- /dev/null +++ b/runtime/Includes/Graphics/PutPixelManager.h @@ -0,0 +1,25 @@ +#ifndef __MLX_PUT_PIXEL_MANAGER__ +#define __MLX_PUT_PIXEL_MANAGER__ + +#include + +namespace mlx +{ + class PutPixelManager + { + public: + PutPixelManager(NonOwningPtr renderer) : p_renderer(renderer) {} + + // Return a valid pointer when a new texture has been created + NonOwningPtr DrawPixel(int x, int y, std::uint64_t draw_layer, std::uint32_t color); + void ResetRenderData(); + + ~PutPixelManager(); + + private: + std::unordered_map m_textures; + NonOwningPtr p_renderer; + }; +} + +#endif diff --git a/runtime/Includes/Graphics/Scene.h b/runtime/Includes/Graphics/Scene.h new file mode 100644 index 0000000..843d2b0 --- /dev/null +++ b/runtime/Includes/Graphics/Scene.h @@ -0,0 +1,46 @@ +#ifndef __MLX_SCENE__ +#define __MLX_SCENE__ + +#include +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + class Scene + { + public: + Scene() = default; + + Sprite& CreateSprite(NonOwningPtr texture) noexcept; + NonOwningPtr GetSpriteFromTexturePositionScaleRotation(NonOwningPtr texture, const Vec2f& position, float scale, float rotation) const; + void TryEraseSpriteFromTexture(NonOwningPtr texture); + bool IsTextureAtGivenDrawLayer(NonOwningPtr texture, std::uint64_t draw_layer) const; + + Text& CreateText(const std::string& text) noexcept; + NonOwningPtr GetTextFromPositionAndColor(const std::string& text, const Vec2f& 
position, const Vec4f& color) const; + bool IsTextAtGivenDrawLayer(const std::string& text, std::uint64_t draw_layer) const; + + inline void BindFont(std::shared_ptr font) { Verify((bool)font, "invalid fond pointer"); p_bound_font = font; } + + void BringToDrawLayer(NonOwningPtr drawable, std::uint64_t draw_layer); + + inline void ResetScene(Vec4f clear) { m_drawables.clear(); m_clear_color = std::move(clear); } + inline const Vec4f& GetClearColor() const noexcept { return m_clear_color; } + + [[nodiscard]] MLX_FORCEINLINE const std::vector>& GetDrawables() const noexcept { return m_drawables; } + + ~Scene() = default; + + private: + std::vector> m_drawables; + std::shared_ptr p_bound_font; + Vec4f m_clear_color = { 0.0f, 0.0f, 0.0f, 1.0f }; + }; +} + +#endif diff --git a/runtime/Includes/Graphics/Sprite.h b/runtime/Includes/Graphics/Sprite.h new file mode 100644 index 0000000..b82f4fc --- /dev/null +++ b/runtime/Includes/Graphics/Sprite.h @@ -0,0 +1,45 @@ +#ifndef __MLX_SPRITE__ +#define __MLX_SPRITE__ + +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + class Sprite : public Drawable + { + friend class Render2DPass; + + public: + Sprite(NonOwningPtr texture); + Sprite(std::shared_ptr mesh, NonOwningPtr texture); + + MLX_FORCEINLINE void Update(VkCommandBuffer cmd) override + { + Verify((bool)p_texture, "a sprite has no texture attached (internal mlx issue, please report to the devs)"); + p_texture->Update(cmd); + } + + [[nodiscard]] MLX_FORCEINLINE NonOwningPtr GetTexture() const { return p_texture; } + + inline ~Sprite() = default; + + private: + inline void Bind(std::size_t frame_index, VkCommandBuffer cmd) override + { + if(!p_set) + return; + p_set->SetImage(frame_index, 0, *p_texture); + p_set->Update(frame_index, cmd); + } + + private: + NonOwningPtr p_texture; + }; +} + +#endif diff --git a/runtime/Includes/Graphics/Text.h b/runtime/Includes/Graphics/Text.h new file mode 100644 index 0000000..457bc21 --- /dev/null +++ b/runtime/Includes/Graphics/Text.h @@ -0,0 +1,40 @@ +#ifndef __MLX_TEXT__ +#define __MLX_TEXT__ + +#include +#include +#include + +namespace mlx +{ + class Text : public Drawable + { + friend class Render2DPass; + + public: + Text(const std::string& text, std::shared_ptr font); + inline Text(const std::string& text, std::shared_ptr font, std::shared_ptr mesh) : Drawable(DrawableType::Text) { Init(text, font, mesh); } + + [[nodiscard]] inline const std::string& GetText() const { return m_text; } + [[nodiscard]] inline std::shared_ptr GetFont() const { return p_font; } + + virtual ~Text() = default; + + private: + void Init(const std::string& text, std::shared_ptr font, std::shared_ptr mesh); + + inline void Bind(std::size_t frame_index, VkCommandBuffer cmd) override + { + if(!p_set) + return; + p_set->SetImage(frame_index, 0, const_cast(p_font->GetTexture())); + p_set->Update(frame_index, cmd); + } + + private: + std::shared_ptr p_font; + std::string m_text; + }; +} + +#endif diff --git a/runtime/Includes/Maths/Angles.h b/runtime/Includes/Maths/Angles.h new file mode 100644 index 0000000..1aa9b3c --- /dev/null +++ b/runtime/Includes/Maths/Angles.h @@ -0,0 +1,106 @@ +#ifndef __MLX_ANGLES__ +#define __MLX_ANGLES__ + +#include + +namespace mlx +{ + template struct EulerAngles; + template struct Quat; + + template + struct Angle + { + T value; + + constexpr Angle() = default; + constexpr Angle(T angle); + template constexpr explicit Angle(const Angle& Angle); + template constexpr Angle(const Angle& angle); + constexpr Angle(const Angle&) = 
default; + constexpr Angle(Angle&&) noexcept = default; + ~Angle() = default; + + constexpr bool ApproxEqual(const Angle& angle) const; + constexpr bool ApproxEqual(const Angle& angle, T max_difference) const; + + T GetCos() const; + T GetSin() const; + std::pair GetSinCos() const; + T GetTan() const; + + constexpr Angle& Normalize(); + + template T To() const; + template Angle ToAngle() const; + constexpr T ToDegrees() const; + constexpr Angle ToDegreeAngle() const; + EulerAngles ToEulerAngles() const; + Quat ToQuat() const; + constexpr T ToRadians() const; + constexpr Angle ToRadianAngle() const; + std::string ToString() const; + constexpr T ToTurns() const; + constexpr Angle ToTurnAngle() const; + + constexpr Angle& operator=(const Angle&) = default; + constexpr Angle& operator=(Angle&&) noexcept = default; + + constexpr Angle operator+() const; + constexpr Angle operator-() const; + + constexpr Angle operator+(Angle other) const; + constexpr Angle operator-(Angle other) const; + constexpr Angle operator*(T scalar) const; + constexpr Angle operator/(T divider) const; + + constexpr Angle& operator+=(Angle other); + constexpr Angle& operator-=(Angle other); + constexpr Angle& operator*=(T scalar); + constexpr Angle& operator/=(T divider); + + constexpr bool operator==(Angle other) const; + constexpr bool operator!=(Angle other) const; + constexpr bool operator<(Angle other) const; + constexpr bool operator<=(Angle other) const; + constexpr bool operator>(Angle other) const; + constexpr bool operator>=(Angle other) const; + + static constexpr bool ApproxEqual(const Angle& lhs, const Angle& rhs); + static constexpr bool ApproxEqual(const Angle& lhs, const Angle& rhs, T max_difference); + static constexpr Angle Clamp(Angle angle, Angle min, Angle max); + template static constexpr Angle From(T value); + static constexpr Angle FromDegrees(T degrees); + static constexpr Angle FromRadians(T radians); + static constexpr Angle FromTurns(T turn); + static constexpr Angle Zero(); + }; + + template + using DegreeAngle = Angle; + + using DegreeAngled = DegreeAngle; + using DegreeAnglef = DegreeAngle; + + template + using RadianAngle = Angle; + + using RadianAngled = RadianAngle; + using RadianAnglef = RadianAngle; + + template + using TurnAngle = Angle; + + using TurnAngled = TurnAngle; + using TurnAnglef = TurnAngle; + + template Angle operator*(T scale, Angle angle); + + template Angle operator/(T divider, Angle angle); + + template std::ostream& operator<<(std::ostream& out, Angle angle); +} + +#include + +#endif diff --git a/runtime/Includes/Maths/Angles.inl b/runtime/Includes/Maths/Angles.inl new file mode 100644 index 0000000..df460ff --- /dev/null +++ b/runtime/Includes/Maths/Angles.inl @@ -0,0 +1,493 @@ +#pragma once +#include + +#include +#include + +namespace mlx +{ + namespace Internal + { + template struct AngleConversion; + + template + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle; + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return DegreeToRadian(angle); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle / T(360); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return RadianToDegree(angle); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle / Tau(); + } + }; + + template<> + 
struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle * T(360); + } + }; + + template<> + struct AngleConversion + { + template + static constexpr T Convert(T angle) + { + return angle * Tau(); + } + }; + + template struct AngleUtils; + + template<> + struct AngleUtils + { + template + static constexpr T GetEpsilon() + { + return T(1e-4); + } + + template + static constexpr T GetLimit() + { + return 360; + } + + template static std::ostream& ToString(std::ostream& out, T value) + { + return out << "Angle(" << value << "deg)"; + } + }; + + template<> + struct AngleUtils + { + template + static constexpr T GetEpsilon() + { + return T(1e-5); + } + + template + static constexpr T GetLimit() + { + return Tau(); + } + + template + static std::ostream& ToString(std::ostream& out, T value) + { + return out << "Angle(" << value << "rad)"; + } + }; + + template<> + struct AngleUtils + { + template + static constexpr T GetEpsilon() + { + return T(1e-5); + } + + template + static constexpr T GetLimit() + { + return 1; + } + + template + static std::ostream& ToString(std::ostream& out, T value) + { + return out << "Angle(" << value << "turn)"; + } + }; + + #ifdef MLX_PLAT_LINUX + template + void SinCos(T x, T* sin, T* cos) + { + double s, c; + ::sincos(x, &s, &c); + *sin = static_cast(s); + *cos = static_cast(c); + } + + template<> + inline void SinCos(float x, float* s, float* c) + { + ::sincosf(x, s, c); + } + + template<> + inline void SinCos(long double x, long double* s, long double* c) + { + ::sincosl(x, s, c); + } + #else + template + void SinCos(T x, T* sin, T* cos) + { + *sin = std::sin(x); + *cos = std::cos(x); + } + #endif + } + + template + constexpr Angle::Angle(T angle) : + value(angle) + { + } + + template + template + constexpr Angle::Angle(const Angle& angle) : + value(static_cast(angle.value)) + { + } + + template + template + constexpr Angle::Angle(const Angle& angle) : + value(Internal::AngleConversion::Convert(angle.value)) + { + } + + template + constexpr bool Angle::ApproxEqual(const Angle& angle) const + { + return ApproxEqual(angle, Internal::AngleUtils::template GetEpsilon()); + } + + template + constexpr bool Angle::ApproxEqual(const Angle& angle, T maxDifference) const + { + return NumberEquals(value, angle.value, maxDifference); + } + + template + T Angle::GetCos() const + { + return std::cos(ToRadians()); + } + + template + T Angle::GetSin() const + { + return std::sin(ToRadians()); + } + + template + std::pair Angle::GetSinCos() const + { + T sin, cos; + Internal::SinCos(ToRadians(), &sin, &cos); + + return std::make_pair(sin, cos); + } + + template + T Angle::GetTan() const + { + return std::tan(ToRadians()); + } + + template + constexpr Angle& Angle::Normalize() + { + constexpr T limit = Internal::AngleUtils::template GetLimit(); + constexpr T halfLimit = limit / T(2); + + value = Mod(value + halfLimit, limit); + if (value < T(0)) + value += limit; + + value -= halfLimit; + return *this; + } + + template + template + T Angle::To() const + { + return Internal::AngleConversion::Convert(value); + } + + template + template + Angle Angle::ToAngle() const + { + return Angle(To()); + } + + template + constexpr T Angle::ToDegrees() const + { + return To(); + } + + template + constexpr Angle Angle::ToDegreeAngle() const + { + return ToAngle(); + } + + template + EulerAngles Angle::ToEulerAngles() const + { + return EulerAngles(0, 0, ToDegrees()); + } + + template + Quat Angle::ToQuat() const + { + auto halfAngle = Angle(*this) / 2.f; + 
auto sincos = halfAngle.GetSinCos(); + return Quat(sincos.second, 0, 0, sincos.first); + } + + template + constexpr T Angle::ToRadians() const + { + return To(); + } + + template + constexpr Angle Angle::ToRadianAngle() const + { + return ToAngle(); + } + + template + std::string Angle::ToString() const + { + std::ostringstream oss; + Internal::AngleUtils::ToString(oss, value); + + return oss.str(); + } + + template + constexpr T Angle::ToTurns() const + { + return To(value); + } + + template + constexpr Angle Angle::ToTurnAngle() const + { + return ToAngle(); + } + + template + constexpr Angle Angle::operator+() const + { + return *this; + } + + template + constexpr Angle Angle::operator-() const + { + return Angle(-value); + } + + template + constexpr Angle Angle::operator+(Angle other) const + { + return Angle(value + other.value); + } + + template + constexpr Angle Angle::operator-(Angle other) const + { + return Angle(value - other.value); + } + + template + constexpr Angle Angle::operator*(T scalar) const + { + return Angle(value * scalar); + } + + template + constexpr Angle Angle::operator/(T divider) const + { + return Angle(value / divider); + } + + template + constexpr Angle& Angle::operator+=(Angle other) + { + value += other.value; + return *this; + } + + template + constexpr Angle& Angle::operator-=(Angle other) + { + value -= other.value; + return *this; + } + + template + constexpr Angle& Angle::operator*=(T scalar) + { + value *= scalar; + return *this; + } + + template + constexpr Angle& Angle::operator/=(T divider) + { + value /= divider; + return *this; + } + + template + constexpr bool Angle::operator==(Angle other) const + { + return value == other.value; + } + + template + constexpr bool Angle::operator!=(Angle other) const + { + return value != other.value; + } + + template + constexpr bool Angle::operator<(Angle other) const + { + return value < other.value; + } + + template + constexpr bool Angle::operator<=(Angle other) const + { + return value <= other.value; + } + + template + constexpr bool Angle::operator>(Angle other) const + { + return value > other.value; + } + + template + constexpr bool Angle::operator>=(Angle other) const + { + return value >= other.value; + } + + template + constexpr bool Angle::ApproxEqual(const Angle& lhs, const Angle& rhs) + { + return lhs.ApproxEqual(rhs); + } + + template + constexpr bool Angle::ApproxEqual(const Angle& lhs, const Angle& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr Angle Angle::Clamp(Angle angle, Angle min, Angle max) + { + return Angle(std::clamp(angle.value, min.value, max.value)); + } + + template + template + constexpr Angle Angle::From(T value) + { + return Angle(Internal::AngleConversion::Convert(value)); + } + + template + constexpr Angle Angle::FromDegrees(T degrees) + { + return From(degrees); + } + + template + constexpr Angle Angle::FromRadians(T radians) + { + return From(radians); + } + + template + constexpr Angle Angle::FromTurns(T turns) + { + return From(turns); + } + + template + constexpr Angle Angle::Zero() + { + return Angle(0); + } + + template + Angle operator/(T scale, Angle angle) + { + return Angle(scale / angle.value); + } + + template + std::ostream& operator<<(std::ostream& out, Angle angle) + { + return Internal::AngleUtils::ToString(out, angle.value); + } + + template + constexpr Angle Clamp(Angle value, T min, T max) + { + return std::max(std::min(value.value, max), min); + } +} diff --git a/runtime/Includes/Maths/Constants.h 
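A minimal usage sketch of the Angle API added above (not part of the diff). The DegreeAnglef/RadianAnglef aliases and the member functions are the ones declared in Angles.h/Angles.inl; the include paths (and the template parameters stripped by this listing) are assumptions.

// Assumed include layout: headers resolved relative to runtime/Includes.
#include <Maths/Angles.h>
#include <Maths/Quaternions.h>

void AngleExample()
{
	mlx::DegreeAnglef yaw = mlx::DegreeAnglef::FromDegrees(270.0f);
	yaw.Normalize();                           // wraps into [-180, 180): 270 deg becomes -90 deg
	mlx::RadianAnglef yaw_rad = yaw.ToRadianAngle();
	auto [s, c] = yaw_rad.GetSinCos();         // a single sincos() call on Linux, per Internal::SinCos above
	mlx::Quatf orientation = yaw.ToQuat();     // rotation about the Z axis, built from the half angle
	(void)s; (void)c; (void)orientation;
}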
b/runtime/Includes/Maths/Constants.h new file mode 100644 index 0000000..29c9a64 --- /dev/null +++ b/runtime/Includes/Maths/Constants.h @@ -0,0 +1,83 @@ +#ifndef __MLX_MATHS_CONSTANTS__ +#define __MLX_MATHS_CONSTANTS__ + +namespace mlx +{ + template constexpr std::size_t BitCount = CHAR_BIT * sizeof(T); + + template + struct MathConstants + { + static constexpr T Infinity() + { + static_assert(std::numeric_limits::has_infinity); + return std::numeric_limits::infinity(); + } + + static constexpr T Max() + { + return std::numeric_limits::max(); + } + + static constexpr T Min() + { + return std::numeric_limits::min(); + } + + static constexpr T NaN() + { + static_assert(std::numeric_limits::has_signaling_NaN); + return std::numeric_limits::quiet_NaN(); + } + + // Math constants + static constexpr T HalfPi() + { + static_assert(std::is_floating_point_v); + return T(1.5707963267948966192313216916398); + } + + static constexpr T Pi() + { + static_assert(std::is_floating_point_v); + return T(3.1415926535897932384626433832795); + } + + static constexpr T Sqrt2() + { + static_assert(std::is_floating_point_v); + return T(1.4142135623730950488016887242097); + } + + static constexpr T Sqrt3() + { + static_assert(std::is_floating_point_v); + return T(1.7320508075688772935274463415059); + } + + static constexpr T Sqrt5() + { + static_assert(std::is_floating_point_v); + return T(2.2360679774997896964091736687313); + } + + static constexpr T Tau() + { + static_assert(std::is_floating_point_v); + return T(6.2831853071795864769252867665590); + } + }; + + template constexpr auto Infinity() { return MathConstants::Infinity(); } + template constexpr auto MaxValue() { return MathConstants::Max(); } + template constexpr auto MinValue() { return MathConstants::Min(); } + template constexpr auto NaN() { return MathConstants::NaN(); } + template constexpr auto HalfPi() { return MathConstants::HalfPi(); } + template constexpr auto Pi() { return MathConstants::Pi(); } + template constexpr auto Sqrt2() { return MathConstants::Sqrt2(); } + template constexpr auto Sqrt3() { return MathConstants::Sqrt3(); } + template constexpr auto Sqrt5() { return MathConstants::Sqrt5(); } + template constexpr auto Tau() { return MathConstants::Tau(); } +} + +#endif diff --git a/runtime/Includes/Maths/Enums.h b/runtime/Includes/Maths/Enums.h new file mode 100644 index 0000000..b74e833 --- /dev/null +++ b/runtime/Includes/Maths/Enums.h @@ -0,0 +1,18 @@ +#ifndef __MLX_MATHS_ENUMS__ +#define __MLX_MATHS_ENUMS__ + +namespace mlx +{ + enum class AngleUnit + { + Degree = 0, + Radian, + Turn, + + EndEnum + }; + + constexpr std::size_t AngleUnitCount = static_cast(AngleUnit::EndEnum); +} + +#endif diff --git a/runtime/Includes/Maths/EulerAngles.h b/runtime/Includes/Maths/EulerAngles.h new file mode 100644 index 0000000..54171c9 --- /dev/null +++ b/runtime/Includes/Maths/EulerAngles.h @@ -0,0 +1,55 @@ +#ifndef __MLX_EULER_ANGLES__ +#define __MLX_EULER_ANGLES__ + +#include + +namespace mlx +{ + template + struct EulerAngles + { + constexpr EulerAngles() = default; + constexpr EulerAngles(DegreeAngle P, DegreeAngle Y, DegreeAngle R); + constexpr EulerAngles(const DegreeAngle angles[3]); + template constexpr EulerAngles(const Angle& angle); + constexpr EulerAngles(const Quat& quat); + template constexpr explicit EulerAngles(const EulerAngles& angles); + constexpr EulerAngles(const EulerAngles&) = default; + constexpr EulerAngles(EulerAngles&&) = default; + ~EulerAngles() = default; + + constexpr bool ApproxEqual(const EulerAngles& angles, T 
maxDifference = std::numeric_limits::epsilon()) const; + + constexpr EulerAngles& Normalize(); + + Quat ToQuat() const; + std::string ToString() const; + + constexpr EulerAngles operator+(const EulerAngles& angles) const; + constexpr EulerAngles operator-(const EulerAngles& angles) const; + + constexpr EulerAngles& operator=(const EulerAngles&) = default; + constexpr EulerAngles& operator=(EulerAngles&&) = default; + + constexpr EulerAngles& operator+=(const EulerAngles& angles); + constexpr EulerAngles& operator-=(const EulerAngles& angles); + + constexpr bool operator==(const EulerAngles& angles) const; + constexpr bool operator!=(const EulerAngles& angles) const; + constexpr bool operator<(const EulerAngles& angles) const; + constexpr bool operator<=(const EulerAngles& angles) const; + constexpr bool operator>(const EulerAngles& angles) const; + constexpr bool operator>=(const EulerAngles& angles) const; + + static constexpr bool ApproxEqual(const EulerAngles& lhs, const EulerAngles& rhs, T maxDifference = std::numeric_limits::epsilon()); + static constexpr EulerAngles Zero(); + + DegreeAngle pitch, yaw, roll; + }; + + using EulerAnglesf = EulerAngles; +} + +#include + +#endif diff --git a/runtime/Includes/Maths/EulerAngles.inl b/runtime/Includes/Maths/EulerAngles.inl new file mode 100644 index 0000000..23a9a57 --- /dev/null +++ b/runtime/Includes/Maths/EulerAngles.inl @@ -0,0 +1,169 @@ +#pragma once +#include + +namespace mlx +{ + template + constexpr EulerAngles::EulerAngles(DegreeAngle P, DegreeAngle Y, DegreeAngle R) : + pitch(P), yaw(Y), roll(R) + {} + + template + constexpr EulerAngles::EulerAngles(const DegreeAngle angles[3]) : + EulerAngles(angles[0], angles[1], angles[2]) + {} + + template + template + constexpr EulerAngles::EulerAngles(const Angle& angle) : + EulerAngles(angle.ToEulerAngles()) + {} + + template + constexpr EulerAngles::EulerAngles(const Quat& quat) : + EulerAngles(quat.ToEulerAngles()) + {} + + template + template + constexpr EulerAngles::EulerAngles(const EulerAngles& angles) : + pitch(DegreeAngle(angles.pitch)), yaw(DegreeAngle(angles.yaw)), roll(DegreeAngle(angles.roll)) + {} + + template + constexpr bool EulerAngles::ApproxEqual(const EulerAngles& angles, T maxDifference) const + { + return pitch.ApproxEqual(angles.pitch, maxDifference) && yaw.ApproxEqual(angles.yaw, maxDifference) && roll.ApproxEqual(angles.roll, maxDifference); + } + + template + constexpr EulerAngles& EulerAngles::Normalize() + { + pitch.Normalize(); + yaw.Normalize(); + roll.Normalize(); + return *this; + } + + template + Quat EulerAngles::ToQuat() const + { + // XYZ + auto [s1, c1] = (yaw / T(2.0)).GetSinCos(); + auto [s2, c2] = (roll / T(2.0)).GetSinCos(); + auto [s3, c3] = (pitch / T(2.0)).GetSinCos(); + + return Quat(c1 * c2 * c3 - s1 * s2 * s3, + s1 * s2 * c3 + c1 * c2 * s3, + s1 * c2 * c3 + c1 * s2 * s3, + c1 * s2 * c3 - s1 * c2 * s3); + } + + template + std::string EulerAngles::ToString() const + { + std::ostringstream ss; + ss << *this; + return ss.str(); + } + + template + constexpr EulerAngles EulerAngles::operator+(const EulerAngles& angles) const + { + return EulerAngles(pitch + angles.pitch, yaw + angles.yaw, roll + angles.roll); + } + + template + constexpr EulerAngles EulerAngles::operator-(const EulerAngles& angles) const + { + return EulerAngles(pitch - angles.pitch, yaw - angles.yaw, roll - angles.roll); + } + + template + constexpr EulerAngles& EulerAngles::operator+=(const EulerAngles& angles) + { + pitch += angles.pitch; + yaw += angles.yaw; + roll += angles.roll; 
+ return *this; + } + + template + constexpr EulerAngles& EulerAngles::operator-=(const EulerAngles& angles) + { + pitch -= angles.pitch; + yaw -= angles.yaw; + roll -= angles.roll; + return *this; + } + + template + constexpr bool EulerAngles::operator==(const EulerAngles& angles) const + { + return pitch == angles.pitch && yaw == angles.yaw && roll == angles.roll; + } + + template + constexpr bool EulerAngles::operator!=(const EulerAngles& angles) const + { + return !operator==(angles); + } + + template + constexpr bool EulerAngles::operator<(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch < angles.pitch; + if (yaw != angles.yaw) + return yaw < angles.yaw; + return roll < angles.roll; + } + + template + constexpr bool EulerAngles::operator<=(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch < angles.pitch; + if (yaw != angles.yaw) + return yaw < angles.yaw; + return roll <= angles.roll; + } + + template + constexpr bool EulerAngles::operator>(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch > angles.pitch; + if (yaw != angles.yaw) + return yaw > angles.yaw; + return roll > angles.roll; + } + + template + constexpr bool EulerAngles::operator>=(const EulerAngles& angles) const + { + if (pitch != angles.pitch) + return pitch > angles.pitch; + if (yaw != angles.yaw) + return yaw > angles.yaw; + return roll >= angles.roll; + } + + template + constexpr bool EulerAngles::ApproxEqual(const EulerAngles& lhs, const EulerAngles& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr EulerAngles EulerAngles::Zero() + { + return EulerAngles(0, 0, 0); + } + + template + std::ostream& operator<<(std::ostream& out, const EulerAngles& angles) + { + return out << "EulerAngles(" << angles.pitch << ", " << angles.yaw << ", " << angles.roll << ')'; + } +} diff --git a/runtime/Includes/Maths/Mat4.h b/runtime/Includes/Maths/Mat4.h new file mode 100644 index 0000000..bc8adf9 --- /dev/null +++ b/runtime/Includes/Maths/Mat4.h @@ -0,0 +1,118 @@ +#ifndef __MLX_MAT4__ +#define __MLX_MAT4__ + +#include + +namespace mlx +{ + template struct Vec2; + template struct Vec3; + template struct Vec4; + template struct Quat; + + template + struct Mat4 + { + T m11, m12, m13, m14; + T m21, m22, m23, m24; + T m31, m32, m33, m34; + T m41, m42, m43, m44; + + constexpr Mat4() = default; + constexpr Mat4(T r11, T r12, T r13, T r14, + T r21, T r22, T r23, T r24, + T r31, T r32, T r33, T r34, + T r41, T r42, T r43, T r44); + constexpr Mat4(const T matrix[16]); + constexpr Mat4(const Mat4&) = default; + constexpr Mat4(Mat4&&) = default; + + constexpr Mat4& ApplyRotation(const Quat& rotation); + constexpr Mat4& ApplyScale(const Vec3& scale); + constexpr Mat4& ApplyTranslation(const Vec3& translation); + + constexpr bool ApproxEqual(const Mat4& vec, T max_difference = std::numeric_limits::epsilon()) const; + + constexpr Mat4& Concatenate(const Mat4& matrix); + constexpr Mat4& ConcatenateTransform(const Mat4& matrix); + + constexpr Vec4 GetColumn(std::size_t column) const; + constexpr T GetDeterminant() const; + constexpr T GetDeterminantTransform() const; + constexpr bool GetInverse(Mat4* dest) const; + constexpr bool GetInverseTransform(Mat4* dest) const; + Quat GetRotation() const; + constexpr Vec4 GetRow(std::size_t row) const; + constexpr Vec3 GetScale() const; + constexpr Vec3 GetSquaredScale() const; + constexpr Vec3 GetTranslation() const; + constexpr void GetTransposed(Mat4* dest) const; + 
+ constexpr bool HasNegativeScale() const; + constexpr bool HasScale() const; + + constexpr Mat4& Inverse(bool* succeeded = nullptr); + constexpr Mat4& InverseTransform(bool* succeeded = nullptr); + + constexpr bool IsTransformMatrix() const; + constexpr bool IsIdentity() const; + + constexpr Mat4& SetRotation(const Quat& rotation); + constexpr Mat4& SetScale(const Vec3& scale); + constexpr Mat4& SetTranslation(const Vec3& translation); + + std::string ToString() const; + + constexpr Vec2 Transform(const Vec2& vector, T z = 0.0, T w = 1.0) const; + constexpr Vec3 Transform(const Vec3& vector, T w = 1.0) const; + constexpr Vec4 Transform(const Vec4& vector) const; + + constexpr Mat4& Transpose(); + + constexpr T& operator()(std::size_t x, std::size_t y); + constexpr const T& operator()(std::size_t x, std::size_t y) const; + + constexpr T& operator[](std::size_t i); + constexpr const T& operator[](std::size_t i) const; + + constexpr Mat4& operator=(const Mat4&) = default; + constexpr Mat4& operator=(Mat4&&) = default; + + constexpr Mat4 operator*(const Mat4& matrix) const; + constexpr Vec2 operator*(const Vec2& vector) const; + constexpr Vec3 operator*(const Vec3& vector) const; + constexpr Vec4 operator*(const Vec4& vector) const; + constexpr Mat4 operator*(T scalar) const; + + constexpr Mat4& operator*=(const Mat4& matrix); + constexpr Mat4& operator*=(T scalar); + + constexpr bool operator==(const Mat4& mat) const; + constexpr bool operator!=(const Mat4& mat) const; + + static constexpr bool ApproxEqual(const Mat4& lhs, const Mat4& rhs, T max_difference = std::numeric_limits::epsilon()); + static constexpr Mat4 Concatenate(const Mat4& left, const Mat4& right); + static constexpr Mat4 ConcatenateTransform(const Mat4& left, const Mat4& right); + static constexpr Mat4 Identity(); + static constexpr Mat4 LookAt(const Vec3& eye, const Vec3& target, const Vec3& up = Vec3::Up()); + static constexpr Mat4 Ortho(T left, T right, T top, T bottom, T z_near = -1.0, T zFar = 1.0); + static Mat4 Perspective(RadianAngle angle, T ratio, T z_near, T z_far); + static constexpr Mat4 Rotate(const Quat& rotation); + static constexpr Mat4 Scale(const Vec3& scale); + static constexpr Mat4 Translate(const Vec3& translation); + static constexpr Mat4 Transform(const Vec3& translation, const Quat& rotation); + static constexpr Mat4 Transform(const Vec3& translation, const Quat& rotation, const Vec3& scale); + static constexpr Mat4 TransformInverse(const Vec3& translation, const Quat& rotation); + static constexpr Mat4 TransformInverse(const Vec3& translation, const Quat& rotation, const Vec3& scale); + static constexpr Mat4 Zero(); + + ~Mat4() = default; + }; + + using Mat4d = Mat4; + using Mat4f = Mat4; +} + +#include + +#endif diff --git a/runtime/Includes/Maths/Mat4.inl b/runtime/Includes/Maths/Mat4.inl new file mode 100644 index 0000000..b15e0d4 --- /dev/null +++ b/runtime/Includes/Maths/Mat4.inl @@ -0,0 +1,875 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + template + constexpr Mat4::Mat4(T r11, T r12, T r13, T r14, + T r21, T r22, T r23, T r24, + T r31, T r32, T r33, T r34, + T r41, T r42, T r43, T r44) : + m11(r11), m12(r12), m13(r13), m14(r14), + m21(r21), m22(r22), m23(r23), m24(r24), + m31(r31), m32(r32), m33(r33), m34(r34), + m41(r41), m42(r42), m43(r43), m44(r44) + {} + + template + constexpr Mat4::Mat4(const T matrix[16]) : + Mat4(matrix[ 0], matrix[ 1], matrix[ 2], matrix[ 3], + matrix[ 4], matrix[ 5], matrix[ 6], matrix[ 7], + matrix[ 8], 
matrix[ 9], matrix[10], matrix[11], + matrix[12], matrix[13], matrix[14], matrix[15]) + {} + + template + constexpr Mat4& Mat4::ApplyRotation(const Quat& rotation) + { + return Concatenate(Mat4::Rotate(rotation)); + } + + template + constexpr Mat4& Mat4::ApplyScale(const Vec3& scale) + { + m11 *= scale.x; + m12 *= scale.x; + m13 *= scale.x; + + m21 *= scale.y; + m22 *= scale.y; + m23 *= scale.y; + + m31 *= scale.z; + m32 *= scale.z; + m33 *= scale.z; + + return *this; + } + + template + constexpr Mat4& Mat4::ApplyTranslation(const Vec3& translation) + { + m41 += translation.x; + m42 += translation.y; + m43 += translation.z; + + return *this; + } + + template + constexpr bool Mat4::ApproxEqual(const Mat4& mat, T maxDifference) const + { + for(unsigned int i = 0; i < 16; ++i) + if(!NumberEquals((&m11)[i], (&mat.m11)[i], maxDifference)) + return false; + + return true; + } + + template + constexpr Mat4& Mat4::Concatenate(const Mat4& matrix) + { + return operator=(Mat4( + m11 * matrix.m11 + m12 * matrix.m21 + m13 * matrix.m31 + m14 * matrix.m41, + m11 * matrix.m12 + m12 * matrix.m22 + m13 * matrix.m32 + m14 * matrix.m42, + m11 * matrix.m13 + m12 * matrix.m23 + m13 * matrix.m33 + m14 * matrix.m43, + m11 * matrix.m14 + m12 * matrix.m24 + m13 * matrix.m34 + m14 * matrix.m44, + + m21 * matrix.m11 + m22 * matrix.m21 + m23 * matrix.m31 + m24 * matrix.m41, + m21 * matrix.m12 + m22 * matrix.m22 + m23 * matrix.m32 + m24 * matrix.m42, + m21 * matrix.m13 + m22 * matrix.m23 + m23 * matrix.m33 + m24 * matrix.m43, + m21 * matrix.m14 + m22 * matrix.m24 + m23 * matrix.m34 + m24 * matrix.m44, + + m31 * matrix.m11 + m32 * matrix.m21 + m33 * matrix.m31 + m34 * matrix.m41, + m31 * matrix.m12 + m32 * matrix.m22 + m33 * matrix.m32 + m34 * matrix.m42, + m31 * matrix.m13 + m32 * matrix.m23 + m33 * matrix.m33 + m34 * matrix.m43, + m31 * matrix.m14 + m32 * matrix.m24 + m33 * matrix.m34 + m34 * matrix.m44, + + m41 * matrix.m11 + m42 * matrix.m21 + m43 * matrix.m31 + m44 * matrix.m41, + m41 * matrix.m12 + m42 * matrix.m22 + m43 * matrix.m32 + m44 * matrix.m42, + m41 * matrix.m13 + m42 * matrix.m23 + m43 * matrix.m33 + m44 * matrix.m43, + m41 * matrix.m14 + m42 * matrix.m24 + m43 * matrix.m34 + m44 * matrix.m44 + )); + } + + template + constexpr Mat4& Mat4::ConcatenateTransform(const Mat4& matrix) + { + return operator=(Mat4( + m11*matrix.m11 + m12*matrix.m21 + m13*matrix.m31, + m11*matrix.m12 + m12*matrix.m22 + m13*matrix.m32, + m11*matrix.m13 + m12*matrix.m23 + m13*matrix.m33, + T(0.0), + + m21*matrix.m11 + m22*matrix.m21 + m23*matrix.m31, + m21*matrix.m12 + m22*matrix.m22 + m23*matrix.m32, + m21*matrix.m13 + m22*matrix.m23 + m23*matrix.m33, + T(0.0), + + m31*matrix.m11 + m32*matrix.m21 + m33*matrix.m31, + m31*matrix.m12 + m32*matrix.m22 + m33*matrix.m32, + m31*matrix.m13 + m32*matrix.m23 + m33*matrix.m33, + T(0.0), + + m41*matrix.m11 + m42*matrix.m21 + m43*matrix.m31 + matrix.m41, + m41*matrix.m12 + m42*matrix.m22 + m43*matrix.m32 + matrix.m42, + m41*matrix.m13 + m42*matrix.m23 + m43*matrix.m33 + matrix.m43, + T(1.0) + )); + } + + template + constexpr Vec4 Mat4::GetColumn(std::size_t column) const + { + Assert(column < 4, "column index out of range"); + const T* ptr = &m11 + column * 4; + return Vec4(ptr[0], ptr[1], ptr[2], ptr[3]); + } + + template + constexpr T Mat4::GetDeterminant() const + { + T A = m22*(m33*m44 - m43*m34) - m32*(m23*m44 - m43*m24) + m42*(m23*m34 - m33*m24); + T B = m12*(m33*m44 - m43*m34) - m32*(m13*m44 - m43*m14) + m42*(m13*m34 - m33*m14); + T C = m12*(m23*m44 - m43*m24) - m22*(m13*m44 - m43*m14) 
+ m42*(m13*m24 - m23*m14); + T D = m12*(m23*m34 - m33*m24) - m22*(m13*m34 - m33*m14) + m32*(m13*m24 - m23*m14); + + return m11*A - m21*B + m31*C - m41*D; + } + + template + constexpr T Mat4::GetDeterminantTransform() const + { + T A = m22*m33 - m32*m23; + T B = m12*m33 - m32*m13; + T C = m12*m23 - m22*m13; + + return m11*A - m21*B + m31*C; + } + + template + constexpr bool Mat4::GetInverse(Mat4* dest) const + { + Assert(dest, "destination matrix must be valid"); + + T det = GetDeterminant(); + if(det == T(0.0)) + return false; + + // http://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix + T inv[16]; + inv[0] = m22 * m33 * m44 - + m22 * m34 * m43 - + m32 * m23 * m44 + + m32 * m24 * m43 + + m42 * m23 * m34 - + m42 * m24 * m33; + + inv[1] = -m12 * m33 * m44 + + m12 * m34 * m43 + + m32 * m13 * m44 - + m32 * m14 * m43 - + m42 * m13 * m34 + + m42 * m14 * m33; + + inv[2] = m12 * m23 * m44 - + m12 * m24 * m43 - + m22 * m13 * m44 + + m22 * m14 * m43 + + m42 * m13 * m24 - + m42 * m14 * m23; + + inv[3] = -m12 * m23 * m34 + + m12 * m24 * m33 + + m22 * m13 * m34 - + m22 * m14 * m33 - + m32 * m13 * m24 + + m32 * m14 * m23; + + inv[4] = -m21 * m33 * m44 + + m21 * m34 * m43 + + m31 * m23 * m44 - + m31 * m24 * m43 - + m41 * m23 * m34 + + m41 * m24 * m33; + + inv[5] = m11 * m33 * m44 - + m11 * m34 * m43 - + m31 * m13 * m44 + + m31 * m14 * m43 + + m41 * m13 * m34 - + m41 * m14 * m33; + + inv[6] = -m11 * m23 * m44 + + m11 * m24 * m43 + + m21 * m13 * m44 - + m21 * m14 * m43 - + m41 * m13 * m24 + + m41 * m14 * m23; + + inv[7] = m11 * m23 * m34 - + m11 * m24 * m33 - + m21 * m13 * m34 + + m21 * m14 * m33 + + m31 * m13 * m24 - + m31 * m14 * m23; + + inv[8] = m21 * m32 * m44 - + m21 * m34 * m42 - + m31 * m22 * m44 + + m31 * m24 * m42 + + m41 * m22 * m34 - + m41 * m24 * m32; + + inv[9] = -m11 * m32 * m44 + + m11 * m34 * m42 + + m31 * m12 * m44 - + m31 * m14 * m42 - + m41 * m12 * m34 + + m41 * m14 * m32; + + inv[10] = m11 * m22 * m44 - + m11 * m24 * m42 - + m21 * m12 * m44 + + m21 * m14 * m42 + + m41 * m12 * m24 - + m41 * m14 * m22; + + inv[11] = -m11 * m22 * m34 + + m11 * m24 * m32 + + m21 * m12 * m34 - + m21 * m14 * m32 - + m31 * m12 * m24 + + m31 * m14 * m22; + + inv[12] = -m21 * m32 * m43 + + m21 * m33 * m42 + + m31 * m22 * m43 - + m31 * m23 * m42 - + m41 * m22 * m33 + + m41 * m23 * m32; + + inv[13] = m11 * m32 * m43 - + m11 * m33 * m42 - + m31 * m12 * m43 + + m31 * m13 * m42 + + m41 * m12 * m33 - + m41 * m13 * m32; + + inv[14] = -m11 * m22 * m43 + + m11 * m23 * m42 + + m21 * m12 * m43 - + m21 * m13 * m42 - + m41 * m12 * m23 + + m41 * m13 * m22; + + inv[15] = m11 * m22 * m33 - + m11 * m23 * m32 - + m21 * m12 * m33 + + m21 * m13 * m32 + + m31 * m12 * m23 - + m31 * m13 * m22; + + T invDet = T(1.0) / det; + for(unsigned int i = 0; i < 16; ++i) + inv[i] *= invDet; + + *dest = inv; + return true; + } + + template + constexpr bool Mat4::GetInverseTransform(Mat4* dest) const + { + Assert(dest, "destination matrix must be valid"); + + T det = GetDeterminantTransform(); + if(det == T(0.0)) + return false; + + + // http://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix + T inv[16]; + inv[0] = m22 * m33 - + m32 * m23; + + inv[1] = -m12 * m33 + + m32 * m13; + + inv[2] = m12 * m23 - + m22 * m13; + + inv[3] = T(0.0); + + inv[4] = -m21 * m33 + + m31 * m23; + + inv[5] = m11 * m33 - + m31 * m13; + + inv[6] = -m11 * m23 + + m21 * m13; + + inv[7] = T(0.0); + + inv[8] = m21 * m32 - + m31 * m22; + + inv[9] = -m11 * m32 + + m31 * m12; + + inv[10] = m11 * m22 - + m21 * m12; + + inv[11] = T(0.0); + + inv[12] = 
-m21 * m32 * m43 + + m21 * m33 * m42 + + m31 * m22 * m43 - + m31 * m23 * m42 - + m41 * m22 * m33 + + m41 * m23 * m32; + + inv[13] = m11 * m32 * m43 - + m11 * m33 * m42 - + m31 * m12 * m43 + + m31 * m13 * m42 + + m41 * m12 * m33 - + m41 * m13 * m32; + + inv[14] = -m11 * m22 * m43 + + m11 * m23 * m42 + + m21 * m12 * m43 - + m21 * m13 * m42 - + m41 * m12 * m23 + + m41 * m13 * m22; + + T invDet = T(1.0) / det; + for(unsigned int i = 0; i < 16; ++i) + inv[i] *= invDet; + + inv[15] = T(1.0); + + *dest = inv; + return true; + } + + template + Quat Mat4::GetRotation() const + { + // http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuat/ + Quat quat; + + T trace = m11 + m22 + m33; + if(trace > T(0.0)) + { + T s = T(0.5) / std::sqrt(trace + T(1.0)); + quat.w = T(0.25) / s; + quat.x = (m23 - m32) * s; + quat.y = (m31 - m13) * s; + quat.z = (m12 - m21) * s; + } + else + { + if(m11 > m22 && m11 > m33) + { + T s = T(2.0) * std::sqrt(T(1.0) + m11 - m22 - m33); + + quat.w = (m23 - m32) / s; + quat.x = T(0.25) * s; + quat.y = (m21 + m12) / s; + quat.z = (m31 + m13) / s; + } + else if(m22 > m33) + { + T s = T(2.0) * std::sqrt(T(1.0) + m22 - m11 - m33); + + quat.w = (m31 - m13) / s; + quat.x = (m21 + m12) / s; + quat.y = T(0.25) * s; + quat.z = (m32 + m23) / s; + } + else + { + T s = T(2.0) * std::sqrt(T(1.0) + m33 - m11 - m22); + + quat.w = (m12 - m21) / s; + quat.x = (m31 + m13) / s; + quat.y = (m32 + m23) / s; + quat.z = T(0.25) * s; + } + } + + return quat; + } + + template + constexpr Vec4 Mat4::GetRow(std::size_t row) const + { + Assert(row < 4, "row index out of range"); + + const T* ptr = &m11; + return Vec4(ptr[row], ptr[row+4], ptr[row+8], ptr[row+12]); + } + + template + constexpr Vec3 Mat4::GetScale() const + { + Vec3 squaredScale = GetSquaredScale(); + return Vec3(std::sqrt(squaredScale.x), std::sqrt(squaredScale.y), std::sqrt(squaredScale.z)); + } + + template + constexpr Vec3 Mat4::GetSquaredScale() const + { + return Vec3(m11 * m11 + m12 * m12 + m13 * m13, + m21 * m21 + m22 * m22 + m23 * m23, + m31 * m31 + m32 * m32 + m33 * m33); + } + + template + constexpr Vec3 Mat4::GetTranslation() const + { + return Vec3(m41, m42, m43); + } + + template + constexpr void Mat4::GetTransposed(Mat4* dest) const + { + (*dest) = Mat4f( + m11, m21, m31, m41, + m12, m22, m32, m42, + m13, m23, m33, m43, + m14, m24, m34, m44 + ); + } + + template + constexpr bool Mat4::HasNegativeScale() const + { + return GetDeterminant() < T(0.0); + } + + template + constexpr bool Mat4::HasScale() const + { + T t = m11*m11 + m21*m21 + m31*m31; + if(!NumberEquals(t, T(1.0))) + return true; + + t = m12*m12 + m22*m22 + m32*m32; + if(!NumberEquals(t, T(1.0))) + return true; + + t = m13*m13 + m23*m23 + m33*m33; + if(!NumberEquals(t, T(1.0))) + return true; + + return false; + } + + template + constexpr Mat4& Mat4::Inverse(bool* succeeded) + { + bool result = GetInverse(this); + if(succeeded) + *succeeded = result; + + return *this; + } + + template + constexpr Mat4& Mat4::InverseTransform(bool* succeeded) + { + bool result = GetInverseTransform(this); + if(succeeded) + *succeeded = result; + + return *this; + } + + template + constexpr bool Mat4::IsTransformMatrix() const + { + return NumberEquals(m14, T(0.0)) && NumberEquals(m24, T(0.0)) && NumberEquals(m34, T(0.0)) && NumberEquals(m44, T(1.0)); + } + + template + constexpr bool Mat4::IsIdentity() const + { + return (NumberEquals(m11, T(1.0)) && NumberEquals(m12, T(0.0)) && NumberEquals(m13, T(0.0)) && NumberEquals(m14, T(0.0)) && + NumberEquals(m21, 
T(0.0)) && NumberEquals(m22, T(1.0)) && NumberEquals(m23, T(0.0)) && NumberEquals(m24, T(0.0)) && + NumberEquals(m31, T(0.0)) && NumberEquals(m32, T(0.0)) && NumberEquals(m33, T(1.0)) && NumberEquals(m34, T(0.0)) && + NumberEquals(m41, T(0.0)) && NumberEquals(m42, T(0.0)) && NumberEquals(m43, T(0.0)) && NumberEquals(m44, T(1.0))); + } + + template + constexpr Mat4& Mat4::SetRotation(const Quat& rotation) + { + T qw = rotation.w; + T qx = rotation.x; + T qy = rotation.y; + T qz = rotation.z; + + T qx2 = qx * qx; + T qy2 = qy * qy; + T qz2 = qz * qz; + + m11 = T(1.0) - T(2.0) * qy2 - T(2.0) * qz2; + m21 = T(2.0) * qx * qy - T(2.0) * qz * qw; + m31 = T(2.0) * qx * qz + T(2.0) * qy * qw; + + m12 = T(2.0) * qx * qy + T(2.0) * qz * qw; + m22 = T(1.0) - T(2.0) * qx2 - T(2.0) * qz2; + m32 = T(2.0) * qy * qz - T(2.0) * qx * qw; + + m13 = T(2.0) * qx * qz - T(2.0) * qy * qw; + m23 = T(2.0) * qy * qz + T(2.0) * qx * qw; + m33 = T(1.0) - T(2.0) * qx2 - T(2.0) * qy2; + + return *this; + } + + template + constexpr Mat4& Mat4::SetScale(const Vec3& scale) + { + m11 = scale.x; + m22 = scale.y; + m33 = scale.z; + + return *this; + } + + template + constexpr Mat4& Mat4::SetTranslation(const Vec3& translation) + { + m41 = translation.x; + m42 = translation.y; + m43 = translation.z; + + return *this; + } + + template + std::string Mat4::ToString() const + { + std::ostringstream ss; + ss << *this; + + return ss.str(); + } + + template + constexpr Vec2 Mat4::Transform(const Vec2& vector, T z, T w) const + { + return Vec2(m11 * vector.x + m21 * vector.y + m31 * z + m41 * w, + m12 * vector.x + m22 * vector.y + m32 * z + m42 * w); + } + + template + constexpr Vec3 Mat4::Transform(const Vec3& vector, T w) const + { + return Vec3(m11 * vector.x + m21 * vector.y + m31 * vector.z + m41 * w, + m12 * vector.x + m22 * vector.y + m32 * vector.z + m42 * w, + m13 * vector.x + m23 * vector.y + m33 * vector.z + m43 * w); + } + + template + constexpr Vec4 Mat4::Transform(const Vec4& vector) const + { + return Vec4(m11 * vector.x + m21 * vector.y + m31 * vector.z + m41 * vector.w, + m12 * vector.x + m22 * vector.y + m32 * vector.z + m42 * vector.w, + m13 * vector.x + m23 * vector.y + m33 * vector.z + m43 * vector.w, + m14 * vector.x + m24 * vector.y + m34 * vector.z + m44 * vector.w); + } + + template + constexpr Mat4& Mat4::Transpose() + { + std::swap(m12, m21); + std::swap(m13, m31); + std::swap(m14, m41); + std::swap(m23, m32); + std::swap(m24, m42); + std::swap(m34, m43); + + return *this; + } + + template + constexpr T& Mat4::operator()(std::size_t x, std::size_t y) + { + Assert(x <= 3, "index out of range"); + Assert(y <= 3, "index out of range"); + + return (&m11)[y*4 + x]; + } + + template + constexpr const T& Mat4::operator()(std::size_t x, std::size_t y) const + { + Assert(x <= 3, "index out of range"); + Assert(y <= 3, "index out of range"); + + return (&m11)[y*4+x]; + } + + template + constexpr T& Mat4::operator[](std::size_t i) + { + Assert(i <= 16, "index out of range"); + + return (&m11)[i]; + } + + template + constexpr const T& Mat4::operator[](std::size_t i) const + { + Assert(i <= 16, "index out of range"); + + return (&m11)[i]; + } + + template + constexpr Mat4 Mat4::operator*(const Mat4& matrix) const + { + Mat4 result(*this); + return result.Concatenate(matrix); + } + + template + constexpr Vec2 Mat4::operator*(const Vec2& vector) const + { + return Transform(vector); + } + + template + constexpr Vec3 Mat4::operator*(const Vec3& vector) const + { + return Transform(vector); + } + + template + constexpr Vec4 
Mat4::operator*(const Vec4& vector) const + { + return Transform(vector); + } + + template + constexpr Mat4 Mat4::operator*(T scalar) const + { + Mat4 mat; + for(unsigned int i = 0; i < 16; ++i) + mat[i] = (&m11)[i] * scalar; + + return mat; + } + + template + constexpr Mat4& Mat4::operator*=(const Mat4& matrix) + { + Concatenate(matrix); + + return *this; + } + + template + constexpr Mat4& Mat4::operator*=(T scalar) + { + for(unsigned int i = 0; i < 16; ++i) + (&m11)[i] *= scalar; + + return *this; + } + + template + constexpr bool Mat4::operator==(const Mat4& mat) const + { + for(unsigned int i = 0; i < 16; ++i) + if((&m11)[i] != (&mat.m11)[i]) + return false; + + return true; + } + + template + constexpr bool Mat4::operator!=(const Mat4& mat) const + { + return !operator==(mat); + } + + template + constexpr bool Mat4::ApproxEqual(const Mat4& lhs, const Mat4& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr Mat4 Mat4::Concatenate(const Mat4& left, const Mat4& right) + { + Mat4 matrix(left); // Copy of left-hand side matrix + matrix.Concatenate(right); // Concatenation with right-hand side + + return matrix; + } + + template + constexpr Mat4 Mat4::ConcatenateTransform(const Mat4& left, const Mat4& right) + { + Mat4 matrix(left); // Copy of left-hand side matrix + matrix.ConcatenateTransform(right); // Affine concatenation with right-hand side + + return matrix; + } + + template + constexpr Mat4 Mat4::Identity() + { + return Mat4( + T(1.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(1.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(1.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(1.0) + ); + } + + template + constexpr Mat4 Mat4::LookAt(const Vec3& eye, const Vec3& target, const Vec3& up) + { + Vec3 f = Vec3::Normalize(target - eye); + Vec3 s = Vec3::Normalize(f.CrossProduct(up)); + Vec3 u = s.CrossProduct(f); + + return Mat4( + s.x, u.x, -f.x, T(0.0), + s.y, u.y, -f.y, T(0.0), + s.z, u.z, -f.z, T(0.0), + -s.DotProduct(eye), -u.DotProduct(eye), f.DotProduct(eye), T(1.0) + ); + } + + template + constexpr Mat4 Mat4::Ortho(T left, T right, T top, T bottom, T zNear, T zFar) + { + // http://msdn.microsoft.com/en-us/library/windows/desktop/bb204942(v=vs.85).aspx + return Mat4( + T(2.0) / (right - left), T(0.0), T(0.0), T(0.0), + T(0.0), T(2.0) / (top - bottom), T(0.0), T(0.0), + T(0.0), T(0.0), T(1.0) / (zNear - zFar), T(0.0), + (left + right) / (left - right), (top + bottom) / (bottom - top), zNear / (zNear - zFar), T(1.0) + ); + } + + template + Mat4 Mat4::Perspective(RadianAngle angle, T ratio, T zNear, T zFar) + { + angle /= T(2.0); + + T yScale = angle.GetTan(); + + return Mat4( + T(1.0) / (ratio * yScale), T(0.0), T(0.0), T(0.0), + T(0.0), T(-1.0) / (yScale), T(0.0), T(0.0), + T(0.0), T(0.0), zFar / (zNear - zFar), T(-1.0), + T(0.0), T(0.0), -(zNear * zFar) / (zFar - zNear), T(0.0) + ); + } + + template + constexpr Mat4 Mat4::Rotate(const Quat& rotation) + { + Mat4 matrix = Mat4::Identity(); + matrix.SetRotation(rotation); + + return matrix; + } + + template + constexpr Mat4 Mat4::Scale(const Vec3& scale) + { + return Mat4( + scale.x, T(0.0), T(0.0), T(0.0), + T(0.0), scale.y, T(0.0), T(0.0), + T(0.0), T(0.0), scale.z, T(0.0), + T(0.0), T(0.0), T(0.0), T(1.0) + ); + } + + template + constexpr Mat4 Mat4::Translate(const Vec3& translation) + { + return Mat4( + T(1.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(1.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(1.0), T(0.0), + translation.x, translation.y, translation.z, T(1.0) + ); + } + + template + constexpr Mat4 
Mat4::Transform(const Vec3& translation, const Quat& rotation) + { + Mat4 mat = Mat4f::Identity(); + mat.SetRotation(rotation); + mat.SetTranslation(translation); + + return mat; + } + + template + constexpr Mat4 Mat4::Transform(const Vec3& translation, const Quat& rotation, const Vec3& scale) + { + Mat4 mat = Transform(translation, rotation); + mat.ApplyScale(scale); + + return mat; + } + + template + constexpr Mat4 Mat4::TransformInverse(const Vec3& translation, const Quat& rotation) + { + // A view matrix must apply an inverse transformation of the 'world' matrix + Quat invRot = rotation.GetConjugate(); // Inverse of the rotation + + return Transform(-(invRot * translation), invRot); + } + + template + constexpr Mat4 Mat4::TransformInverse(const Vec3& translation, const Quat& rotation, const Vec3& scale) + { + return TransformInverse(translation, rotation).ApplyScale(T(1.0) / scale); + } + + template + constexpr Mat4 Mat4::Zero() + { + return Mat4( + T(0.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(0.0), + T(0.0), T(0.0), T(0.0), T(0.0) + ); + } + + template + std::ostream& operator<<(std::ostream& out, const Mat4& matrix) + { + return out << "Mat4(" << matrix.m11 << ", " << matrix.m12 << ", " << matrix.m13 << ", " << matrix.m14 << ",\n" + << " " << matrix.m21 << ", " << matrix.m22 << ", " << matrix.m23 << ", " << matrix.m24 << ",\n" + << " " << matrix.m31 << ", " << matrix.m32 << ", " << matrix.m33 << ", " << matrix.m34 << ",\n" + << " " << matrix.m41 << ", " << matrix.m42 << ", " << matrix.m43 << ", " << matrix.m44 << ')'; + } + + template + constexpr Mat4 operator*(T scale, const Mat4& matrix) + { + return matrix * scale; + } +} diff --git a/runtime/Includes/Maths/MathsUtils.h b/runtime/Includes/Maths/MathsUtils.h new file mode 100644 index 0000000..fb211d9 --- /dev/null +++ b/runtime/Includes/Maths/MathsUtils.h @@ -0,0 +1,24 @@ +#ifndef __MLX_MATHS_UTILS__ +#define __MLX_MATHS_UTILS__ + +namespace mlx +{ + template + [[nodiscard]] constexpr T Mod(T x, T y) noexcept; + + template + [[nodiscard]] constexpr T DegreeToRadian(T degrees) noexcept; + + template + [[nodiscard]] constexpr T RadianToDegree(T radians) noexcept; + + template + [[nodiscard]] constexpr T Clamp(T value, T min, T max) noexcept; + + template + [[nodiscard]] constexpr T Lerp(const T& from, const T& to, const T2& interpolation) noexcept; +} + +#include + +#endif diff --git a/runtime/Includes/Maths/MathsUtils.inl b/runtime/Includes/Maths/MathsUtils.inl new file mode 100644 index 0000000..509d2ad --- /dev/null +++ b/runtime/Includes/Maths/MathsUtils.inl @@ -0,0 +1,44 @@ +#pragma once +#include + +#include + +namespace mlx +{ + template + [[nodiscard]] constexpr T Mod(T x, T y) noexcept + { + if constexpr(std::is_floating_point_v) + { + if(!std::is_constant_evaluated()) + return x - static_cast(x / y) * y; + else + return std::fmod(x, y); + } + return x % y; + } + + template + [[nodiscard]] constexpr T DegreeToRadian(T degrees) noexcept + { + return degrees * (Pi() / T(180.0)); + } + + template + [[nodiscard]] constexpr T RadianToDegree(T radians) noexcept + { + return radians * (T(180.0) / Pi()); + } + + template + [[nodiscard]] constexpr T Clamp(T value, T min, T max) noexcept + { + return std::max(std::min(value, max), min); + } + + template + [[nodiscard]] constexpr T Lerp(const T& from, const T& to, const T2& interpolation) noexcept + { + return static_cast(from + interpolation * (to - from)); + } +} diff --git a/runtime/Includes/Maths/Quaternions.h 
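A hedged sketch (not part of the diff) of how the Mat4 helpers above compose. Mat4f/Quatf/RadianAnglef come from these headers; the include paths, the Vec3f constructor and Vec3::Up() default (Vec3.h is not shown here) are assumptions.

#include <Maths/Mat4.h>
#include <Maths/Quaternions.h>

mlx::Mat4f BuildModelViewProjection(const mlx::Vec3f& position, const mlx::Quatf& rotation, const mlx::Vec3f& scale)
{
	// Model matrix in one call: rotation + translation, with the scale applied on top.
	mlx::Mat4f model = mlx::Mat4f::Transform(position, rotation, scale);

	// Camera 2 units back on Z, looking at the origin (up defaults to Vec3::Up() per the declaration).
	mlx::Mat4f view = mlx::Mat4f::LookAt(mlx::Vec3f(0.0f, 0.0f, 2.0f), mlx::Vec3f(0.0f, 0.0f, 0.0f));

	// Perspective() already bakes in a Vulkan-style Y flip (note the -1/yScale term above).
	mlx::Mat4f proj = mlx::Mat4f::Perspective(mlx::RadianAnglef::FromDegrees(90.0f), 16.0f / 9.0f, 0.1f, 100.0f);

	// operator* concatenates; Transform() treats vectors as row vectors, so the model matrix comes first.
	return model * view * proj;
}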
b/runtime/Includes/Maths/Quaternions.h new file mode 100644 index 0000000..77b7b4e --- /dev/null +++ b/runtime/Includes/Maths/Quaternions.h @@ -0,0 +1,92 @@ +#ifndef __MLX_QUATERNIONS__ +#define __MLX_QUATERNIONS__ + +#include +#include + +namespace mlx +{ + template + struct Quat + { + T w, x, y, z; + + constexpr Quat() = default; + constexpr Quat(T W, T X, T Y, T Z); + template Quat(const Angle& angle); + Quat(const EulerAngles& angles); + constexpr Quat(RadianAngle angle, const Vec3& axis); + constexpr Quat(const T quat[4]); + template constexpr explicit Quat(const Quat& quat); + constexpr Quat(const Quat&) = default; + constexpr Quat(Quat&&) = default; + + RadianAngle AngleBetween(const Quat& vec) const; + constexpr bool ApproxEqual(const Quat& quat, T max_difference = std::numeric_limits::epsilon()) const; + + Quat& ComputeW(); + constexpr Quat& Conjugate(); + + constexpr T DotProduct(const Quat& vec) const; + + constexpr Quat GetConjugate() const; + Quat GetInverse() const; + Quat GetNormal(T* length = nullptr) const; + + Quat& Inverse(); + + T Magnitude() const; + + Quat& Normalize(T* length = nullptr); + + constexpr T SquaredMagnitude() const; + + RadianAngle To2DAngle() const; + EulerAngles ToEulerAngles() const; + std::string ToString() const; + + constexpr Quat& operator=(const Quat& quat) = default; + constexpr Quat& operator=(Quat&&) = default; + + constexpr Quat operator+(const Quat& quat) const; + constexpr Quat operator*(const Quat& quat) const; + constexpr Vec3 operator*(const Vec3& vec) const; + constexpr Quat operator*(T scale) const; + constexpr Quat operator/(const Quat& quat) const; + + constexpr Quat& operator+=(const Quat& quat); + constexpr Quat& operator*=(const Quat& quat); + constexpr Quat& operator*=(T scale); + constexpr Quat& operator/=(const Quat& quat); + + constexpr bool operator==(const Quat& quat) const; + constexpr bool operator!=(const Quat& quat) const; + constexpr bool operator<(const Quat& quat) const; + constexpr bool operator<=(const Quat& quat) const; + constexpr bool operator>(const Quat& quat) const; + constexpr bool operator>=(const Quat& quat) const; + + static RadianAngle AngleBetween(const Quat& lhs, const Quat& rhs); + static constexpr bool ApproxEqual(const Quat& lhs, const Quat& rhs, T max_difference = std::numeric_limits::epsilon()); + static constexpr Quat Identity(); + static constexpr Quat Lerp(const Quat& from, const Quat& to, T interpolation); + static Quat LookAt(const Vec3& forward, const Vec3& up); + static Quat Normalize(const Quat& quat, T* length = nullptr); + static Quat RotationBetween(const Vec3& from, const Vec3& to); + static Quat RotateTowards(const Quat& from, const Quat& to, RadianAngle max_rotation); + static Quat Mirror(Quat quat, const Vec3& axis); + static Quat Slerp(const Quat& from, const Quat& to, T interpolation); + static constexpr Quat Zero(); + + ~Quat() = default; + }; + + using Quatd = Quat; + using Quatf = Quat; + + template std::ostream& operator<<(std::ostream& out, const Quat& quat); +} + +#include + +#endif diff --git a/runtime/Includes/Maths/Quaternions.inl b/runtime/Includes/Maths/Quaternions.inl new file mode 100644 index 0000000..3905510 --- /dev/null +++ b/runtime/Includes/Maths/Quaternions.inl @@ -0,0 +1,508 @@ +#pragma once +#include + +namespace mlx +{ + template + constexpr Quat::Quat(T W, T X, T Y, T Z) : w(W), x(X), y(Y), z(Z) + {} + + template + template + Quat::Quat(const Angle& angle) : Quat(angle.ToQuat()) + {} + + template + Quat::Quat(const EulerAngles& angles) : 
Quat(angles.ToQuat()) + {} + + template + constexpr Quat::Quat(RadianAngle angle, const Vec3& axis) + { + angle /= T(2.0); + + Vec3 normalizedAxis = axis.GetNormal(); + + auto sincos = angle.GetSinCos(); + + w = sincos.second; + x = normalizedAxis.x * sincos.first; + y = normalizedAxis.y * sincos.first; + z = normalizedAxis.z * sincos.first; + + Normalize(); + } + + template + constexpr Quat::Quat(const T quat[4]) : w(quat[0]), x(quat[1]), y(quat[2]), z(quat[3]) + {} + + template + template + constexpr Quat::Quat(const Quat& quat) : w(static_cast(quat.w)), x(static_cast(quat.x)), y(static_cast(quat.y)), z(static_cast(quat.z)) + {} + + template + RadianAngle Quat::AngleBetween(const Quat& quat) const + { + T alpha = Vec3::DotProduct(Vec3(x, y, z), Vec3(quat.x, quat.y, quat.z)); + return std::acos(mlx::Clamp(alpha, T(-1.0), T(1.0))); + } + + template + constexpr bool Quat::ApproxEqual(const Quat& quat, T maxDifference) const + { + return NumberEquals(w, quat.w, maxDifference) && + NumberEquals(x, quat.x, maxDifference) && + NumberEquals(y, quat.y, maxDifference) && + NumberEquals(z, quat.z, maxDifference); + } + + template + Quat& Quat::ComputeW() + { + T t = T(1.0) - SquaredMagnitude(); + + if(t < T(0.0)) + w = T(0.0); + else + w = -std::sqrt(t); + + return *this; + } + + template + constexpr Quat& Quat::Conjugate() + { + x = -x; + y = -y; + z = -z; + return *this; + } + + template + constexpr T Quat::DotProduct(const Quat& quat) const + { + return w * quat.w + x * quat.x + y * quat.y + z * quat.z; + } + + template + constexpr Quat Quat::GetConjugate() const + { + Quat quat(*this); + quat.Conjugate(); + return quat; + } + + template + Quat Quat::GetInverse() const + { + Quat quat(*this); + quat.Inverse(); + return quat; + } + + template + Quat Quat::GetNormal(T* length) const + { + Quat quat(*this); + quat.Normalize(length); + return quat; + } + + template + Quat& Quat::Inverse() + { + T norm = SquaredMagnitude(); + if(norm > T(0.0)) + { + T invNorm = T(1.0) / std::sqrt(norm); + + w *= invNorm; + x *= -invNorm; + y *= -invNorm; + z *= -invNorm; + } + + return *this; + } + + template + T Quat::Magnitude() const + { + return std::sqrt(SquaredMagnitude()); + } + + template + Quat& Quat::Normalize(T* length) + { + T norm = std::sqrt(SquaredMagnitude()); + if(norm > T(0.0)) + { + T invNorm = T(1.0) / norm; + w *= invNorm; + x *= invNorm; + y *= invNorm; + z *= invNorm; + } + + if(length) + *length = norm; + + return *this; + } + + template + constexpr T Quat::SquaredMagnitude() const + { + return w * w + x * x + y * y + z * z; + } + + template + RadianAngle Quat::To2DAngle() const + { + T siny_cosp = T(2.0) * (w * z + x * y); + T cosy_cosp = T(1.0) - T(2.0) * (y * y + z * z); + + return std::atan2(siny_cosp, cosy_cosp); + } + + template + EulerAngles Quat::ToEulerAngles() const + { + T test = x * y + z * w; + if(test > T(0.499)) + // singularity at north pole + return EulerAngles(DegreeAngle(T(0.0)), RadianAngle(T(2.0) * std::atan2(x, w)), DegreeAngle(T(90.0))); + + if(test < T(-0.499)) + // singularity at south pole + return EulerAngles(DegreeAngle(T(0.0)), RadianAngle(T(-2.0) * std::atan2(x, w)), DegreeAngle(T(-90.0))); + + return EulerAngles(RadianAngle(std::atan2(T(2.0) * x * w - T(2.0) * y * z, T(1.0) - T(2.0) * x * x - T(2.0) * z * z)), + RadianAngle(std::atan2(T(2.0) * y * w - T(2.0) * x * z, T(1.0) - T(2.0) * y * y - T(2.0) * z * z)), + RadianAngle(std::asin(T(2.0) * test))); + } + + template + std::string Quat::ToString() const + { + std::ostringstream ss; + ss << *this; + + return 
ss.str(); + } + + template + constexpr Quat Quat::operator+(const Quat& quat) const + { + Quat result; + result.w = w + quat.w; + result.x = x + quat.x; + result.y = y + quat.y; + result.z = z + quat.z; + + return result; + } + + template + constexpr Quat Quat::operator*(const Quat& quat) const + { + Quat result; + result.w = w * quat.w - x * quat.x - y * quat.y - z * quat.z; + result.x = w * quat.x + x * quat.w + y * quat.z - z * quat.y; + result.y = w * quat.y + y * quat.w + z * quat.x - x * quat.z; + result.z = w * quat.z + z * quat.w + x * quat.y - y * quat.x; + + return result; + } + + template + constexpr Vec3 Quat::operator*(const Vec3& vec) const + { + Vec3 quatVec(x, y, z); + Vec3 uv = quatVec.CrossProduct(vec); + Vec3 uuv = quatVec.CrossProduct(uv); + uv *= T(2.0) * w; + uuv *= T(2.0); + + return vec + uv + uuv; + } + + template + constexpr Quat Quat::operator*(T scale) const + { + return Quat(w * scale, + x * scale, + y * scale, + z * scale); + } + + template + constexpr Quat Quat::operator/(const Quat& quat) const + { + return quat.GetConjugate() * (*this); + } + + template + constexpr Quat& Quat::operator+=(const Quat& quat) + { + return operator=(operator+(quat)); + } + + template + constexpr Quat& Quat::operator*=(const Quat& quat) + { + return operator=(operator*(quat)); + } + + template + constexpr Quat& Quat::operator*=(T scale) + { + return operator=(operator*(scale)); + } + + template + constexpr Quat& Quat::operator/=(const Quat& quat) + { + return operator=(operator/(quat)); + } + + template + constexpr bool Quat::operator==(const Quat& quat) const + { + return w == quat.w && x == quat.x && y == quat.y && z == quat.z; + } + + template + constexpr bool Quat::operator!=(const Quat& quat) const + { + return !operator==(quat); + } + + template + constexpr bool Quat::operator<(const Quat& quat) const + { + if(w != quat.w) + return w < quat.w; + + if(x != quat.x) + return x < quat.x; + + if(y != quat.y) + return y < quat.y; + + if(z != quat.z) + return z < quat.z; + } + + template + constexpr bool Quat::operator<=(const Quat& quat) const + { + if(w != quat.w) + return w < quat.w; + + if(x != quat.x) + return x < quat.x; + + if(y != quat.y) + return y < quat.y; + + if(z != quat.z) + return z <= quat.z; + } + + template + constexpr bool Quat::operator>(const Quat& quat) const + { + if(w != quat.w) + return w > quat.w; + + if(x != quat.x) + return x > quat.x; + + if(y != quat.y) + return y > quat.y; + + if(z != quat.z) + return z > quat.z; + } + + template + constexpr bool Quat::operator>=(const Quat& quat) const + { + if(w != quat.w) + return w > quat.w; + + if(x != quat.x) + return x > quat.x; + + if(y != quat.y) + return y > quat.y; + + if(z != quat.z) + return z >= quat.z; + } + + template + RadianAngle Quat::AngleBetween(const Quat& lhs, const Quat& rhs) + { + return lhs.AngleBetween(rhs); + } + + template + constexpr bool Quat::ApproxEqual(const Quat& lhs, const Quat& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr Quat Quat::Identity() + { + return Quat(1, 0, 0, 0); + } + + template + constexpr Quat Quat::Lerp(const Quat& from, const Quat& to, T interpolation) + { + Quat interpolated; + interpolated.w = mlx::Lerp(from.w, to.w, interpolation); + interpolated.x = mlx::Lerp(from.x, to.x, interpolation); + interpolated.y = mlx::Lerp(from.y, to.y, interpolation); + interpolated.z = mlx::Lerp(from.z, to.z, interpolation); + + return interpolated; + } + + template + Quat Quat::LookAt(const Vec3& forward, const Vec3& up) + { 
+ // From https://gamedev.stackexchange.com/questions/53129/quaternion-look-at-with-up-vector + Vec3 forward_w = Vec3::Forward(); + Vec3 axis = Vec3::CrossProduct(forward, forward_w); + RadianAngle angle = std::acos(Vec3::DotProduct(forward, forward_w)); + + Vec3 third = Vec3::CrossProduct(axis, forward_w); + if(Vec3::DotProduct(third, forward) < 0) + angle = -angle; + + Quat q1 = Quat(angle, axis); + + Vec3 up_l = q1 * up; + Vec3 right = Vec3::Normalize(Vec3::CrossProduct(forward, up)); + Vec3 up_w = Vec3::Normalize(Vec3::CrossProduct(right, forward)); + + Vec3 axis2 = Vec3::CrossProduct(up_l, up_w); + RadianAngle angle2 = std::acos(Vec3::DotProduct(forward, forward_w)); + + Quat q2 = Quat(angle2, axis2); + + return q2 * q1; + } + + template + Quat Quat::Normalize(const Quat& quat, T* length) + { + return quat.GetNormal(length); + } + + template + Quat Quat::RotationBetween(const Vec3& from, const Vec3& to) + { + T dot = from.DotProduct(to); + if(dot < T(-0.999999)) + { + Vec3 crossProduct; + if(from.DotProduct(Vec3::UnitX()) < T(0.999999)) + crossProduct = Vec3::UnitX().CrossProduct(from); + else + crossProduct = Vec3::UnitY().CrossProduct(from); + + crossProduct.Normalize(); + return Quat(Pi(), crossProduct); + } + else if(dot > T(0.999999)) + return Quat::Identity(); + else + { + T norm = std::sqrt(from.GetSquaredLength() * to.GetSquaredLength()); + Vec3 crossProduct = from.CrossProduct(to); + + return Quat(norm + dot, crossProduct.x, crossProduct.y, crossProduct.z).GetNormal(); + } + } + + template + Quat Quat::RotateTowards(const Quat& from, const Quat& to, RadianAngle maxRotation) + { + RadianAngle rotationBetween = AngleBetween(from, to); + if(rotationBetween < maxRotation) + return to; + + return Slerp(from, to, std::min(maxRotation.value / rotationBetween.value), 1.f); + } + + template + Quat Quat::Mirror(Quat quat, const Vec3& axis) + { + T x = std::copysign(T(1.0), axis.x); + T y = std::copysign(T(1.0), axis.y); + T z = std::copysign(T(1.0), axis.z); + + quat.x = y * z * quat.x; + quat.y = x * z * quat.y; + quat.z = x * y * quat.z; + + return quat; + } + + template + Quat Quat::Slerp(const Quat& from, const Quat& to, T interpolation) + { + Quat q; + + T cosOmega = from.DotProduct(to); + if(cosOmega < T(0.0)) + { + // We invert everything + q = Quat(-to.w, -to.x, -to.y, -to.z); + cosOmega = -cosOmega; + } + else + q = Quat(to); + + T k0, k1; + if(cosOmega > T(0.9999)) + { + // Linear interpolation to avoid division by zero + k0 = T(1.0) - interpolation; + k1 = interpolation; + } + else + { + T sinOmega = std::sqrt(T(1.0) - cosOmega*cosOmega); + T omega = std::atan2(sinOmega, cosOmega); + + // To avoid two divisions + sinOmega = T(1.0)/sinOmega; + + k0 = std::sin((T(1.0) - interpolation) * omega) * sinOmega; + k1 = std::sin(interpolation*omega) * sinOmega; + } + + Quat result(k0 * from.w, k0 * from.x, k0 * from.y, k0 * from.z); + return result += q * k1; + } + + template + constexpr Quat Quat::Zero() + { + return Quat(0, 0, 0, 0); + } + + template + std::ostream& operator<<(std::ostream& out, const Quat& quat) + { + return out << "Quat(" << quat.w << " | " << quat.x << ", " << quat.y << ", " << quat.z << ')'; + } +} diff --git a/runtime/Includes/Maths/Readme.md b/runtime/Includes/Maths/Readme.md new file mode 100644 index 0000000..1ef71b6 --- /dev/null +++ b/runtime/Includes/Maths/Readme.md @@ -0,0 +1 @@ +Highly inspired by [Nazara Maths library](https://github.com/NazaraEngine/NazaraEngine/tree/main/include/Nazara/Math) diff --git a/runtime/Includes/Maths/Vec2.h 
b/runtime/Includes/Maths/Vec2.h new file mode 100755 index 0000000..a08706d --- /dev/null +++ b/runtime/Includes/Maths/Vec2.h @@ -0,0 +1,109 @@ +#ifndef __MLX_VEC2__ +#define __MLX_VEC2__ + +namespace mlx +{ + template struct Vec3; + template struct Vec4; + + template + struct Vec2 + { + union { T x, r, s; }; + union { T y, g, t; }; + + constexpr Vec2() = default; + constexpr Vec2(T X, T Y); + constexpr explicit Vec2(T scale); + template constexpr explicit Vec2(const Vec2& vec); + constexpr Vec2(const Vec2&) = default; + constexpr Vec2(Vec2&&) = default; + constexpr explicit Vec2(const Vec3& vec); + constexpr explicit Vec2(const Vec4& vec); + + T AbsDotProduct(const Vec2& vec) const; + constexpr bool ApproxEqual(const Vec2& vec, T max_difference = std::numeric_limits::epsilon()) const; + + template U Distance(const Vec2& vec) const; + constexpr T DotProduct(const Vec2& vec) const; + + template T GetLength() const; + Vec2 GetNormal(T* length = nullptr) const; + constexpr T GetSquaredLength() const; + + constexpr Vec2& Maximize(const Vec2& vec); + constexpr Vec2& Minimize(const Vec2& vec); + + Vec2& Normalize(T* length = nullptr); + + constexpr T SquaredDistance(const Vec2& vec) const; + + std::string ToString() const; + + constexpr T& operator[](std::size_t i); + constexpr T operator[](std::size_t i) const; + + constexpr const Vec2& operator+() const; + constexpr Vec2 operator-() const; + + constexpr Vec2 operator+(const Vec2& vec) const; + constexpr Vec2 operator-(const Vec2& vec) const; + constexpr Vec2 operator*(const Vec2& vec) const; + constexpr Vec2 operator*(T scale) const; + constexpr Vec2 operator/(const Vec2& vec) const; + constexpr Vec2 operator/(T scale) const; + constexpr Vec2 operator%(const Vec2& vec) const; + constexpr Vec2 operator%(T mod) const; + + constexpr Vec2& operator=(const Vec2&) = default; + constexpr Vec2& operator=(Vec2&&) = default; + + constexpr Vec2& operator+=(const Vec2& vec); + constexpr Vec2& operator-=(const Vec2& vec); + constexpr Vec2& operator*=(const Vec2& vec); + constexpr Vec2& operator*=(T scale); + constexpr Vec2& operator/=(const Vec2& vec); + constexpr Vec2& operator/=(T scale); + constexpr Vec2& operator%=(const Vec2& vec); + constexpr Vec2& operator%=(T mod); + + constexpr bool operator==(const Vec2& vec) const; + constexpr bool operator!=(const Vec2& vec) const; + constexpr bool operator<(const Vec2& vec) const; + constexpr bool operator<=(const Vec2& vec) const; + constexpr bool operator>(const Vec2& vec) const; + constexpr bool operator>=(const Vec2& vec) const; + + static constexpr Vec2 Apply(T(*func)(T), const Vec2& vec); + static constexpr bool ApproxEqual(const Vec2& lhs, const Vec2& rhs, T max_difference = std::numeric_limits::epsilon()); + template static U Distance(const Vec2& vec1, const Vec2& vec2); + static constexpr T DotProduct(const Vec2& vec1, const Vec2& vec2); + static constexpr Vec2 Lerp(const Vec2& from, const Vec2& to, T interpolation); + static Vec2 Normalize(const Vec2& vec); + static constexpr Vec2 Unit(); + static constexpr Vec2 UnitX(); + static constexpr Vec2 UnitY(); + static constexpr Vec2 Zero(); + + ~Vec2() = default; + }; + + using Vec2d = Vec2; + using Vec2f = Vec2; + using Vec2i = Vec2; + using Vec2ui = Vec2; + using Vec2i32 = Vec2; + using Vec2i64 = Vec2; + using Vec2ui32 = Vec2; + using Vec2ui64 = Vec2; + + template std::ostream& operator<<(std::ostream& out, const Vec2& vec); + + template constexpr Vec2 operator*(T scale, const Vec2& vec); + template constexpr Vec2 operator/(T scale, const Vec2& vec); + 
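+	// Left-scalar overloads: together with operator% below they let expressions such as
+	// 2.0f * vec or 1.0f / vec read the same way as their member counterparts.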
template constexpr Vec2 operator%(T mod, const Vec2& vec); +} + +#include + +#endif // __AK_VEC2__ diff --git a/runtime/Includes/Maths/Vec2.inl b/runtime/Includes/Maths/Vec2.inl new file mode 100755 index 0000000..f31c2f8 --- /dev/null +++ b/runtime/Includes/Maths/Vec2.inl @@ -0,0 +1,387 @@ +#pragma once +#include + +namespace mlx +{ + template + constexpr Vec2::Vec2(T X, T Y) : x(X), y(Y) {} + + template + constexpr Vec2::Vec2(T scale) : x(scale), y(scale) {} + + template + template + constexpr Vec2::Vec2(const Vec2& vec) : x(static_cast(vec.x)), y(static_cast(vec.y)) {} + + template + constexpr Vec2::Vec2(const Vec3& vec) : x(vec.x), y(vec.y) {} + + template + constexpr Vec2::Vec2(const Vec4& vec) : x(vec.x), y(vec.y) {} + + template + T Vec2::AbsDotProduct(const Vec2& vec) const + { + return std::abs(x * vec.x) + std::abs(y * vec.y); + } + + template + constexpr bool Vec2::ApproxEqual(const Vec2& vec, T maxDifference) const + { + return NumberEquals(x, vec.x, maxDifference) && NumberEquals(y, vec.y, maxDifference); + } + + template + template + U Vec2::Distance(const Vec2& vec) const + { + return static_cast(std::sqrt(SquaredDistance(vec))); + } + + template + constexpr T Vec2::DotProduct(const Vec2& vec) const + { + return x * vec.x + y * vec.y; + } + + template + template + T Vec2::GetLength() const + { + return static_cast(std::sqrt(static_cast(GetSquaredLength()))); + } + + template + Vec2 Vec2::GetNormal(T* length) const + { + Vec2 vec(*this); + vec.Normalize(length); + return vec; + } + + template + constexpr T Vec2::GetSquaredLength() const + { + return x * x + y * y; + } + + template + constexpr Vec2& Vec2::Maximize(const Vec2& vec) + { + if(vec.x > x) + x = vec.x; + if(vec.y > y) + y = vec.y; + return *this; + } + + template + constexpr Vec2& Vec2::Minimize(const Vec2& vec) + { + if(vec.x < x) + x = vec.x; + if(vec.y < y) + y = vec.y; + return *this; + } + + template + Vec2& Vec2::Normalize(T* length) + { + T norm = GetLength(); + if(norm > T(0.0)) + { + T invNorm = T(1.0) / norm; + x *= invNorm; + y *= invNorm; + } + if(length) + *length = norm; + return *this; + } + + template + constexpr T Vec2::SquaredDistance(const Vec2& vec) const + { + return (*this - vec).GetSquaredLength(); + } + + template + std::string Vec2::ToString() const + { + return "Vec2(" + std::to_string(x) + ", " + std::to_string(y) + ')'; + } + + template + constexpr T& Vec2::operator[](std::size_t i) + { + mlx::Assert(i < 2, "index out of range"); + return *(&x + i); + } + + template + constexpr T Vec2::operator[](std::size_t i) const + { + mlx::Assert(i < 2, "index out of range"); + return *(&x + i); + } + + template + constexpr const Vec2& Vec2::operator+() const + { + return *this; + } + + template + constexpr Vec2 Vec2::operator-() const + { + return Vec2(-x, -y); + } + + template + constexpr Vec2 Vec2::operator+(const Vec2& vec) const + { + return Vec2(x + vec.x, y + vec.y); + } + + template + constexpr Vec2 Vec2::operator-(const Vec2& vec) const + { + return Vec2(x - vec.x, y - vec.y); + } + + template + constexpr Vec2 Vec2::operator*(const Vec2& vec) const + { + return Vec2(x * vec.x, y * vec.y); + } + + template + constexpr Vec2 Vec2::operator*(T scale) const + { + return Vec2(x * scale, y * scale); + } + + template + constexpr Vec2 Vec2::operator/(const Vec2& vec) const + { + return Vec2(x / vec.x, y / vec.y); + } + + template + constexpr Vec2 Vec2::operator/(T scale) const + { + return Vec2(x / scale, y / scale); + } + + template + constexpr Vec2 Vec2::operator%(const Vec2& vec) const + { + 
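+		// Component-wise modulo. Mod is assumed to be mlx's helper that falls back to
+		// std::fmod for floating-point T and the native % for integral T.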
return Vec2(Mod(x, vec.x), Mod(y, vec.y)); + } + + template + constexpr Vec2 Vec2::operator%(T mod) const + { + return Vec2(Mod(x, mod), Mod(y, mod)); + } + + template + constexpr Vec2& Vec2::operator+=(const Vec2& vec) + { + x += vec.x; + y += vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator-=(const Vec2& vec) + { + x -= vec.x; + y -= vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator*=(const Vec2& vec) + { + x *= vec.x; + y *= vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator*=(T scale) + { + x *= scale; + y *= scale; + + return *this; + } + + template + constexpr Vec2& Vec2::operator/=(const Vec2& vec) + { + x /= vec.x; + y /= vec.y; + + return *this; + } + + template + constexpr Vec2& Vec2::operator/=(T scale) + { + x /= scale; + y /= scale; + + return *this; + } + + template + constexpr Vec2& Vec2::operator%=(const Vec2& vec) + { + x = Mod(x, vec.x); + y = Mod(y, vec.y); + + return *this; + } + + template + constexpr Vec2& Vec2::operator%=(T value) + { + x = Mod(x, value); + y = Mod(y, value); + + return *this; + } + + template + constexpr bool Vec2::operator==(const Vec2& vec) const + { + return x == vec.x && y == vec.y; + } + + template + constexpr bool Vec2::operator!=(const Vec2& vec) const + { + return !operator==(vec); + } + + template + constexpr bool Vec2::operator<(const Vec2& vec) const + { + if (x != vec.x) + return x < vec.x; + + return y < vec.y; + } + + template + constexpr bool Vec2::operator<=(const Vec2& vec) const + { + if (x != vec.x) + return x < vec.x; + + return y <= vec.y; + } + + template + constexpr bool Vec2::operator>(const Vec2& vec) const + { + if (x != vec.x) + return x > vec.x; + + return y > vec.y; + } + + template + constexpr bool Vec2::operator>=(const Vec2& vec) const + { + if (x != vec.x) + return x > vec.x; + + return y >= vec.y; + } + + template + constexpr Vec2 Vec2::Apply(T(*func)(T), const Vec2& vec) + { + return Vec2(func(vec.x), func(vec.y)); + } + + template + constexpr bool Vec2::ApproxEqual(const Vec2& lhs, const Vec2& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + template + U Vec2::Distance(const Vec2& vec1, const Vec2& vec2) + { + return vec1.Distance(vec2); + } + + template + constexpr T Vec2::DotProduct(const Vec2& vec1, const Vec2& vec2) + { + return vec1.DotProduct(vec2); + } + + template + Vec2 Vec2::Normalize(const Vec2& vec) + { + return vec.GetNormal(); + } + + template + constexpr Vec2 Vec2::Unit() + { + return Vec2(1, 1); + } + + template + constexpr Vec2 Vec2::UnitX() + { + return Vec2(1, 0); + } + + template + constexpr Vec2 Vec2::UnitY() + { + return Vec2(0, 1); + } + + template + constexpr Vec2 Vec2::Zero() + { + return Vec2(0, 0); + } + + template + std::ostream& operator<<(std::ostream& out, const Vec2& vec) + { + return out << "Vec2(" << vec.x << ", " << vec.y << ')'; + } + + template + constexpr Vec2 operator*(T scale, const Vec2& vec) + { + return Vec2(scale * vec.x, scale * vec.y); + } + + template + constexpr Vec2 operator/(T scale, const Vec2& vec) + { + return Vec2(scale / vec.x, scale / vec.y); + } + + template + constexpr Vec2 operator%(T mod, const Vec2& vec) + { + return Vec2(Mod(mod, vec.x), Mod(mod, vec.y)); + } +} diff --git a/runtime/Includes/Maths/Vec3.h b/runtime/Includes/Maths/Vec3.h new file mode 100755 index 0000000..ef7fdd5 --- /dev/null +++ b/runtime/Includes/Maths/Vec3.h @@ -0,0 +1,126 @@ +#ifndef __MLX_VEC3__ +#define __MLX_VEC3__ + +namespace mlx +{ + template struct Vec2; + 
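+	// Forward declarations are enough here: Vec3 only refers to Vec2/Vec4 by const
+	// reference in its mixed constructors, and the definitions are pulled in by the .inl files.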
template struct Vec4; + + template + struct Vec3 + { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + + constexpr Vec3() = default; + constexpr Vec3(T X, T Y, T Z); + constexpr Vec3(T X, const Vec2& vec); + constexpr explicit Vec3(T scale); + constexpr Vec3(const Vec2& vec, T Z = 0.0); + template constexpr explicit Vec3(const Vec3& vec); + constexpr Vec3(const Vec3&) = default; + constexpr Vec3(Vec3&&) = default; + constexpr explicit Vec3(const Vec4& vec); + + T AbsDotProduct(const Vec3& vec) const; + constexpr bool ApproxEqual(const Vec3& vec, T max_difference = std::numeric_limits::epsilon()) const; + + constexpr Vec3 CrossProduct(const Vec3& vec) const; + + template U Distance(const Vec3& vec) const; + constexpr T DotProduct(const Vec3& vec) const; + + Vec3 GetAbs() const; + template U GetLength() const; + Vec3 GetNormal(T* length = nullptr) const; + constexpr T GetSquaredLength() const; + + constexpr Vec3& Maximize(const Vec3& vec); + constexpr Vec3& Minimize(const Vec3& vec); + + Vec3& Normalize(T* length = nullptr); + + constexpr T SquaredDistance(const Vec3& vec) const; + + std::string ToString() const; + + constexpr T& operator[](std::size_t i); + constexpr const T& operator[](std::size_t i) const; + + constexpr const Vec3& operator+() const; + constexpr Vec3 operator-() const; + + constexpr Vec3 operator+(const Vec3& vec) const; + constexpr Vec3 operator-(const Vec3& vec) const; + constexpr Vec3 operator*(const Vec3& vec) const; + constexpr Vec3 operator*(T scale) const; + constexpr Vec3 operator/(const Vec3& vec) const; + constexpr Vec3 operator/(T scale) const; + constexpr Vec3 operator%(const Vec3& vec) const; + constexpr Vec3 operator%(T mod) const; + + constexpr Vec3& operator=(const Vec3&) = default; + constexpr Vec3& operator=(Vec3&&) = default; + + constexpr Vec3& operator+=(const Vec3& vec); + constexpr Vec3& operator-=(const Vec3& vec); + constexpr Vec3& operator*=(const Vec3& vec); + constexpr Vec3& operator*=(T scale); + constexpr Vec3& operator/=(const Vec3& vec); + constexpr Vec3& operator/=(T scale); + constexpr Vec3& operator%=(const Vec3& vec); + constexpr Vec3& operator%=(T mod); + + constexpr bool operator==(const Vec3& vec) const; + constexpr bool operator!=(const Vec3& vec) const; + constexpr bool operator<(const Vec3& vec) const; + constexpr bool operator<=(const Vec3& vec) const; + constexpr bool operator>(const Vec3& vec) const; + constexpr bool operator>=(const Vec3& vec) const; + + static constexpr Vec3 Apply(T(*func)(T), const Vec3& vec); + static constexpr bool ApproxEqual(const Vec3& lhs, const Vec3& rhs, T max_difference = std::numeric_limits::epsilon()); + static constexpr Vec3 Backward(); + static constexpr Vec3 Clamp(const Vec3& vec, const Vec3& min, const Vec3& max); + static constexpr Vec3 CrossProduct(const Vec3& vec1, const Vec3& vec2); + template static U Distance(const Vec3& vec1, const Vec3& vec2); + static constexpr T DotProduct(const Vec3& vec1, const Vec3& vec2); + static constexpr Vec3 Down(); + static constexpr Vec3 Forward(); + static constexpr Vec3 Left(); + static constexpr Vec3 Max(const Vec3& lhs, const Vec3& rhs); + static constexpr Vec3 Min(const Vec3& lhs, const Vec3& rhs); + static Vec3 Normalize(const Vec3& vec); + static constexpr Vec3 Right(); + static constexpr T SquaredDistance(const Vec3& vec1, const Vec3& vec2); + static constexpr Vec3 Unit(); + static constexpr Vec3 UnitX(); + static constexpr Vec3 UnitY(); + static constexpr Vec3 UnitZ(); + static constexpr Vec3 Up(); + static constexpr Vec3 
Zero(); + + ~Vec3() = default; + }; + + using Vec3b = Vec3; + using Vec3d = Vec3; + using Vec3f = Vec3; + using Vec3i = Vec3; + using Vec3ui = Vec3; + using Vec3i32 = Vec3; + using Vec3i64 = Vec3; + using Vec3ui32 = Vec3; + using Vec3ui64 = Vec3; + + template std::ostream& operator<<(std::ostream& out, const Vec3& vec); + + template constexpr Vec3 operator*(T scale, const Vec3& vec); + template constexpr Vec3 operator/(T scale, const Vec3& vec); + template constexpr Vec3 operator%(T scale, const Vec3& vec); +} + +#include + +#endif // __AK_VEC3__ diff --git a/runtime/Includes/Maths/Vec3.inl b/runtime/Includes/Maths/Vec3.inl new file mode 100755 index 0000000..158c1d0 --- /dev/null +++ b/runtime/Includes/Maths/Vec3.inl @@ -0,0 +1,508 @@ +#pragma once +#include + +namespace mlx +{ + template + constexpr Vec3::Vec3(T X, T Y, T Z) : x(X), y(Y), z(Z) {} + + template + constexpr Vec3::Vec3(T X, const Vec2& vec) : x(X), y(vec.x), z(vec.y) {} + + template + constexpr Vec3::Vec3(T scale) : x(scale), y(scale), z(scale) {} + + template + constexpr Vec3::Vec3(const Vec2& vec, T Z) : x(vec.x), y(vec.y), z(Z) {} + + template + template + constexpr Vec3::Vec3(const Vec3& vec) : x(static_cast(vec.x)), y(static_cast(vec.y)), z(static_cast(vec.z)) {} + + template + constexpr Vec3::Vec3(const Vec4& vec) : x(vec.x), y(vec.y), z(vec.z) {} + + template + T Vec3::AbsDotProduct(const Vec3& vec) const + { + return std::abs(x * vec.x) + std::abs(y * vec.y) + std::abs(z * vec.z); + } + + template + constexpr bool Vec3::ApproxEqual(const Vec3& vec, T maxDifference) const + { + return NumberEquals(x, vec.x, maxDifference) && NumberEquals(y, vec.y, maxDifference) && NumberEquals(z, vec.z, maxDifference); + } + + template + constexpr Vec3 Vec3::CrossProduct(const Vec3& vec) const + { + return Vec3(y * vec.z - z * vec.y, z * vec.x - x * vec.z, x * vec.y - y * vec.x); + } + + template + template + U Vec3::Distance(const Vec3& vec) const + { + return static_cast(std::sqrt(static_cast(SquaredDistance(vec)))); + } + + template + constexpr T Vec3::DotProduct(const Vec3& vec) const + { + return x * vec.x + y * vec.y + z * vec.z; + } + + template + Vec3 Vec3::GetAbs() const + { + return Vec3(std::abs(x), std::abs(y), std::abs(z)); + } + + template + template + U Vec3::GetLength() const + { + return static_cast(std::sqrt(static_cast(GetSquaredLength()))); + } + + template + Vec3 Vec3::GetNormal(T* length) const + { + Vec3 vec(*this); + vec.Normalize(length); + + return vec; + } + + template + constexpr T Vec3::GetSquaredLength() const + { + return x*x + y*y + z*z; + } + + template + constexpr Vec3& Vec3::Maximize(const Vec3& vec) + { + if (vec.x > x) + x = vec.x; + + if (vec.y > y) + y = vec.y; + + if (vec.z > z) + z = vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::Minimize(const Vec3& vec) + { + if (vec.x < x) + x = vec.x; + + if (vec.y < y) + y = vec.y; + + if (vec.z < z) + z = vec.z; + + return *this; + } + + template + Vec3& Vec3::Normalize(T* length) + { + T norm = GetLength(); + if (norm > T(0.0)) + { + T invNorm = T(1.0) / norm; + x *= invNorm; + y *= invNorm; + z *= invNorm; + } + + if (length) + *length = norm; + + return *this; + } + + template + constexpr T Vec3::SquaredDistance(const Vec3& vec) const + { + return (*this - vec).GetSquaredLength(); + } + + template + std::string Vec3::ToString() const + { + return "Vec3(" + std::to_string(x) + ", " + std::to_string(y) + ", " + std::to_string(z) + ')'; + } + + template + constexpr T& Vec3::operator[](std::size_t i) + { + mlx::Assert(i < 3, "index out 
of range"); + return *(&x + i); + } + + template + constexpr const T& Vec3::operator[](std::size_t i) const + { + mlx::Assert(i < 3, "index out of range"); + return *(&x + i); + } + + template + constexpr const Vec3& Vec3::operator+() const + { + return *this; + } + + template + constexpr Vec3 Vec3::operator-() const + { + return Vec3(-x, -y, -z); + } + template + constexpr Vec3 Vec3::operator+(const Vec3& vec) const + { + return Vec3(x + vec.x, y + vec.y, z + vec.z); + } + + template + constexpr Vec3 Vec3::operator-(const Vec3& vec) const + { + return Vec3(x - vec.x, y - vec.y, z - vec.z); + } + + template + constexpr Vec3 Vec3::operator*(const Vec3& vec) const + { + return Vec3(x * vec.x, y * vec.y, z * vec.z); + } + + template + constexpr Vec3 Vec3::operator*(T scale) const + { + return Vec3(x * scale, y * scale, z * scale); + } + + template + constexpr Vec3 Vec3::operator/(const Vec3& vec) const + { + return Vec3(x / vec.x, y / vec.y, z / vec.z); + } + + template + constexpr Vec3 Vec3::operator/(T scale) const + { + return Vec3(x / scale, y / scale, z / scale); + } + + template + constexpr Vec3 Vec3::operator%(const Vec3& vec) const + { + return Vec3(Mod(x, vec.x), Mod(y, vec.y), Mod(z, vec.z)); + } + + template + constexpr Vec3 Vec3::operator%(T mod) const + { + return Vec3(Mod(x, mod), Mod(y, mod), Mod(z, mod)); + } + + template + constexpr Vec3& Vec3::operator+=(const Vec3& vec) + { + x += vec.x; + y += vec.y; + z += vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator-=(const Vec3& vec) + { + x -= vec.x; + y -= vec.y; + z -= vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator*=(const Vec3& vec) + { + x *= vec.x; + y *= vec.y; + z *= vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator*=(T scale) + { + x *= scale; + y *= scale; + z *= scale; + + return *this; + } + + template + constexpr Vec3& Vec3::operator/=(const Vec3& vec) + { + x /= vec.x; + y /= vec.y; + z /= vec.z; + + return *this; + } + + template + constexpr Vec3& Vec3::operator/=(T scale) + { + x /= scale; + y /= scale; + z /= scale; + + return *this; + } + + template + constexpr Vec3& Vec3::operator%=(const Vec3& vec) + { + x = Mod(x, vec.x); + y = Mod(y, vec.y); + z = Mod(z, vec.z); + + return *this; + } + + template + constexpr Vec3& Vec3::operator%=(T mod) + { + x = Mod(x, mod); + y = Mod(y, mod); + z = Mod(z, mod); + + return *this; + } + + template + constexpr bool Vec3::operator==(const Vec3& vec) const + { + return x == vec.x && y == vec.y && z == vec.z; + } + + template + constexpr bool Vec3::operator!=(const Vec3& vec) const + { + return !operator==(vec); + } + + template + constexpr bool Vec3::operator<(const Vec3& vec) const + { + if (x != vec.x) + return x < vec.x; + + if (y != vec.y) + return y < vec.y; + + return z < vec.z; + } + + template + constexpr bool Vec3::operator<=(const Vec3& vec) const + { + if (x != vec.x) + return x < vec.x; + + if (y != vec.y) + return y < vec.y; + + return z <= vec.z; + } + + template + constexpr bool Vec3::operator>(const Vec3& vec) const + { + if (x != vec.x) + return x > vec.x; + + if (y != vec.y) + return y > vec.y; + + return z > vec.z; + } + + template + constexpr bool Vec3::operator>=(const Vec3& vec) const + { + if (x != vec.x) + return x > vec.x; + + if (y != vec.y) + return y > vec.y; + + return z >= vec.z; + } + + template + constexpr Vec3 Vec3::Apply(T(*func)(T), const Vec3& vec) + { + return Vec3(func(vec.x), func(vec.y), func(vec.z)); + } + + template + constexpr bool Vec3::ApproxEqual(const 
Vec3& lhs, const Vec3& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr Vec3 Vec3::CrossProduct(const Vec3& vec1, const Vec3& vec2) + { + return vec1.CrossProduct(vec2); + } + + template + constexpr T Vec3::DotProduct(const Vec3& vec1, const Vec3& vec2) + { + return vec1.DotProduct(vec2); + } + + template + constexpr Vec3 Vec3::Backward() + { + return Vec3(0, 0, 1); + } + + template + template + U Vec3::Distance(const Vec3& vec1, const Vec3& vec2) + { + return vec1.Distance(vec2); + } + + template + constexpr Vec3 Vec3::Down() + { + return Vec3(0, -1, 0); + } + + template + constexpr Vec3 Vec3::Forward() + { + return Vec3(0, 0, -1); + } + + template + constexpr Vec3 Vec3::Left() + { + return Vec3(-1, 0, 0); + } + + template + constexpr Vec3 Vec3::Max(const Vec3& lhs, const Vec3& rhs) + { + Vec3 max = lhs; + max.Maximize(rhs); + + return max; + } + + template + constexpr Vec3 Vec3::Min(const Vec3& lhs, const Vec3& rhs) + { + Vec3 min = lhs; + min.Minimize(rhs); + + return min; + } + + template + Vec3 Vec3::Normalize(const Vec3& vec) + { + return vec.GetNormal(); + } + + template + constexpr Vec3 Vec3::Right() + { + return Vec3(1, 0, 0); + } + + template + constexpr T Vec3::SquaredDistance(const Vec3& vec1, const Vec3& vec2) + { + return vec1.SquaredDistance(vec2); + } + + template + constexpr Vec3 Vec3::Unit() + { + return Vec3(1); + } + + template + constexpr Vec3 Vec3::UnitX() + { + return Vec3(1, 0, 0); + } + + template + constexpr Vec3 Vec3::UnitY() + { + return Vec3(0, 1, 0); + } + + template + constexpr Vec3 Vec3::UnitZ() + { + return Vec3(0, 0, 1); + } + + template + constexpr Vec3 Vec3::Up() + { + return Vec3(0, 1, 0); + } + + template + constexpr Vec3 Vec3::Zero() + { + return Vec3(0, 0, 0); + } + + template + std::ostream& operator<<(std::ostream& out, const Vec3& vec) + { + return out << "Vec3(" << vec.x << ", " << vec.y << ", " << vec.z << ')'; + } + + template + constexpr Vec3 operator*(T scale, const Vec3& vec) + { + return Vec3(scale * vec.x, scale * vec.y, scale * vec.z); + } + + template + constexpr Vec3 operator/(T scale, const Vec3& vec) + { + return Vec3(scale / vec.x, scale / vec.y, scale / vec.z); + } + + template + constexpr Vec3 operator%(T mod, const Vec3& vec) + { + return Vec3(Mod(mod, vec.x), Mod(mod, vec.y), Mod(mod, vec.z)); + } +} + diff --git a/runtime/Includes/Maths/Vec4.h b/runtime/Includes/Maths/Vec4.h new file mode 100755 index 0000000..a7e40ee --- /dev/null +++ b/runtime/Includes/Maths/Vec4.h @@ -0,0 +1,108 @@ +#ifndef __MLX_VEC4__ +#define __MLX_VEC4__ + +namespace mlx +{ + template struct Vec2; + template struct Vec3; + + template + struct Vec4 + { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + union { T w, a, q; }; + + constexpr Vec4() = default; + constexpr Vec4(T X, T Y, T Z, T W = 1.0); + constexpr Vec4(T X, T Y, const Vec2& vec); + constexpr Vec4(T X, const Vec2& vec, T W); + constexpr Vec4(T X, const Vec3& vec); + constexpr explicit Vec4(T scale); + constexpr Vec4(const Vec2& vec, T Z = 0.0, T W = 1.0); + constexpr Vec4(const Vec3& vec, T W = 1.0); + template constexpr explicit Vec4(const Vec4& vec); + constexpr Vec4(const Vec4&) = default; + constexpr Vec4(Vec4&&) = default; + + T AbsDotProduct(const Vec4& vec) const; + constexpr bool ApproxEqual(const Vec4& vec, T max_difference = std::numeric_limits::epsilon()) const; + + constexpr T DotProduct(const Vec4& vec) const; + + Vec4 GetNormal(T* length = nullptr) const; + + constexpr Vec4& Maximize(const Vec4& vec); + 
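+		// Note: Normalize() below performs a homogeneous divide rather than a Euclidean one;
+		// it scales x, y and z by 1/w, optionally reports w as the length, then resets w to 1
+		// (see Vec4.inl).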
constexpr Vec4& Minimize(const Vec4& vec); + + Vec4& Normalize(T* length = nullptr); + + std::string ToString() const; + + constexpr Vec4& operator=(const Vec4&) = default; + constexpr Vec4& operator=(Vec4&&) = default; + + constexpr T& operator[](std::size_t i); + constexpr const T& operator[](std::size_t i) const; + + constexpr const Vec4& operator+() const; + constexpr Vec4 operator-() const; + + constexpr Vec4 operator+(const Vec4& vec) const; + constexpr Vec4 operator-(const Vec4& vec) const; + constexpr Vec4 operator*(const Vec4& vec) const; + constexpr Vec4 operator*(T scale) const; + constexpr Vec4 operator/(const Vec4& vec) const; + constexpr Vec4 operator/(T scale) const; + constexpr Vec4 operator%(const Vec4& vec) const; + constexpr Vec4 operator%(T mod) const; + + constexpr Vec4& operator+=(const Vec4& vec); + constexpr Vec4& operator-=(const Vec4& vec); + constexpr Vec4& operator*=(const Vec4& vec); + constexpr Vec4& operator*=(T scale); + constexpr Vec4& operator/=(const Vec4& vec); + constexpr Vec4& operator/=(T scale); + constexpr Vec4& operator%=(const Vec4& vec); + constexpr Vec4& operator%=(T mod); + + constexpr bool operator==(const Vec4& vec) const; + constexpr bool operator!=(const Vec4& vec) const; + constexpr bool operator<(const Vec4& vec) const; + constexpr bool operator<=(const Vec4& vec) const; + constexpr bool operator>(const Vec4& vec) const; + constexpr bool operator>=(const Vec4& vec) const; + + static constexpr Vec4 Apply(T(*func)(T), const Vec4& vec); + static constexpr bool ApproxEqual(const Vec4& lhs, const Vec4& rhs, T max_difference = std::numeric_limits::epsilon()); + static constexpr T DotProduct(const Vec4& vec1, const Vec4& vec2); + static Vec4 Normalize(const Vec4& vec); + static constexpr Vec4 UnitX(); + static constexpr Vec4 UnitY(); + static constexpr Vec4 UnitZ(); + static constexpr Vec4 Zero(); + + ~Vec4() = default; + }; + + using Vec4d = Vec4; + using Vec4f = Vec4; + using Vec4i = Vec4; + using Vec4ui = Vec4; + using Vec4i32 = Vec4; + using Vec4i64 = Vec4; + using Vec4ui32 = Vec4; + using Vec4ui64 = Vec4; + + template std::ostream& operator<<(std::ostream& out, const Vec4& vec); + + template constexpr Vec4 operator*(T scale, const Vec4& vec); + template constexpr Vec4 operator/(T scale, const Vec4& vec); + template constexpr Vec4 operator%(T mod, const Vec4& vec); +} + +#include + +#endif // __AK_VEC4__ + diff --git a/runtime/Includes/Maths/Vec4.inl b/runtime/Includes/Maths/Vec4.inl new file mode 100755 index 0000000..deeb5c6 --- /dev/null +++ b/runtime/Includes/Maths/Vec4.inl @@ -0,0 +1,423 @@ +#pragma once +#include + +namespace mlx +{ + template + constexpr Vec4::Vec4(T X, T Y, T Z, T W) : x(X), y(Y), z(Z), w(W) {} + + template + constexpr Vec4::Vec4(T X, T Y, const Vec2& vec) : x(X), y(Y), z(vec.x), w(vec.y) {} + + template + constexpr Vec4::Vec4(T X, const Vec2& vec, T W) : x(X), y(vec.x), z(vec.y), w(W) {} + + template + constexpr Vec4::Vec4(T X, const Vec3& vec) : x(X), y(vec.x), z(vec.y), w(vec.z) {} + + template + constexpr Vec4::Vec4(T scale) : x(scale), y(scale), z(scale), w(scale) {} + + template + constexpr Vec4::Vec4(const Vec2& vec, T Z, T W) : x(vec.x), y(vec.y), z(Z), w(W) {} + + template + constexpr Vec4::Vec4(const Vec3& vec, T W) : x(vec.x), y(vec.y), z(vec.z), w(W) {} + + template + template + constexpr Vec4::Vec4(const Vec4& vec) : x(static_cast(vec.x)), y(static_cast(vec.y)), z(static_cast(vec.z)), w(static_cast(vec.w)) {} + + template + T Vec4::AbsDotProduct(const Vec4& vec) const + { + return std::abs(x * vec.x) + 
std::abs(y * vec.y) + std::abs(z * vec.z) + std::abs(w * vec.w); + } + + template + constexpr bool Vec4::ApproxEqual(const Vec4& vec, T maxDifference) const + { + return NumberEquals(x, vec.x, maxDifference) && NumberEquals(y, vec.y, maxDifference) && NumberEquals(z, vec.z, maxDifference) && NumberEquals(w, vec.w, maxDifference); + } + + template + constexpr T Vec4::DotProduct(const Vec4& vec) const + { + return x*vec.x + y*vec.y + z*vec.z + w*vec.w; + } + + template + Vec4 Vec4::GetNormal(T* length) const + { + Vec4 vec(*this); + vec.Normalize(length); + + return vec; + } + + template + constexpr Vec4& Vec4::Maximize(const Vec4& vec) + { + if (vec.x > x) + x = vec.x; + + if (vec.y > y) + y = vec.y; + + if (vec.z > z) + z = vec.z; + + if (vec.w > w) + w = vec.w; + + return *this; + } + + template + constexpr Vec4& Vec4::Minimize(const Vec4& vec) + { + if (vec.x < x) + x = vec.x; + + if (vec.y < y) + y = vec.y; + + if (vec.z < z) + z = vec.z; + + if (vec.w < w) + w = vec.w; + + return *this; + } + + template + Vec4& Vec4::Normalize(T* length) + { + T invLength = T(1.0) / w; + x *= invLength; + y *= invLength; + z *= invLength; + + if (length) + *length = w; + + w = T(1.0); + + return *this; + } + + template + std::string Vec4::ToString() const + { + std::ostringstream ss; + ss << *this; + + return ss.str(); + } + + template + constexpr T& Vec4::operator[](std::size_t i) + { + mlx::Assert(i < 4, "index out of range"); + return *(&x + i); + } + + template + constexpr const T& Vec4::operator[](std::size_t i) const + { + mlx::Assert(i < 4, "index out of range"); + return *(&x + i); + } + + template + constexpr const Vec4& Vec4::operator+() const + { + return *this; + } + + template + constexpr Vec4 Vec4::operator-() const + { + return Vec4(-x, -y, -z, -w); + } + template + constexpr Vec4 Vec4::operator+(const Vec4& vec) const + { + return Vec4(x + vec.x, y + vec.y, z + vec.z, w + vec.w); + } + template + constexpr Vec4 Vec4::operator-(const Vec4& vec) const + { + return Vec4(x - vec.x, y - vec.y, z - vec.z, w - vec.w); + } + + template + constexpr Vec4 Vec4::operator*(const Vec4& vec) const + { + return Vec4(x * vec.x, y * vec.y, z * vec.z, w * vec.w); + } + + template + constexpr Vec4 Vec4::operator*(T scale) const + { + return Vec4(x * scale, y * scale, z * scale, w * scale); + } + + template + constexpr Vec4 Vec4::operator/(const Vec4& vec) const + { + return Vec4(x / vec.x, y / vec.y, z / vec.z, w / vec.w); + } + + template + constexpr Vec4 Vec4::operator/(T scale) const + { + return Vec4(x / scale, y / scale, z / scale, w / scale); + } + + template + constexpr Vec4 Vec4::operator%(const Vec4& vec) const + { + return Vec4(Mod(x, vec.x), Mod(y, vec.y), Mod(z, vec.z), Mod(w, vec.w)); + } + + template + constexpr Vec4 Vec4::operator%(T mod) const + { + return Vec4(Mod(x, mod), Mod(y, mod), Mod(z, mod), Mod(z, mod)); + } + + template + constexpr Vec4& Vec4::operator+=(const Vec4& vec) + { + x += vec.x; + y += vec.y; + z += vec.z; + w += vec.w; + + return *this; + } + + template + constexpr Vec4& Vec4::operator-=(const Vec4& vec) + { + x -= vec.x; + y -= vec.y; + z -= vec.z; + w -= vec.w; + + return *this; + } + + template + constexpr Vec4& Vec4::operator*=(const Vec4& vec) + { + x *= vec.x; + y *= vec.y; + z *= vec.z; + w *= vec.w; + + return *this; + } + + template + constexpr Vec4& Vec4::operator*=(T scale) + { + x *= scale; + y *= scale; + z *= scale; + w *= scale; + + return *this; + } + + template + constexpr Vec4& Vec4::operator/=(const Vec4& vec) + { + x /= vec.x; + y /= vec.y; + z /= 
vec.z; + w /= vec.w; + + return *this; + } + + template + constexpr Vec4& Vec4::operator/=(T scale) + { + x /= scale; + y /= scale; + z /= scale; + w /= scale; + + return *this; + } + + template + constexpr Vec4& Vec4::operator%=(const Vec4& vec) + { + x = Mod(x, vec.x); + y = Mod(y, vec.y); + z = Mod(z, vec.z); + w = Mod(w, vec.w); + + return *this; + } + + template + constexpr Vec4& Vec4::operator%=(T mod) + { + x = Mod(x, mod); + y = Mod(y, mod); + z = Mod(z, mod); + w = Mod(w, mod); + + return *this; + } + + template + constexpr bool Vec4::operator==(const Vec4& vec) const + { + return x == vec.x && y == vec.y && z == vec.z && w == vec.w; + } + + template + constexpr bool Vec4::operator!=(const Vec4& vec) const + { + return !operator==(vec); + } + + template + constexpr bool Vec4::operator<(const Vec4& vec) const + { + if (x != vec.x) + return x < vec.x; + + if (y != vec.y) + return y < vec.y; + + if (z != vec.z) + return z < vec.z; + + return w < vec.w; + } + + template + constexpr bool Vec4::operator<=(const Vec4& vec) const + { + if (x != vec.x) + return x < vec.x; + + if (y != vec.y) + return y < vec.y; + + if (z != vec.z) + return z < vec.z; + + return w <= vec.w; + } + + template + constexpr bool Vec4::operator>(const Vec4& vec) const + { + if (x != vec.x) + return x > vec.x; + + if (y != vec.y) + return y > vec.y; + + if (z != vec.z) + return z > vec.z; + + return w > vec.w; + } + + template + constexpr bool Vec4::operator>=(const Vec4& vec) const + { + if (x != vec.x) + return x > vec.x; + + if (y != vec.y) + return y > vec.y; + + if (z != vec.z) + return z > vec.z; + + return w >= vec.w; + } + + template + constexpr Vec4 Vec4::Apply(T(*func)(T), const Vec4& vec) + { + return Vec4(func(vec.x), func(vec.y), func(vec.z), func(vec.w)); + } + + template + constexpr bool Vec4::ApproxEqual(const Vec4& lhs, const Vec4& rhs, T maxDifference) + { + return lhs.ApproxEqual(rhs, maxDifference); + } + + template + constexpr T Vec4::DotProduct(const Vec4& vec1, const Vec4& vec2) + { + return vec1.DotProduct(vec2); + } + + template + Vec4 Vec4::Normalize(const Vec4& vec) + { + return vec.GetNormal(); + } + + template + constexpr Vec4 Vec4::UnitX() + { + return Vec4(1, 0, 0, 1); + } + + template + constexpr Vec4 Vec4::UnitY() + { + return Vec4(0, 1, 0, 1); + } + + template + constexpr Vec4 Vec4::UnitZ() + { + return Vec4(0, 0, 1, 1); + } + + template + constexpr Vec4 Vec4::Zero() + { + return Vec4(0, 0, 0, 1); + } + + template + std::ostream& operator<<(std::ostream& out, const Vec4& vec) + { + return out << "Vec4(" << vec.x << ", " << vec.y << ", " << vec.z << ", " << vec.w << ')'; + } + + template + constexpr Vec4 operator*(T scale, const Vec4& vec) + { + return Vec4(scale * vec.x, scale * vec.y, scale * vec.z, scale * vec.w); + } + + template + constexpr Vec4 operator/(T scale, const Vec4& vec) + { + return Vec4(scale / vec.x, scale / vec.y, scale / vec.z, scale / vec.w); + } + + template + constexpr Vec4 operator%(T mod, const Vec4& vec) + { + return Vec4(Mod(mod, vec.x), Mod(mod, vec.y), Mod(mod, vec.z), Mod(mod, vec.w)); + } +} + diff --git a/runtime/Includes/Platform/Inputs.h b/runtime/Includes/Platform/Inputs.h new file mode 100644 index 0000000..3ac5b90 --- /dev/null +++ b/runtime/Includes/Platform/Inputs.h @@ -0,0 +1,51 @@ +#ifndef __MLX_INPUTS__ +#define __MLX_INPUTS__ + +#include +#include +#include + +namespace mlx +{ + class Inputs + { + public: + struct Hook + { + func::function hook; + void* param = nullptr; + }; + + public: + Inputs() = default; + + void FetchInputs(); + + 
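+		// Windows register themselves here so FetchInputs() can route SDL events to the
+		// right per-window hooks; OnEvent() below installs a C callback plus user pointer
+		// for a given window id and event type (presumably what backs the public
+		// mlx_on_event entry point).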
inline void RegisterWindow(std::shared_ptr window) { m_windows[window->GetID()] = window; } + + std::int32_t GetX() const noexcept; + std::int32_t GetY() const noexcept; + std::int32_t GetXRel() const noexcept; + std::int32_t GetYRel() const noexcept; + + inline bool IsMouseMoving() const noexcept { return GetXRel() || GetYRel(); } + MLX_FORCEINLINE bool IsRunning() const noexcept { return m_run; } + MLX_FORCEINLINE constexpr void Finish() noexcept { m_run = false; } + MLX_FORCEINLINE constexpr void Run() noexcept { m_run = true; } + + inline void OnEvent(std::uint32_t id, int event, int (*funct_ptr)(int, void*), void* param) noexcept + { + m_events_hooks[id][event].hook = funct_ptr; + m_events_hooks[id][event].param = param; + } + + ~Inputs() = default; + + private: + std::unordered_map> m_windows; + std::unordered_map> m_events_hooks; + bool m_run = false; + }; +} + +#endif diff --git a/runtime/Includes/Platform/Window.h b/runtime/Includes/Platform/Window.h new file mode 100644 index 0000000..66ba306 --- /dev/null +++ b/runtime/Includes/Platform/Window.h @@ -0,0 +1,41 @@ +#ifndef __MLX_WINDOW__ +#define __MLX_WINDOW__ + +#include +#include + +namespace mlx +{ + class Window + { + public: + Window(std::size_t w, std::size_t h, const std::string& title, bool is_resizable, bool hidden = false); + + inline Handle GetWindowHandle() const noexcept { return p_window; } + inline int GetWidth() const noexcept { return m_width; } + inline int GetHeight() const noexcept { return m_height; } + inline std::uint32_t GetID() const noexcept { return m_id; } + inline const std::string& GetName() const { return m_name; } + + inline void MoveMouse(int x, int y) { SDLManager::Get().MoveMouseOnWindow(p_window, x, y); } + inline void GetScreenSizeWindowIsOn(int* x, int* y) { SDLManager::Get().GetScreenSizeWindowIsOn(p_window, x, y); } + inline void SetPosition(int x, int y) { SDLManager::Get().SetWindowPosition(p_window, x, y); } + + inline VkSurfaceKHR CreateVulkanSurface(VkInstance instance) const noexcept { return SDLManager::Get().CreateVulkanSurface(p_window, instance); } + inline std::vector GetRequiredVulkanInstanceExtentions() const noexcept { return SDLManager::Get().GetRequiredVulkanInstanceExtentions(p_window); } + inline Vec2ui GetVulkanDrawableSize() const noexcept { return SDLManager::Get().GetVulkanDrawableSize(p_window); } + + void Destroy() noexcept; + + ~Window() { Destroy(); } + + private: + std::string m_name; + Handle p_window = nullptr; + std::int32_t m_id; + int m_width = 0; + int m_height = 0; + }; +} + +#endif diff --git a/runtime/Includes/PreCompiled.h b/runtime/Includes/PreCompiled.h new file mode 100644 index 0000000..4a958a9 --- /dev/null +++ b/runtime/Includes/PreCompiled.h @@ -0,0 +1,113 @@ +#ifndef __MLX_PRE_COMPILED_HEADER__ +#define __MLX_PRE_COMPILED_HEADER__ + +#define VK_NO_PROTOTYPES + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Experimentals +#if __has_include() + #include +#elif __has_include() + #include +#else + #error header not present in this STL +#endif + +#ifndef MLX_PLAT_WINDOWS + #include +#endif + +#ifdef MLX_PLAT_LINUX + #include // sincos +#endif + +#define 
VMA_STATIC_VULKAN_FUNCTIONS 0 +#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0 +#define VMA_VULKAN_VERSION 1000000 +#define VMA_ASSERT(expr) ((void)0) // Because why not + +#ifdef MLX_COMPILER_CLANG + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Weverything" + #include + #pragma clang diagnostic pop +#elif defined(MLX_COMPILER_GCC) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" + #pragma GCC diagnostic ignored "-Wmissing-field-initializers" + #pragma GCC diagnostic ignored "-Wunused-parameter" + #pragma GCC diagnostic ignored "-Wunused-variable" + #pragma GCC diagnostic ignored "-Wunused-function" + #pragma GCC diagnostic ignored "-Wparentheses" + #include + #pragma GCC diagnostic pop +#else + #include +#endif + +#include +#include + +#define KVF_IMPL_VK_NO_PROTOTYPES +#ifdef DEBUG + #define KVF_ENABLE_VALIDATION_LAYERS +#endif +#include + +#include +#include +#include +#include +#include + +constexpr const int RANGE = 1024; + +using Handle = void*; + +#endif diff --git a/runtime/Includes/Renderer/Buffer.h b/runtime/Includes/Renderer/Buffer.h new file mode 100644 index 0000000..0c4dcd1 --- /dev/null +++ b/runtime/Includes/Renderer/Buffer.h @@ -0,0 +1,86 @@ +#ifndef __MLX_GPU_BUFFER__ +#define __MLX_GPU_BUFFER__ + +#include +#include +#include + +namespace mlx +{ + class GPUBuffer + { + public: + GPUBuffer() = default; + + void Init(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, CPUBuffer data, [[maybe_unused]] std::string_view debug_name); + void Destroy() noexcept; + + bool CopyFrom(const GPUBuffer& buffer) noexcept; + + void Swap(GPUBuffer& buffer) noexcept; + + [[nodiscard]] MLX_FORCEINLINE void* GetMap() const noexcept { return p_map; } + [[nodiscard]] MLX_FORCEINLINE VkBuffer Get() const noexcept { return m_buffer; } + [[nodiscard]] MLX_FORCEINLINE VmaAllocation GetAllocation() const noexcept { return m_allocation; } + [[nodiscard]] MLX_FORCEINLINE VkDeviceSize GetSize() const noexcept { return m_size; } + [[nodiscard]] MLX_FORCEINLINE VkDeviceSize GetOffset() const noexcept { return m_offset; } + + [[nodiscard]] inline bool IsInit() const noexcept { return m_buffer != VK_NULL_HANDLE; } + + ~GPUBuffer() = default; + + protected: + void PushToGPU() noexcept; + + protected: + #ifdef DEBUG + std::string m_debug_name; + #endif + VkBuffer m_buffer = VK_NULL_HANDLE; + VmaAllocation m_allocation; + VkDeviceSize m_offset = 0; + VkDeviceSize m_size = 0; + void* p_map = nullptr; + + private: + void CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VmaAllocationCreateInfo alloc_info, [[maybe_unused]] std::string_view debug_name); + + private: + VkBufferUsageFlags m_usage = 0; + }; + + class VertexBuffer : public GPUBuffer + { + public: + inline void Init(std::uint32_t size, VkBufferUsageFlags additional_flags, [[maybe_unused]] std::string_view debug_name) { GPUBuffer::Init(BufferType::LowDynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | additional_flags, {}, std::move(debug_name)); } + void SetData(CPUBuffer data); + inline void Bind(VkCommandBuffer cmd) const noexcept { VkDeviceSize offset = 0; RenderCore::Get().vkCmdBindVertexBuffers(cmd, 0, 1, &m_buffer, &offset); } + }; + + class IndexBuffer : public GPUBuffer + { + public: + inline void Init(std::uint32_t size, VkBufferUsageFlags additional_flags, [[maybe_unused]] std::string_view debug_name) { GPUBuffer::Init(BufferType::LowDynamic, size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | additional_flags, {}, std::move(debug_name)); } + void SetData(CPUBuffer data); + inline 
void Bind(VkCommandBuffer cmd) const noexcept { RenderCore::Get().vkCmdBindIndexBuffer(cmd, m_buffer, 0, VK_INDEX_TYPE_UINT32); } + }; + + class UniformBuffer + { + public: + void Init(std::uint32_t size, [[maybe_unused]] std::string_view debug_name); + void SetData(CPUBuffer data, std::size_t frame_index); + void Destroy() noexcept; + + inline VkDeviceSize GetSize(int i) const noexcept { return m_buffers[i].GetSize(); } + inline VkDeviceSize GetOffset(int i) const noexcept { return m_buffers[i].GetOffset(); } + inline VkBuffer GetVk(int i) const noexcept { return m_buffers[i].Get(); } + inline GPUBuffer& Get(int i) noexcept { return m_buffers[i]; } + + private: + std::array m_buffers; + std::array m_maps; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Descriptor.h b/runtime/Includes/Renderer/Descriptor.h new file mode 100644 index 0000000..0a3b9e9 --- /dev/null +++ b/runtime/Includes/Renderer/Descriptor.h @@ -0,0 +1,89 @@ +#ifndef __MLX_DESCRIPTOR_SET__ +#define __MLX_DESCRIPTOR_SET__ + +#include +#include +#include + +namespace mlx +{ + struct Descriptor + { + NonOwningPtr storage_buffer_ptr; + NonOwningPtr uniform_buffer_ptr; + NonOwningPtr image_ptr; + VkDescriptorType type; + std::uint32_t binding; + }; + + class DescriptorPool + { + public: + DescriptorPool() = default; + + void Init() noexcept; + void Destroy() noexcept; + + std::shared_ptr RequestDescriptorSet(const ShaderSetLayout& layout, ShaderType shader_type); + void ReturnDescriptorSet(std::shared_ptr set); + + [[nodiscard]] inline VkDescriptorPool Get() const noexcept { return m_pool; } + [[nodiscard]] MLX_FORCEINLINE std::size_t GetNumberOfSetsAllocated() const noexcept { return m_allocation_count; } + + ~DescriptorPool() = default; + + private: + std::vector> m_free_sets; + std::vector> m_used_sets; + VkDescriptorPool m_pool; + std::size_t m_allocation_count = 0; + }; + + class DescriptorPoolManager + { + public: + DescriptorPoolManager() = default; + + DescriptorPool& GetAvailablePool(); + void Destroy(); + + ~DescriptorPoolManager() = default; + + private: + std::vector m_pools; + }; + + class DescriptorSet : public std::enable_shared_from_this + { + friend DescriptorPool; + + public: + void SetImage(std::size_t i, std::uint32_t binding, class Image& image); + void SetStorageBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer); + void SetUniformBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer); + void Update(std::size_t i, VkCommandBuffer cmd = VK_NULL_HANDLE) noexcept; + + void ReturnDescriptorSetToPool(); + + [[nodiscard]] inline VkDescriptorSet GetSet(std::size_t i) const noexcept { return m_sets[i]; } + [[nodiscard]] MLX_FORCEINLINE bool IsInit() const noexcept { return m_sets[0] != VK_NULL_HANDLE; } + [[nodiscard]] MLX_FORCEINLINE VkDescriptorSetLayout GetVulkanLayout() const noexcept { return m_set_layout; } + [[nodiscard]] MLX_FORCEINLINE const ShaderSetLayout& GetShaderLayout() const { return m_shader_layout; } + [[nodiscard]] MLX_FORCEINLINE ShaderType GetShaderType() const noexcept { return m_shader_type; } + + ~DescriptorSet() = default; + + private: + DescriptorSet(DescriptorPool& pool, VkDescriptorSetLayout vulkan_layout, const ShaderSetLayout& layout, std::array vulkan_sets, ShaderType shader_type); + + private: + ShaderSetLayout m_shader_layout; + std::vector m_descriptors; + std::array m_sets; + VkDescriptorSetLayout m_set_layout; + ShaderType m_shader_type; + DescriptorPool& m_pool; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Enums.h 
b/runtime/Includes/Renderer/Enums.h new file mode 100644 index 0000000..80431a1 --- /dev/null +++ b/runtime/Includes/Renderer/Enums.h @@ -0,0 +1,32 @@ +#ifndef __MLX_RENDERER_ENUMS__ +#define __MLX_RENDERER_ENUMS__ + +namespace mlx +{ + enum class BufferType + { + Constant = 0, + Staging, + HighDynamic, // typically stored in RAM + LowDynamic, // typically stored in VRAM + + EndEnum + }; + constexpr std::size_t BufferTypeCount = static_cast(BufferType::EndEnum); + + enum class ImageType + { + Color = 0, + + EndEnum + }; + constexpr std::size_t ImageTypeCount = static_cast(ImageType::EndEnum); + + enum class ShaderType + { + Vertex, + Fragment + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Image.h b/runtime/Includes/Renderer/Image.h new file mode 100644 index 0000000..c9fd54e --- /dev/null +++ b/runtime/Includes/Renderer/Image.h @@ -0,0 +1,105 @@ +#ifndef __MLX_IMAGE__ +#define __MLX_IMAGE__ + +#include +#include +#include +#include +#include + +namespace mlx +{ + class Image + { + public: + Image() = default; + + inline void Init(VkImage image, VkFormat format, std::uint32_t width, std::uint32_t height, VkImageLayout layout, [[maybe_unused]] std::string_view debug_name) noexcept + { + m_image = image; + m_format = format; + m_width = width; + m_height = height; + m_layout = layout; + #ifdef DEBUG + m_debug_name = std::move(debug_name); + #endif + } + + void Init(ImageType type, std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, bool is_multisampled, std::string_view debug_name); + void CreateImageView(VkImageViewType type, VkImageAspectFlags aspectFlags, int layer_count = 1) noexcept; + void CreateSampler() noexcept; + void TransitionLayout(VkImageLayout new_layout, VkCommandBuffer cmd = VK_NULL_HANDLE); + void Clear(VkCommandBuffer cmd, Vec4f color); + + void DestroySampler() noexcept; + void DestroyImageView() noexcept; + virtual void Destroy() noexcept; + + [[nodiscard]] MLX_FORCEINLINE VkImage Get() const noexcept { return m_image; } + [[nodiscard]] MLX_FORCEINLINE VmaAllocation GetAllocation() const noexcept { return m_allocation; } + [[nodiscard]] MLX_FORCEINLINE VkImageView GetImageView() const noexcept { return m_image_view; } + [[nodiscard]] MLX_FORCEINLINE VkFormat GetFormat() const noexcept { return m_format; } + [[nodiscard]] MLX_FORCEINLINE VkImageTiling GetTiling() const noexcept { return m_tiling; } + [[nodiscard]] MLX_FORCEINLINE VkImageLayout GetLayout() const noexcept { return m_layout; } + [[nodiscard]] MLX_FORCEINLINE VkSampler GetSampler() const noexcept { return m_sampler; } + [[nodiscard]] MLX_FORCEINLINE std::uint32_t GetWidth() const noexcept { return m_width; } + [[nodiscard]] MLX_FORCEINLINE std::uint32_t GetHeight() const noexcept { return m_height; } + [[nodiscard]] MLX_FORCEINLINE bool IsInit() const noexcept { return m_image != VK_NULL_HANDLE; } + [[nodiscard]] MLX_FORCEINLINE ImageType GetType() const noexcept { return m_type; } + + #ifdef DEBUG + [[nodiscard]] MLX_FORCEINLINE const std::string& GetDebugName() const { return m_debug_name; } + #endif + + virtual ~Image() = default; + + protected: + #ifdef DEBUG + std::string m_debug_name; + #endif + VmaAllocation m_allocation; + VkImage m_image = VK_NULL_HANDLE; + VkImageView m_image_view = VK_NULL_HANDLE; + VkSampler m_sampler = VK_NULL_HANDLE; + VkFormat m_format; + VkImageTiling m_tiling; + VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED; + ImageType m_type; + std::uint32_t m_width = 0; + std::uint32_t m_height = 0; + bool 
m_is_multisampled = false; + }; + + class Texture: public Image + { + public: + Texture() = default; + Texture(CPUBuffer pixels, std::uint32_t width, std::uint32_t height, VkFormat format, bool is_multisampled, [[maybe_unused]] std::string_view debug_name) + { + Init(std::move(pixels), width, height, format, is_multisampled, std::move(debug_name)); + } + + void Init(CPUBuffer pixels, std::uint32_t width, std::uint32_t height, VkFormat format, bool is_multisampled, [[maybe_unused]] std::string_view debug_name); + void Destroy() noexcept override; + + void SetPixel(int x, int y, std::uint32_t color) noexcept; + int GetPixel(int x, int y) noexcept; + + void Update(VkCommandBuffer cmd); + + ~Texture() override { Destroy(); } + + private: + void OpenCPUBuffer(); + + private: + std::vector m_cpu_buffer; + std::optional m_staging_buffer; + bool m_has_been_modified = false; + }; + + Texture* StbTextureLoad(const std::filesystem::path& file, int* w, int* h); +} + +#endif diff --git a/runtime/Includes/Renderer/Memory.h b/runtime/Includes/Renderer/Memory.h new file mode 100644 index 0000000..26f0b99 --- /dev/null +++ b/runtime/Includes/Renderer/Memory.h @@ -0,0 +1,36 @@ +#ifndef __MLX_VK_MEMORY__ +#define __MLX_VK_MEMORY__ + +namespace mlx +{ + class GPUAllocator + { + public: + GPUAllocator() = default; + + void Init() noexcept; + void Destroy() noexcept; + + VmaAllocation CreateBuffer(const VkBufferCreateInfo* binfo, const VmaAllocationCreateInfo* vinfo, VkBuffer& buffer, const char* name = nullptr) noexcept; + void DestroyBuffer(VmaAllocation allocation, VkBuffer buffer, const char* name) noexcept; + + VmaAllocation CreateImage(const VkImageCreateInfo* iminfo, const VmaAllocationCreateInfo* vinfo, VkImage& image, const char* name = nullptr) noexcept; + void DestroyImage(VmaAllocation allocation, VkImage image, const char* name) noexcept; + + void MapMemory(VmaAllocation allocation, void** data) noexcept; + void UnmapMemory(VmaAllocation allocation) noexcept; + + void DumpMemoryToJson(); + + void Flush(VmaAllocation allocation, VkDeviceSize size, VkDeviceSize offset) noexcept; + + ~GPUAllocator() = default; + + private: + VmaAllocator m_allocator; + std::int32_t m_active_buffers_allocations = 0; + std::int32_t m_active_images_allocations = 0; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Pipelines/Graphics.h b/runtime/Includes/Renderer/Pipelines/Graphics.h new file mode 100644 index 0000000..a474f80 --- /dev/null +++ b/runtime/Includes/Renderer/Pipelines/Graphics.h @@ -0,0 +1,60 @@ +#ifndef __MLX_GRAPHICS_PIPELINE__ +#define __MLX_GRAPHICS_PIPELINE__ + +#include +#include +#include +#include + +namespace mlx +{ + struct GraphicPipelineDescriptor + { + std::shared_ptr vertex_shader; + std::shared_ptr fragment_shader; + std::vector> color_attachments; + NonOwningPtr renderer = nullptr; + bool clear_color_attachments = true; + bool no_vertex_inputs = false; + }; + + class GraphicPipeline : public Pipeline + { + public: + GraphicPipeline() = default; + + void Init(const GraphicPipelineDescriptor& descriptor, std::string_view debug_name); + bool BindPipeline(VkCommandBuffer cmd, std::size_t framebuffer_index, std::array clear) noexcept; + void EndPipeline(VkCommandBuffer cmd) noexcept override; + void Destroy() noexcept; + + [[nodiscard]] inline VkPipeline GetPipeline() const override { return m_pipeline; } + [[nodiscard]] inline VkPipelineLayout GetPipelineLayout() const override { return m_pipeline_layout; } + [[nodiscard]] inline VkPipelineBindPoint GetPipelineBindPoint() const override { 
return VK_PIPELINE_BIND_POINT_GRAPHICS; } + + ~GraphicPipeline() = default; + + private: + void CreateFramebuffers(const std::vector>& render_targets, bool clear_attachments); + void TransitionAttachments(VkCommandBuffer cmd = VK_NULL_HANDLE); + + // Private override to remove access + bool BindPipeline(VkCommandBuffer) noexcept override { return false; }; + + private: + std::vector> m_attachments; + std::vector m_framebuffers; + std::vector m_clears; + #ifdef DEBUG + std::string m_debug_name; + #endif + std::shared_ptr p_vertex_shader; + std::shared_ptr p_fragment_shader; + VkRenderPass m_renderpass = VK_NULL_HANDLE; + VkPipeline m_pipeline = VK_NULL_HANDLE; + VkPipelineLayout m_pipeline_layout = VK_NULL_HANDLE; + NonOwningPtr p_renderer; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Pipelines/Pipeline.h b/runtime/Includes/Renderer/Pipelines/Pipeline.h new file mode 100644 index 0000000..f92f1b9 --- /dev/null +++ b/runtime/Includes/Renderer/Pipelines/Pipeline.h @@ -0,0 +1,22 @@ +#ifndef __MLX_PIPELINE__ +#define __MLX_PIPELINE__ + +namespace mlx +{ + class Pipeline + { + public: + Pipeline() = default; + + inline virtual bool BindPipeline(VkCommandBuffer command_buffer) noexcept { RenderCore::Get().vkCmdBindPipeline(command_buffer, GetPipelineBindPoint(), GetPipeline()); return true; } + inline virtual void EndPipeline([[maybe_unused]] VkCommandBuffer command_buffer) noexcept {} + + virtual VkPipeline GetPipeline() const = 0; + virtual VkPipelineLayout GetPipelineLayout() const = 0; + virtual VkPipelineBindPoint GetPipelineBindPoint() const = 0; + + virtual ~Pipeline() = default; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Pipelines/Shader.h b/runtime/Includes/Renderer/Pipelines/Shader.h new file mode 100644 index 0000000..54cbc7d --- /dev/null +++ b/runtime/Includes/Renderer/Pipelines/Shader.h @@ -0,0 +1,65 @@ +#ifndef __MLX_SHADER__ +#define __MLX_SHADER__ + +#include + +namespace mlx +{ + struct ShaderSetLayout + { + std::vector> binds; + + ShaderSetLayout(std::vector > b) : binds(std::move(b)) {} + + inline bool operator==(const ShaderSetLayout& rhs) const { return binds == rhs.binds; } + }; + + struct ShaderPushConstantLayout + { + std::size_t offset; + std::size_t size; + + ShaderPushConstantLayout(std::size_t o, std::size_t s) : offset(o), size(s) {} + }; + + struct ShaderLayout + { + std::vector> set_layouts; + std::vector push_constants; + + ShaderLayout(std::vector > s, std::vector pc) : set_layouts(std::move(s)), push_constants(std::move(pc)) {} + }; + + struct ShaderPipelineLayoutPart + { + std::vector push_constants; + std::vector set_layouts; + }; + + class Shader + { + public: + Shader(const std::vector& bytecode, ShaderType type, ShaderLayout layout); + + [[nodiscard]] inline const ShaderLayout& GetShaderLayout() const { return m_layout; } + [[nodiscard]] inline const std::vector& GetByteCode() const noexcept { return m_bytecode; } + [[nodiscard]] inline const ShaderPipelineLayoutPart& GetPipelineLayout() const noexcept { return m_pipeline_layout_part; } + [[nodiscard]] inline VkShaderModule GetShaderModule() const noexcept { return m_module; } + [[nodiscard]] inline VkShaderStageFlagBits GetShaderStage() const noexcept { return m_stage; } + + ~Shader(); + + private: + void GeneratePipelineLayout(ShaderLayout layout); + + private: + ShaderLayout m_layout; + ShaderPipelineLayoutPart m_pipeline_layout_part; + std::vector m_bytecode; + std::vector m_set_layouts; + VkShaderStageFlagBits m_stage; + VkShaderModule m_module = VK_NULL_HANDLE; + }; +} + 
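+	// Usage sketch (illustrative only; the set/binding layout and the SPIR-V source are
+	// assumptions, not the layout the library actually ships): a ShaderLayout is a list of
+	// (set, ShaderSetLayout) pairs plus push-constant ranges, and a Shader pairs SPIR-V
+	// bytecode with that reflection data.
+	//
+	//	ShaderLayout layout(
+	//		{ { 0, ShaderSetLayout({ { 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER } }) } },
+	//		{ ShaderPushConstantLayout(0, 4 * sizeof(float)) }
+	//	);
+	//	std::vector<std::uint32_t> spirv = /* e.g. bytecode produced by nzslc */;
+	//	auto vertex = std::make_shared<Shader>(spirv, ShaderType::Vertex, std::move(layout));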
+#endif diff --git a/runtime/Includes/Renderer/RenderCore.h b/runtime/Includes/Renderer/RenderCore.h new file mode 100644 index 0000000..f6b126c --- /dev/null +++ b/runtime/Includes/Renderer/RenderCore.h @@ -0,0 +1,58 @@ +#ifndef __MLX_RENDER_CORE__ +#define __MLX_RENDER_CORE__ + +constexpr const int MAX_FRAMES_IN_FLIGHT = 2; + +#include +#include + +namespace mlx +{ + #if defined(DEBUG) && defined(VK_EXT_debug_utils) + #define MLX_HAS_DEBUG_UTILS_FUNCTIONS + #endif + + class RenderCore + { + public: + RenderCore(); + + [[nodiscard]] MLX_FORCEINLINE VkInstance GetInstance() const noexcept { return m_instance; } + [[nodiscard]] MLX_FORCEINLINE VkInstance& GetInstanceRef() noexcept { return m_instance; } + [[nodiscard]] MLX_FORCEINLINE VkDevice GetDevice() const noexcept { return m_device; } + [[nodiscard]] MLX_FORCEINLINE VkPhysicalDevice GetPhysicalDevice() const noexcept { return m_physical_device; } + [[nodiscard]] MLX_FORCEINLINE GPUAllocator& GetAllocator() noexcept { return m_allocator; } + [[nodiscard]] inline DescriptorPoolManager& GetDescriptorPoolManager() noexcept { return m_descriptor_pool_manager; } + + inline void WaitDeviceIdle() const noexcept { vkDeviceWaitIdle(m_device); } + + inline static bool IsInit() noexcept { return s_instance != nullptr; } + inline static RenderCore& Get() noexcept { return *s_instance; } + + #define MLX_VULKAN_GLOBAL_FUNCTION(fn) PFN_##fn fn = nullptr; + #define MLX_VULKAN_INSTANCE_FUNCTION(fn) PFN_##fn fn = nullptr; + #define MLX_VULKAN_DEVICE_FUNCTION(fn) PFN_##fn fn = nullptr; + #include + #undef MLX_VULKAN_GLOBAL_FUNCTION + #undef MLX_VULKAN_INSTANCE_FUNCTION + #undef MLX_VULKAN_DEVICE_FUNCTION + + ~RenderCore(); + + private: + void LoadKVFGlobalVulkanFunctionPointers() const noexcept; + void LoadKVFInstanceVulkanFunctionPointers() const noexcept; + void LoadKVFDeviceVulkanFunctionPointers() const noexcept; + + private: + static RenderCore* s_instance; + + DescriptorPoolManager m_descriptor_pool_manager; + GPUAllocator m_allocator; + VkInstance m_instance = VK_NULL_HANDLE; + VkDevice m_device = VK_NULL_HANDLE; + VkPhysicalDevice m_physical_device = VK_NULL_HANDLE; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/RenderPasses/2DPass.h b/runtime/Includes/Renderer/RenderPasses/2DPass.h new file mode 100644 index 0000000..a9cc015 --- /dev/null +++ b/runtime/Includes/Renderer/RenderPasses/2DPass.h @@ -0,0 +1,29 @@ +#ifndef __MLX_2D_PASS__ +#define __MLX_2D_PASS__ + +#include +#include +#include + +namespace mlx +{ + class Render2DPass + { + public: + Render2DPass() = default; + void Init(); + void Pass(class Scene& scene, class Renderer& renderer, class Texture& render_target); + void Destroy(); + ~Render2DPass() = default; + + private: + GraphicPipeline m_pipeline; + std::shared_ptr p_viewer_data_set; + std::shared_ptr p_viewer_data_buffer; + std::shared_ptr p_texture_set; + std::shared_ptr p_vertex_shader; + std::shared_ptr p_fragment_shader; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/RenderPasses/FinalPass.h b/runtime/Includes/Renderer/RenderPasses/FinalPass.h new file mode 100644 index 0000000..1de1d0e --- /dev/null +++ b/runtime/Includes/Renderer/RenderPasses/FinalPass.h @@ -0,0 +1,27 @@ +#ifndef __MLX_FINAL_PASS__ +#define __MLX_FINAL_PASS__ + +#include +#include +#include + +namespace mlx +{ + class FinalPass + { + public: + FinalPass() = default; + void Init(); + void Pass(class Scene& scene, class Renderer& renderer, class Texture& render_target, NonOwningPtr final_target); + void Destroy(); + ~FinalPass() = default; + 
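// ---------------------------------------------------------------------------
// [Editor's aside - not part of this diff]
// RenderCore.h above relies on an X-macro include: each MLX_VULKAN_*_FUNCTION(fn)
// entry in Renderer/Vulkan/VulkanDefs.h (added later in this patch) expands once
// per inclusion, so a single list of entry points can both declare every PFN_*
// member and later resolve it. Sketch of the pattern; the loading pass below is
// an assumption about what LoadKVFDeviceVulkanFunctionPointers() might expand
// to, not the actual implementation (the #include targets are stripped in this
// diff, so the path is assumed).
//
// Declaration pass (as done inside the RenderCore class above):
//     #define MLX_VULKAN_DEVICE_FUNCTION(fn) PFN_##fn fn = nullptr;
//     #include <Renderer/Vulkan/VulkanDefs.h>
//     #undef MLX_VULKAN_DEVICE_FUNCTION
// so that MLX_VULKAN_DEVICE_FUNCTION(vkCreateBuffer) becomes:
//     PFN_vkCreateBuffer vkCreateBuffer = nullptr;
//
// Plausible loading pass, reusing the same list once vkGetDeviceProcAddr is known:
//     #define MLX_VULKAN_DEVICE_FUNCTION(fn) \
//         fn = reinterpret_cast<PFN_##fn>(vkGetDeviceProcAddr(m_device, #fn));
//     #include <Renderer/Vulkan/VulkanDefs.h>
//     #undef MLX_VULKAN_DEVICE_FUNCTION
// --------------------------- [end editor's aside] ---------------------------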
+ private: + GraphicPipeline m_pipeline; + std::shared_ptr p_set; + std::shared_ptr p_vertex_shader; + std::shared_ptr p_fragment_shader; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/RenderPasses/Passes.h b/runtime/Includes/Renderer/RenderPasses/Passes.h new file mode 100644 index 0000000..48dfc84 --- /dev/null +++ b/runtime/Includes/Renderer/RenderPasses/Passes.h @@ -0,0 +1,29 @@ +#ifndef __MLX_PASSES__ +#define __MLX_PASSES__ + +#include +#include +#include + +namespace mlx +{ + class RenderPasses + { + public: + RenderPasses() = default; + + void Init(NonOwningPtr render_target); + void Pass(class Scene& scene, class Renderer& renderer, const Vec4f& clear_color); + void Destroy(); + + ~RenderPasses() = default; + + private: + Render2DPass m_2Dpass; + FinalPass m_final; + Texture m_main_render_texture; + NonOwningPtr p_render_target; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Renderer.h b/runtime/Includes/Renderer/Renderer.h new file mode 100644 index 0000000..0217a45 --- /dev/null +++ b/runtime/Includes/Renderer/Renderer.h @@ -0,0 +1,53 @@ +#ifndef __MLX_RENDERER__ +#define __MLX_RENDERER__ + +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + class Renderer + { + public: + Renderer() = default; + + void Init(NonOwningPtr window); + void Init(NonOwningPtr render_target); + + void BeginFrame(); + void EndFrame(); + + [[nodiscard]] inline VkSemaphore GetImageAvailableSemaphore(int index) const noexcept { return m_image_available_semaphores[index]; } + [[nodiscard]] inline VkSemaphore GetRenderFinishedSemaphore(int index) const noexcept { return m_render_finished_semaphores[index]; } + [[nodiscard]] inline VkCommandBuffer GetCommandBuffer(int index) const noexcept { return m_cmd_buffers[index]; } + [[nodiscard]] inline VkCommandBuffer GetActiveCommandBuffer() const noexcept { return m_cmd_buffers[m_current_frame_index]; } + [[nodiscard]] inline std::size_t& GetDrawCallsCounterRef() noexcept { return m_drawcalls; } + [[nodiscard]] inline std::size_t& GetPolygonDrawnCounterRef() noexcept { return m_polygons_drawn; } + [[nodiscard]] inline std::size_t GetCurrentFrameIndex() const noexcept { return m_current_frame_index; } + [[nodiscard]] inline NonOwningPtr GetWindow() const noexcept { return p_window; } + [[nodiscard]] inline NonOwningPtr GetRenderTarget() const noexcept { return p_render_target; } + [[nodiscard]] inline const Swapchain& GetSwapchain() const noexcept { return m_swapchain; } + + void Destroy() noexcept; + + ~Renderer() = default; + + private: + Swapchain m_swapchain; + std::array m_image_available_semaphores; + std::array m_render_finished_semaphores; + std::array m_cmd_buffers; + std::array m_cmd_fences; + NonOwningPtr p_window; + NonOwningPtr p_render_target; + std::uint32_t m_current_frame_index = 0; + std::size_t m_polygons_drawn = 0; + std::size_t m_drawcalls = 0; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/ScenesRenderer.h b/runtime/Includes/Renderer/ScenesRenderer.h new file mode 100644 index 0000000..2919d5f --- /dev/null +++ b/runtime/Includes/Renderer/ScenesRenderer.h @@ -0,0 +1,22 @@ +#ifndef __MLX_SCENES_RENDERER__ +#define __MLX_SCENES_RENDERER__ + +#include + +namespace mlx +{ + class SceneRenderer + { + public: + SceneRenderer() = default; + void Init(NonOwningPtr render_target); + void Render(class Scene& scene, class Renderer& renderer); + void Destroy(); + ~SceneRenderer() = default; + + private: + RenderPasses m_passes; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Swapchain.h 
b/runtime/Includes/Renderer/Swapchain.h new file mode 100644 index 0000000..c2ca82a --- /dev/null +++ b/runtime/Includes/Renderer/Swapchain.h @@ -0,0 +1,43 @@ +#ifndef __MLX_SWAPCHAIN__ +#define __MLX_SWAPCHAIN__ + +#include +#include + +namespace mlx +{ + class Swapchain + { + public: + Swapchain() = default; + + void Init(NonOwningPtr window); + void AquireFrame(VkSemaphore signal); + void Present(VkSemaphore wait) noexcept; + void Destroy(); + + [[nodiscard]] inline VkSwapchainKHR Get() const noexcept { return m_swapchain; } + [[nodiscard]] inline VkSurfaceKHR GetSurface() const noexcept { return m_surface; } + [[nodiscard]] inline std::uint32_t GetImagesCount() const noexcept { return m_images_count; } + [[nodiscard]] inline std::uint32_t GetMinImagesCount() const noexcept { return m_min_images_count; } + [[nodiscard]] inline std::uint32_t GetImageIndex() const noexcept { return m_current_image_index; } + [[nodiscard]] inline const std::vector& GetSwapchainImages() const { return m_swapchain_images; } + + ~Swapchain() = default; + + private: + void CreateSwapchain(); + + private: + std::vector m_swapchain_images; + VkSwapchainKHR m_swapchain = VK_NULL_HANDLE; + VkSurfaceKHR m_surface = VK_NULL_HANDLE; + NonOwningPtr p_window; + std::uint32_t m_images_count = 0; + std::uint32_t m_min_images_count = 0; + std::uint32_t m_current_image_index = 0; + bool m_resize = false; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Vertex.h b/runtime/Includes/Renderer/Vertex.h new file mode 100644 index 0000000..d815a3e --- /dev/null +++ b/runtime/Includes/Renderer/Vertex.h @@ -0,0 +1,24 @@ +#ifndef __MLX_VERTEX__ +#define __MLX_VERTEX__ + +#include +#include + +namespace mlx +{ + struct Vertex + { + alignas(16) Vec4f position = Vec4f{ 0.0f, 0.0f, 0.0f, 1.0f }; + alignas(16) Vec2f uv = Vec2f{ 0.0f, 0.0f }; + + Vertex() = default; + Vertex(Vec4f p, Vec2f u) : position(std::move(p)), uv(std::move(u)) {} + + [[nodiscard]] inline static VkVertexInputBindingDescription GetBindingDescription(); + [[nodiscard]] inline static std::array GetAttributeDescriptions(); + }; +} + +#include + +#endif diff --git a/runtime/Includes/Renderer/Vertex.inl b/runtime/Includes/Renderer/Vertex.inl new file mode 100644 index 0000000..fb09f95 --- /dev/null +++ b/runtime/Includes/Renderer/Vertex.inl @@ -0,0 +1,31 @@ +#pragma once +#include + +namespace mlx +{ + VkVertexInputBindingDescription Vertex::GetBindingDescription() + { + VkVertexInputBindingDescription binding_description{}; + binding_description.binding = 0; + binding_description.stride = sizeof(Vertex); + binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + return binding_description; + } + + std::array Vertex::GetAttributeDescriptions() + { + std::array attribute_descriptions; + + attribute_descriptions[0].binding = 0; + attribute_descriptions[0].location = 0; + attribute_descriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; + attribute_descriptions[0].offset = offsetof(Vertex, position); + + attribute_descriptions[1].binding = 0; + attribute_descriptions[1].location = 1; + attribute_descriptions[1].format = VK_FORMAT_R32G32_SFLOAT; + attribute_descriptions[1].offset = offsetof(Vertex, uv); + + return attribute_descriptions; + } +} diff --git a/runtime/Includes/Renderer/ViewerData.h b/runtime/Includes/Renderer/ViewerData.h new file mode 100644 index 0000000..e9685b6 --- /dev/null +++ b/runtime/Includes/Renderer/ViewerData.h @@ -0,0 +1,14 @@ +#ifndef __MLX_VIEWER_DATA__ +#define __MLX_VIEWER_DATA__ + +#include + +namespace mlx +{ + struct ViewerData 
+ { + Mat4f projection_matrix; + }; +} + +#endif diff --git a/runtime/Includes/Renderer/Vulkan/VulkanDefs.h b/runtime/Includes/Renderer/Vulkan/VulkanDefs.h new file mode 100644 index 0000000..5ee802e --- /dev/null +++ b/runtime/Includes/Renderer/Vulkan/VulkanDefs.h @@ -0,0 +1,127 @@ +// No header guard + +#ifdef VK_VERSION_1_0 + #ifdef MLX_VULKAN_GLOBAL_FUNCTION + MLX_VULKAN_GLOBAL_FUNCTION(vkCreateInstance) + MLX_VULKAN_GLOBAL_FUNCTION(vkEnumerateInstanceExtensionProperties) + MLX_VULKAN_GLOBAL_FUNCTION(vkEnumerateInstanceLayerProperties) + MLX_VULKAN_GLOBAL_FUNCTION(vkGetInstanceProcAddr) + #endif + + #ifdef MLX_VULKAN_INSTANCE_FUNCTION + MLX_VULKAN_INSTANCE_FUNCTION(vkCreateDevice) + MLX_VULKAN_INSTANCE_FUNCTION(vkDestroyInstance) + MLX_VULKAN_INSTANCE_FUNCTION(vkEnumerateDeviceExtensionProperties) + MLX_VULKAN_INSTANCE_FUNCTION(vkEnumeratePhysicalDevices) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetDeviceProcAddr) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceFeatures) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceFormatProperties) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceImageFormatProperties) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceMemoryProperties) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceProperties) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties) + #ifdef DEBUG + #ifdef VK_EXT_debug_utils + MLX_VULKAN_INSTANCE_FUNCTION(vkSetDebugUtilsObjectNameEXT) + //MLX_VULKAN_INSTANCE_FUNCTION(vkSetDebugUtilsObjectTagEXT) + #endif + #endif + #endif + + #ifdef MLX_VULKAN_DEVICE_FUNCTION + MLX_VULKAN_DEVICE_FUNCTION(vkAllocateCommandBuffers) + MLX_VULKAN_DEVICE_FUNCTION(vkAllocateDescriptorSets) + MLX_VULKAN_DEVICE_FUNCTION(vkAllocateMemory) + MLX_VULKAN_DEVICE_FUNCTION(vkBeginCommandBuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkBindBufferMemory) + MLX_VULKAN_DEVICE_FUNCTION(vkBindImageMemory) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdBeginRenderPass) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdBindDescriptorSets) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdBindIndexBuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdBindPipeline) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdBindVertexBuffers) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdClearAttachments) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdClearColorImage) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdClearDepthStencilImage) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdCopyBuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdCopyBufferToImage) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdCopyImage) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdCopyImageToBuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdDraw) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdDrawIndexed) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdEndRenderPass) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdPipelineBarrier) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdPushConstants) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdSetScissor) + MLX_VULKAN_DEVICE_FUNCTION(vkCmdSetViewport) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateBuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateCommandPool) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateDescriptorPool) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateDescriptorSetLayout) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateFence) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateFramebuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateGraphicsPipelines) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateImage) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateImageView) + MLX_VULKAN_DEVICE_FUNCTION(vkCreatePipelineLayout) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateRenderPass) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateSampler) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateSemaphore) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateShaderModule) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyBuffer) + 
MLX_VULKAN_DEVICE_FUNCTION(vkDestroyCommandPool) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyDescriptorPool) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyDescriptorSetLayout) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyDevice) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyFence) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyFramebuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyImage) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyImageView) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyPipeline) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyPipelineLayout) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyRenderPass) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroySampler) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroySemaphore) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroyShaderModule) + MLX_VULKAN_DEVICE_FUNCTION(vkDeviceWaitIdle) + MLX_VULKAN_DEVICE_FUNCTION(vkEndCommandBuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkFlushMappedMemoryRanges) + MLX_VULKAN_DEVICE_FUNCTION(vkFreeCommandBuffers) + MLX_VULKAN_DEVICE_FUNCTION(vkFreeMemory) + MLX_VULKAN_DEVICE_FUNCTION(vkGetBufferMemoryRequirements) + MLX_VULKAN_DEVICE_FUNCTION(vkGetDeviceMemoryCommitment) + MLX_VULKAN_DEVICE_FUNCTION(vkGetDeviceQueue) + MLX_VULKAN_DEVICE_FUNCTION(vkGetFenceStatus) + MLX_VULKAN_DEVICE_FUNCTION(vkGetImageMemoryRequirements) + MLX_VULKAN_DEVICE_FUNCTION(vkGetImageSubresourceLayout) + MLX_VULKAN_DEVICE_FUNCTION(vkInvalidateMappedMemoryRanges) + MLX_VULKAN_DEVICE_FUNCTION(vkMapMemory) + MLX_VULKAN_DEVICE_FUNCTION(vkQueueSubmit) + MLX_VULKAN_DEVICE_FUNCTION(vkQueueWaitIdle) + MLX_VULKAN_DEVICE_FUNCTION(vkResetCommandBuffer) + MLX_VULKAN_DEVICE_FUNCTION(vkResetDescriptorPool) + MLX_VULKAN_DEVICE_FUNCTION(vkResetEvent) + MLX_VULKAN_DEVICE_FUNCTION(vkResetFences) + MLX_VULKAN_DEVICE_FUNCTION(vkUnmapMemory) + MLX_VULKAN_DEVICE_FUNCTION(vkUpdateDescriptorSets) + MLX_VULKAN_DEVICE_FUNCTION(vkWaitForFences) + #endif +#endif +#ifdef VK_KHR_swapchain + #ifdef MLX_VULKAN_DEVICE_FUNCTION + MLX_VULKAN_DEVICE_FUNCTION(vkAcquireNextImageKHR) + MLX_VULKAN_DEVICE_FUNCTION(vkCreateSwapchainKHR) + MLX_VULKAN_DEVICE_FUNCTION(vkDestroySwapchainKHR) + MLX_VULKAN_DEVICE_FUNCTION(vkGetSwapchainImagesKHR) + MLX_VULKAN_DEVICE_FUNCTION(vkQueuePresentKHR) + #endif +#endif +#ifdef VK_KHR_surface + #ifdef MLX_VULKAN_INSTANCE_FUNCTION + MLX_VULKAN_INSTANCE_FUNCTION(vkDestroySurfaceKHR) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceCapabilitiesKHR) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfacePresentModesKHR) + MLX_VULKAN_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceSupportKHR) + #endif +#endif diff --git a/runtime/Includes/Utils/Ansi.h b/runtime/Includes/Utils/Ansi.h new file mode 100644 index 0000000..a299d74 --- /dev/null +++ b/runtime/Includes/Utils/Ansi.h @@ -0,0 +1,44 @@ +#ifndef __MLX_ANSI__ +#define __MLX_ANSI__ + +#include +#include + +namespace mlx +{ + enum class Ansi : std::uint32_t + { + red = 31, + green = 32, + blue = 34, + def = 0, + black = 30, + yellow = 33, + magenta = 35, + cyan = 36, + white = 37, + bg_red = 41, + bg_green = 42, + bg_blue = 44, + bg_def = 0, + bg_black = 40, + bg_yellow = 43, + bg_magenta = 45, + bg_cyan = 46, + bg_white = 47, + reset = 0, + bold = 1, + underline = 4, + inverse = 7, + bold_off = 21, + underline_off = 24, + inverse_off = 27 + }; + + inline std::ostream& operator<<(std::ostream& os, Ansi ansi) + { + return os << "\033[1;" << std::to_string(static_cast(ansi)) << "m"; + } +} + +#endif diff --git a/runtime/Includes/Utils/AntiWindows.h b/runtime/Includes/Utils/AntiWindows.h new file mode 100644 index 
0000000..4f9f600 --- /dev/null +++ b/runtime/Includes/Utils/AntiWindows.h @@ -0,0 +1,7 @@ +#undef CreateWindow +#undef GetEnvironmentVariable +#undef GetSystemDirectory +#undef MemoryBarrier +#undef RemoveDirectory +#undef min +#undef max diff --git a/runtime/Includes/Utils/AntiX11.h b/runtime/Includes/Utils/AntiX11.h new file mode 100644 index 0000000..2068855 --- /dev/null +++ b/runtime/Includes/Utils/AntiX11.h @@ -0,0 +1,5 @@ +#undef Always +#undef Bool +#undef False +#undef None +#undef True diff --git a/runtime/Includes/Utils/Buffer.h b/runtime/Includes/Utils/Buffer.h new file mode 100644 index 0000000..fe95d25 --- /dev/null +++ b/runtime/Includes/Utils/Buffer.h @@ -0,0 +1,41 @@ +#ifndef __MLX_CPU_BUFFER__ +#define __MLX_CPU_BUFFER__ + +namespace mlx +{ + class CPUBuffer + { + public: + CPUBuffer() {} + CPUBuffer(std::size_t size) try : m_data(new std::uint8_t[size]), m_size(size) + {} + catch(...) + { + FatalError("memory allocation for a CPU buffer failed"); + } + + [[nodiscard]] inline CPUBuffer Duplicate() const + { + CPUBuffer buffer(m_size); + std::memcpy(buffer.GetData(), m_data.get(), m_size); + return buffer; + } + + inline bool Empty() const { return m_size == 0; } + + [[nodiscard]] inline std::size_t GetSize() const noexcept { return m_size; } + + template + [[nodiscard]] inline T* GetDataAs() const { return reinterpret_cast(m_data.get()); } + [[nodiscard]] inline std::uint8_t* GetData() const { return m_data.get(); } + inline operator bool() const { return (bool)m_data; } + + ~CPUBuffer() = default; + + private: + std::shared_ptr m_data; + std::size_t m_size = 0; + }; +} + +#endif diff --git a/runtime/Includes/Utils/CallOnExit.h b/runtime/Includes/Utils/CallOnExit.h new file mode 100644 index 0000000..5e86f5d --- /dev/null +++ b/runtime/Includes/Utils/CallOnExit.h @@ -0,0 +1,33 @@ +#ifndef __MLX_CALL_ON_EXIT__ +#define __MLX_CALL_ON_EXIT__ + +namespace mlx +{ + template + class CallOnExit + { + public: + CallOnExit() = default; + CallOnExit(F&& functor); + CallOnExit(const CallOnExit&) = delete; + CallOnExit(CallOnExit&&) = delete; + + void CallAndReset(); + void Reset(); + + CallOnExit& operator=(const CallOnExit&) = delete; + CallOnExit& operator=(CallOnExit&&) = default; + + ~CallOnExit(); + + private: + std::optional m_functor; + }; + + template + CallOnExit(F) -> CallOnExit; +} + +#include + +#endif diff --git a/runtime/Includes/Utils/CallOnExit.inl b/runtime/Includes/Utils/CallOnExit.inl new file mode 100644 index 0000000..69fa3ab --- /dev/null +++ b/runtime/Includes/Utils/CallOnExit.inl @@ -0,0 +1,29 @@ +#pragma once +#include + +namespace mlx +{ + template + CallOnExit::CallOnExit(F&& functor) : m_functor(std::move(functor)) {} + + template + CallOnExit::~CallOnExit() + { + if(m_functor.has_value()) + (*m_functor)(); + } + + template + void CallOnExit::CallAndReset() + { + if(m_functor.has_value()) + (*m_functor)(); + m_functor.reset(); + } + + template + void CallOnExit::Reset() + { + m_functor.reset(); + } +} diff --git a/runtime/Includes/Utils/CombineHash.h b/runtime/Includes/Utils/CombineHash.h new file mode 100644 index 0000000..b334300 --- /dev/null +++ b/runtime/Includes/Utils/CombineHash.h @@ -0,0 +1,17 @@ +#ifndef __MLX_HASH__ +#define __MLX_HASH__ + +namespace mlx +{ + inline void HashCombine([[maybe_unused]] std::size_t& seed) noexcept {} + + template + inline void HashCombine(std::size_t& seed, const T& v, Rest... 
rest) + { + std::hash hasher; + seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); + HashCombine(seed, rest...); + } +} + +#endif diff --git a/runtime/Includes/Utils/ConstMap.h b/runtime/Includes/Utils/ConstMap.h new file mode 100644 index 0000000..74666ae --- /dev/null +++ b/runtime/Includes/Utils/ConstMap.h @@ -0,0 +1,57 @@ +#ifndef __MLX_CONST_MAP__ +#define __MLX_CONST_MAP__ + +namespace mlx +{ + template + class ConstMap + { + public: + using ValueType = std::pair; + using ContainerType = std::vector; + + using iterator = typename ContainerType::const_iterator; + using const_iterator = iterator; + + public: + ConstMap(std::initializer_list init) : m_container(init) + { + std::sort(m_container.begin(), m_container.end()); + } + + ConstMap(ContainerType container) : m_container(std::move(container)) + { + std::sort(m_container.begin(), m_container.end()); + } + + inline const_iterator begin() const { return m_container.begin(); } + inline const_iterator end() const { return m_container.end(); } + + template + inline const_iterator Find(const K& key) const + { + const_iterator it = std::lower_bound(begin(), end(), key, + [](const ValueType& p, const K& key) + { + return p.first < key; + } + ); + return it != end() && it->first == key ? it : end(); + } + + template + inline bool Has(const K& key) const + { + return Find(key) != end(); + } + + inline std::size_t Size() const { return m_container.size(); } + + ~ConstMap() = default; + + private: + ContainerType m_container; + }; +} + +#endif diff --git a/runtime/Includes/Utils/NonCopyable.h b/runtime/Includes/Utils/NonCopyable.h new file mode 100644 index 0000000..aa9c806 --- /dev/null +++ b/runtime/Includes/Utils/NonCopyable.h @@ -0,0 +1,21 @@ +#ifndef __MLX_NON_COPYABLE__ +#define __MLX_NON_COPYABLE__ + +namespace mlx +{ + class NonCopyable + { + protected: + NonCopyable() = default; + virtual ~NonCopyable() = default; + + public: + NonCopyable(const NonCopyable&) = delete; + NonCopyable(NonCopyable&&) noexcept = default; + NonCopyable &operator=(const NonCopyable&) = delete; + NonCopyable &operator=(NonCopyable&&) noexcept = default; + }; + +} + +#endif // __MLX_NON_COPYABLE__ diff --git a/runtime/Includes/Utils/NonOwningPtr.h b/runtime/Includes/Utils/NonOwningPtr.h new file mode 100644 index 0000000..8ece694 --- /dev/null +++ b/runtime/Includes/Utils/NonOwningPtr.h @@ -0,0 +1,45 @@ +#ifndef __MLX_NON_OWNING_PTR__ +#define __MLX_NON_OWNING_PTR__ + +namespace mlx +{ + template + class NonOwningPtr + { + public: + NonOwningPtr(T* ptr = nullptr); + NonOwningPtr(const NonOwningPtr&) = default; + NonOwningPtr(NonOwningPtr&& ptr) noexcept; + + NonOwningPtr& operator=(T* ptr); + NonOwningPtr& operator=(const NonOwningPtr&) = default; + NonOwningPtr& operator=(NonOwningPtr&& ptr) noexcept; + + inline operator bool() const noexcept; + + inline T* Get() const noexcept; + inline T* operator->() const noexcept; + inline T& operator*() const noexcept; + + ~NonOwningPtr() = default; + + private: + T* p_ptr = nullptr; + }; +} + +#include + +namespace std +{ + template + struct hash> + { + std::size_t operator()(const mlx::NonOwningPtr& ptr) const noexcept + { + return std::hash{}(ptr.Get()); + } + }; +} + +#endif diff --git a/runtime/Includes/Utils/NonOwningPtr.inl b/runtime/Includes/Utils/NonOwningPtr.inl new file mode 100644 index 0000000..579809e --- /dev/null +++ b/runtime/Includes/Utils/NonOwningPtr.inl @@ -0,0 +1,53 @@ +#pragma once +#include + +namespace mlx +{ + template + NonOwningPtr::NonOwningPtr(T* ptr) : p_ptr(ptr) {} + + template 
+ NonOwningPtr::NonOwningPtr(NonOwningPtr&& ptr) noexcept : p_ptr(ptr.p_ptr) + { + ptr.p_ptr = nullptr; + } + + template + NonOwningPtr& NonOwningPtr::operator=(T* ptr) + { + p_ptr = ptr; + return *this; + } + + template + NonOwningPtr& NonOwningPtr::operator=(NonOwningPtr&& ptr) noexcept + { + p_ptr = ptr.p_ptr; + ptr.p_ptr = nullptr; + return *this; + } + + template + NonOwningPtr::operator bool() const noexcept + { + return p_ptr != nullptr; + } + + template + T* NonOwningPtr::Get() const noexcept + { + return p_ptr; + } + + template + T* NonOwningPtr::operator->() const noexcept + { + return p_ptr; + } + + template + T& NonOwningPtr::operator*() const noexcept + { + return *p_ptr; + } +} diff --git a/runtime/Sources/Core/Application.cpp b/runtime/Sources/Core/Application.cpp new file mode 100644 index 0000000..ce32c96 --- /dev/null +++ b/runtime/Sources/Core/Application.cpp @@ -0,0 +1,114 @@ +#include + +#include +#include +#include +#include +#include + +namespace mlx +{ + Application::Application() : p_mem_manager(std::make_unique()), p_sdl_manager(std::make_unique()), m_fps(), m_in() + { + MLX_PROFILE_FUNCTION(); + std::srand(std::time(nullptr)); + EventBus::RegisterListener({ [](const EventBase& event) + { + if(event.What() == Event::FatalErrorEventCode) + std::abort(); + }, "__MlxApplication" }); + + #ifdef PROFILER + p_profiler = std::make_unique(); + #endif + + m_fps.Init(); + p_render_core = std::make_unique(); + LoadFont("default", 6.0f); + } + + void Application::Run() noexcept + { + m_in.Run(); + + while(m_in.IsRunning()) + { + if(!m_fps.Update()) + continue; + + m_in.FetchInputs(); + + if(f_loop_hook) + f_loop_hook(p_param); + + for(auto& gs : m_graphics) + { + if(gs) + gs->Render(); + } + } + RenderCore::Get().WaitDeviceIdle(); + } + + void* Application::NewTexture(int w, int h) + { + MLX_PROFILE_FUNCTION(); + Texture* texture; + try { texture = new Texture({}, w, h, VK_FORMAT_R8G8B8A8_SRGB, false, "mlx_user_image"); } + catch(...) 
{ return nullptr; } + m_image_registry.RegisterTexture(texture); + return texture; + } + + void* Application::NewStbTexture(char* file, int* w, int* h) + { + MLX_PROFILE_FUNCTION(); + Texture* texture = StbTextureLoad(file, w, h); + if(texture == nullptr) + return nullptr; + m_image_registry.RegisterTexture(texture); + return texture; + } + + void Application::DestroyTexture(void* ptr) + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().WaitDeviceIdle(); + if(!m_image_registry.IsTextureKnown(static_cast(ptr))) + { + Error("invalid image ptr"); + return; + } + + Texture* texture = static_cast(ptr); + if(!texture->IsInit()) + Error("trying to destroy a texture that has already been destroyed"); + else + texture->Destroy(); + + for(auto& gs : m_graphics) + { + if(gs) + gs->TryEraseSpritesInScene(texture); + } + m_image_registry.UnregisterTexture(texture); + delete texture; + } + + Application::~Application() + { + for(auto& window : m_graphics) + { + if(window && window->GetWindow()->GetName() == "让我们在月光下åšçˆ±å§") + window.reset(); + } + + m_font_registry.Reset(); + p_render_core.reset(); + p_sdl_manager.reset(); + #ifdef PROFILER + p_profiler.reset(); + #endif + p_mem_manager.reset(); + } +} diff --git a/runtime/Sources/Core/Bridge.cpp b/runtime/Sources/Core/Bridge.cpp new file mode 100644 index 0000000..2588115 --- /dev/null +++ b/runtime/Sources/Core/Bridge.cpp @@ -0,0 +1,313 @@ +#include + +#include +#include +#include +#include +#include + +static void* __mlx_ptr = nullptr; + +#ifndef DISABLE_ALL_SAFETIES + #define MLX_CHECK_APPLICATION_POINTER(ptr) \ + if(ptr != __mlx_ptr || ptr == NULL) \ + mlx::FatalError("invalid mlx pointer passed to '%'", MLX_FUNC_SIG); \ + else {} // just to avoid issues with possible if-else statements outside this macro +#else + #define MLX_CHECK_APPLICATION_POINTER(ptr) +#endif + +extern "C" +{ + void* mlx_init() + { + if(__mlx_ptr != nullptr) + { + mlx::Error("MLX cannot be initialized multiple times"); + return nullptr; + } + mlx::MemManager::Get(); // just to initialize the C garbage collector + mlx::Application* app = new mlx::Application; + if(app == nullptr) + mlx::FatalError("Tout a pété"); + __mlx_ptr = static_cast(app); + return __mlx_ptr; + } + + void* mlx_new_window(void* mlx, int w, int h, const char* title) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if(w <= 0 || h <= 0) + { + mlx::FatalError("invalid window size (%d x %d)", w, h); + return NULL; // not nullptr for the C compatibility + } + return static_cast(mlx)->NewGraphicsSuport(w, h, title, false); + } + + void* mlx_new_resizable_window(void* mlx, int w, int h, const char* title) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if(w <= 0 || h <= 0) + { + mlx::FatalError("invalid window size (%d x %d)", w, h); + return NULL; // not nullptr for the C compatibility + } + return static_cast(mlx)->NewGraphicsSuport(w, h, title, true); + } + + void mlx_set_window_position(void *mlx, void *win, int x, int y) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->SetGraphicsSupportPosition(win, x, y); + } + + void mlx_loop_hook(void* mlx, int (*f)(void*), void* param) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->LoopHook(f, param); + } + + void mlx_loop(void* mlx) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->Run(); + } + + void mlx_loop_end(void* mlx) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->LoopEnd(); + } + + void mlx_mouse_show() + { + mlx::SDLManager::ShowCursor(); + } + + void mlx_mouse_hide() + { + mlx::SDLManager::HideCursor(); + } + + void 
mlx_mouse_move(void* mlx, void* win, int x, int y) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->MouseMove(win, x, y); + } + + void mlx_mouse_get_pos(void* mlx, int* x, int* y) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->GetMousePos(x, y); + } + + void mlx_on_event(void* mlx, void* win, mlx_event_type event, int (*funct_ptr)(int, void*), void* param) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->OnEvent(win, static_cast(event), funct_ptr, param); + } + + void* mlx_new_image(void* mlx, int width, int height) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if (width <= 0 || height <= 0) + { + mlx::Error("invalid image size (% x %)", width, height); + return nullptr; + } + return static_cast(mlx)->NewTexture(width, height); + } + + int mlx_get_image_pixel(void* mlx, void* img, int x, int y) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + int color = static_cast(mlx)->GetTexturePixel(img, x, y); + unsigned char color_bits[4]; + color_bits[0] = (color & 0x000000FF); + color_bits[1] = (color & 0x0000FF00) >> 8; + color_bits[2] = (color & 0x00FF0000) >> 16; + color_bits[3] = (color & 0xFF000000) >> 24; + return *reinterpret_cast(color_bits); + } + + void mlx_set_image_pixel(void* mlx, void* img, int x, int y, int color) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + unsigned char color_bits[4]; + color_bits[0] = (color & 0x00FF0000) >> 16; + color_bits[1] = (color & 0x0000FF00) >> 8; + color_bits[2] = (color & 0x000000FF); + color_bits[3] = (color & 0xFF000000) >> 24; + static_cast(mlx)->SetTexturePixel(img, x, y, *reinterpret_cast(color_bits)); + } + + void mlx_put_image_to_window(void* mlx, void* win, void* img, int x, int y) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->TexturePut(win, img, x, y, 1.0f, 0.0f); + } + + void mlx_transform_put_image_to_window(void* mlx, void* win, void* img, int x, int y, float scale, float angle) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->TexturePut(win, img, x, y, scale, angle); + } + + void mlx_destroy_image(void* mlx, void* img) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->DestroyTexture(img); + } + + void* mlx_png_file_to_image(void* mlx, char* filename, int* width, int* height) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if (filename == nullptr) + { + mlx::Error("PNG loader: filename is NULL"); + return nullptr; + } + std::filesystem::path file(filename); + if(file.extension() != ".png") + { + mlx::Error("PNG loader: not a png file '%'", filename); + return nullptr; + } + return static_cast(mlx)->NewStbTexture(filename, width, height); + } + + void* mlx_jpg_file_to_image(void* mlx, char* filename, int* width, int* height) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if (filename == nullptr) + { + mlx::Error("JPG loader: filename is NULL"); + return nullptr; + } + std::filesystem::path file(filename); + if(file.extension() != ".jpg" && file.extension() != ".jpeg") + { + mlx::Error("JPG loader: not a jpg file '%'", filename); + return nullptr; + } + return static_cast(mlx)->NewStbTexture(filename, width, height); + } + + void* mlx_bmp_file_to_image(void* mlx, char* filename, int* width, int* height) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if (filename == nullptr) + { + mlx::Error("BMP loader: filename is NULL"); + return nullptr; + } + std::filesystem::path file(filename); + if(file.extension() != ".bmp" && file.extension() != ".dib") + { + mlx::Error("BMP loader: not a bmp file '%'", filename); + return nullptr; + } + return static_cast(mlx)->NewStbTexture(filename, 
width, height); + } + + void mlx_pixel_put(void* mlx, void* win, int x, int y, int color) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + unsigned char color_bits[4]; + color_bits[0] = (color & 0x00FF0000) >> 16; + color_bits[1] = (color & 0x0000FF00) >> 8; + color_bits[2] = (color & 0x000000FF); + color_bits[3] = (color & 0xFF000000) >> 24; + static_cast(mlx)->PixelPut(win, x, y, *reinterpret_cast(color_bits)); + } + + void mlx_string_put(void* mlx, void* win, int x, int y, int color, char* str) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + unsigned char color_bits[4]; + color_bits[0] = (color & 0x00FF0000) >> 16; + color_bits[1] = (color & 0x0000FF00) >> 8; + color_bits[2] = (color & 0x000000FF); + color_bits[3] = (color & 0xFF000000) >> 24; + static_cast(mlx)->StringPut(win, x, y, *reinterpret_cast(color_bits), str); + } + + void mlx_set_font(void* mlx, char* filepath) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if (filepath == nullptr) + { + mlx::Error("Font loader: filepath is NULL"); + return; + } + std::filesystem::path file(filepath); + if(std::strcmp(filepath, "default") != 0 && file.extension() != ".ttf" && file.extension() != ".tte") + { + mlx::Error("TTF loader: not a truetype font file '%'", filepath); + return; + } + if(std::strcmp(filepath, "default") == 0) + static_cast(mlx)->LoadFont(file, 6.f); + else + static_cast(mlx)->LoadFont(file, 16.f); + } + + void mlx_set_font_scale(void* mlx, char* filepath, float scale) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if (filepath == nullptr) + { + mlx::Error("Font loader: filepath is NULL"); + return; + } + std::filesystem::path file(filepath); + if(std::strcmp(filepath, "default") != 0 && file.extension() != ".ttf" && file.extension() != ".tte") + { + mlx::Error("TTF loader: not a truetype font file '%'", filepath); + return; + } + static_cast(mlx)->LoadFont(file, scale); + } + + void mlx_clear_window(void* mlx, void* win, int color) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + unsigned char color_bits[4]; + color_bits[0] = (color & 0x00FF0000) >> 16; + color_bits[1] = (color & 0x0000FF00) >> 8; + color_bits[2] = (color & 0x000000FF); + color_bits[3] = (color & 0xFF000000) >> 24; + static_cast(mlx)->ClearGraphicsSupport(win, *reinterpret_cast(color_bits)); + } + + void mlx_destroy_window(void* mlx, void* win) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->DestroyGraphicsSupport(win); + } + + void mlx_destroy_display(void* mlx) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + delete static_cast(mlx); + __mlx_ptr = nullptr; + } + + void mlx_get_screens_size(void* mlx, void* win, int* w, int* h) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + static_cast(mlx)->GetScreenSize(win, w, h); + } + + void mlx_set_fps_goal(void* mlx, int fps) + { + MLX_CHECK_APPLICATION_POINTER(mlx); + if(fps < 0) + mlx::Error("You cannot set a negative FPS cap (nice try)"); + else if(fps == 0) + mlx::Error("You cannot set a FPS cap to 0 (nice try)"); + else + static_cast(mlx)->SetFPSCap(static_cast(fps)); + } +} diff --git a/runtime/Sources/Core/EventBus.cpp b/runtime/Sources/Core/EventBus.cpp new file mode 100644 index 0000000..721796a --- /dev/null +++ b/runtime/Sources/Core/EventBus.cpp @@ -0,0 +1,25 @@ +#include +#include +#include + +namespace mlx +{ + void EventBus::Send(const std::string& listener_name, const EventBase& event) + { + for(const EventListener& listener : s_listeners) + { + if(listener.GetName() == listener_name) + { + listener.Call(event); + return; + } + } + Warning("Event Bus : listener not found, '%'", listener_name); + } + + void 
EventBus::SendBroadcast(const EventBase& event) + { + for(const EventListener& listener : s_listeners) + listener.Call(event); + } +} diff --git a/runtime/Sources/Core/EventListener.cpp b/runtime/Sources/Core/EventListener.cpp new file mode 100644 index 0000000..21540f5 --- /dev/null +++ b/runtime/Sources/Core/EventListener.cpp @@ -0,0 +1,9 @@ +#include +#include + +namespace mlx +{ + EventListener::EventListener(func::function functor, std::string name) + : m_listen_functor(std::move(functor)), m_name(std::move(name)) + {} +} diff --git a/runtime/Sources/Core/Fps.cpp b/runtime/Sources/Core/Fps.cpp new file mode 100644 index 0000000..a464f67 --- /dev/null +++ b/runtime/Sources/Core/Fps.cpp @@ -0,0 +1,30 @@ +#include +#include + +namespace mlx +{ + void FpsManager::Init() + { + m_timer = static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()); + m_fps_before = m_timer; + m_fps_now = m_timer; + } + + bool FpsManager::Update() + { + using namespace std::chrono_literals; + m_fps_now = static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()); + + if(std::chrono::duration{m_fps_now - m_timer} >= 1s) + m_timer += m_fps_now; + + m_fps_elapsed_time = m_fps_now - m_fps_before; + if(m_fps_elapsed_time >= m_ns) + { + m_fps_before += m_ns; + return true; + } + std::this_thread::sleep_for(std::chrono::duration(m_ns - 1)); + return false; + } +} diff --git a/runtime/Sources/Core/Graphics.cpp b/runtime/Sources/Core/Graphics.cpp new file mode 100644 index 0000000..93721c4 --- /dev/null +++ b/runtime/Sources/Core/Graphics.cpp @@ -0,0 +1,59 @@ +#include +#include + +namespace mlx +{ + GraphicsSupport::GraphicsSupport([[maybe_unused]] std::size_t w, [[maybe_unused]] std::size_t h, NonOwningPtr render_target, int id) : + m_put_pixel_manager(&m_renderer), + p_window(nullptr), + m_id(id), + m_has_window(false) + { + MLX_PROFILE_FUNCTION(); + m_renderer.Init(render_target); + m_scene_renderer.Init(render_target); + p_scene = std::make_unique(); + } + + GraphicsSupport::GraphicsSupport(std::size_t w, std::size_t h, std::string title, int id, bool is_resizable) : + m_put_pixel_manager(&m_renderer), + p_window(std::make_shared(w, h, title, is_resizable)), + m_id(id), + m_has_window(true) + { + MLX_PROFILE_FUNCTION(); + m_renderer.Init(p_window.get()); + m_scene_renderer.Init(nullptr); + p_scene = std::make_unique(); + } + + void GraphicsSupport::Render() noexcept + { + MLX_PROFILE_FUNCTION(); + m_renderer.BeginFrame(); + m_draw_layer = 0; + m_scene_renderer.Render(*p_scene, m_renderer); + m_renderer.EndFrame(); + #ifdef GRAPHICS_MEMORY_DUMP + // dump memory to file every two seconds + using namespace std::chrono_literals; + static std::int64_t timer = static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()); + if(std::chrono::duration{ static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()) - timer } >= 1s) + { + RenderCore::Get().GetAllocator().DumpMemoryToJson(); + timer = static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()); + } + #endif + } + + GraphicsSupport::~GraphicsSupport() + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().WaitDeviceIdle(); + p_scene.reset(); + m_scene_renderer.Destroy(); + m_renderer.Destroy(); + if(p_window) + p_window->Destroy(); + } +} diff --git a/runtime/Sources/Core/Logs.cpp b/runtime/Sources/Core/Logs.cpp new file 
mode 100644 index 0000000..f61ec99 --- /dev/null +++ b/runtime/Sources/Core/Logs.cpp @@ -0,0 +1,60 @@ +#include +#include +#include + +namespace mlx +{ + namespace Internal + { + struct FatalErrorEvent : public EventBase + { + Event What() const override { return Event::FatalErrorEventCode; } + }; + } + + void Logs::Report(LogType type, std::string message) + { + Report(type, 0, {}, {}, std::move(message)); + } + + void Logs::Report(LogType type, unsigned int line, std::string_view file, std::string_view function, std::string message) + { + using namespace std::literals; + + #ifndef DEBUG + if(type == LogType::Debug) + return; + #endif + + std::string code_infos; + if((type == LogType::Error || type == LogType::FatalError) && !file.empty() && !function.empty()) + { + code_infos += "{in file '"s; + code_infos += file; + code_infos += "', line "s + std::to_string(line) + ", in function '"s; + code_infos += function; + code_infos += "'} "s; + } + + switch(type) + { + case LogType::Debug: std::cout << Ansi::blue << "[MLX Debug] "; break; + case LogType::Message: std::cout << Ansi::blue << "[MLX Message] "; break; + case LogType::Warning: std::cout << Ansi::magenta << "[MLX Warning] "; break; + case LogType::Error: std::cerr << Ansi::red << "[MLX Error] "; break; + case LogType::FatalError: std::cerr << Ansi::red << "[MLX Fatal Error] "; break; + + default: break; + } + + const std::chrono::zoned_time current_time{ std::chrono::current_zone(), std::chrono::floor(std::chrono::system_clock::now()) }; + + std::cout << Ansi::yellow << std::format("[{0:%H:%M:%S}] ", current_time) << Ansi::def << code_infos << message << std::endl; + + if(type == LogType::FatalError) + { + std::cout << Ansi::bg_red << "Fatal Error: emergency exit" << Ansi::bg_def << std::endl; + EventBus::Send("__MlxApplication", Internal::FatalErrorEvent{}); + } + } +} diff --git a/runtime/Sources/Core/Memory.cpp b/runtime/Sources/Core/Memory.cpp new file mode 100644 index 0000000..3a69d32 --- /dev/null +++ b/runtime/Sources/Core/Memory.cpp @@ -0,0 +1,63 @@ +#include + +#include + +namespace mlx +{ + MemManager* MemManager::s_instance = nullptr; + + MemManager::MemManager() + { + s_instance = this; + } + + void* MemManager::Malloc(std::size_t size) + { + void* ptr = std::malloc(size); + if(ptr != nullptr) + s_blocks.push_back(ptr); + return ptr; + } + + void* MemManager::Calloc(std::size_t n, std::size_t size) + { + void* ptr = std::calloc(n, size); + if(ptr != nullptr) + s_blocks.push_back(ptr); + return ptr; + } + + void* MemManager::Realloc(void* ptr, std::size_t size) + { + void* ptr2 = std::realloc(ptr, size); + if(ptr2 != nullptr) + s_blocks.push_back(ptr2); + auto it = std::find(s_blocks.begin(), s_blocks.end(), ptr); + if(it != s_blocks.end()) + s_blocks.erase(it); + return ptr2; + } + + void MemManager::Free(void* ptr) + { + if(ptr == nullptr) + return; + auto it = std::find(s_blocks.begin(), s_blocks.end(), ptr); + if(it == s_blocks.end()) + { + Error("Memory Manager: trying to free a pointer not allocated by the memory manager"); + return; + } + std::free(*it); + s_blocks.erase(it); + } + + MemManager::~MemManager() + { + std::for_each(s_blocks.begin(), s_blocks.end(), [](void* ptr) + { + std::free(ptr); + }); + s_instance = nullptr; + } +} diff --git a/runtime/Sources/Core/Profiler.cpp b/runtime/Sources/Core/Profiler.cpp new file mode 100644 index 0000000..b7c106b --- /dev/null +++ b/runtime/Sources/Core/Profiler.cpp @@ -0,0 +1,70 @@ +#include + +#include + +namespace mlx +{ + Profiler* Profiler::s_instance = nullptr; + 
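// ---------------------------------------------------------------------------
// [Editor's aside - illustrative sketch, not part of this diff]
// MemManager (Core/Memory.cpp above) records every pointer it hands out so that
// allocations the C API never released are reclaimed in its destructor. A minimal
// usage sketch; the 64-byte size is arbitrary and std::memset requires <cstring>.
void* block = mlx::MemManager::Get().Malloc(64);   // pointer is appended to the tracking list
if(block != nullptr)
{
	std::memset(block, 0, 64);                     // use the block
	mlx::MemManager::Get().Free(block);            // freed and removed from the tracking list
}
// Anything never freed explicitly is released by ~MemManager() at shutdown.
// --------------------------- [end editor's aside] ---------------------------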
+ void Profiler::BeginRuntimeSession() + { + std::lock_guard lock(m_mutex); + if(m_runtime_session_began) + return; + m_output_stream.open("./runtime_profile.mlx.json", std::ofstream::out | std::ofstream::trunc); + + if(m_output_stream.is_open()) + WriteHeader(); + else + Error("Profiler: cannot open runtime profile file"); + m_runtime_session_began = true; + } + + void Profiler::AppendProfileData(ProfileResult&& result) + { + std::lock_guard lock(m_mutex); + auto it = m_profile_data.find(result.name); + if(it != m_profile_data.end()) + { + result.elapsed_time = (result.elapsed_time + it->second.second.elapsed_time) / it->second.first; + m_profile_data[result.name].first++; + m_profile_data[result.name].second = result; + } + else + m_profile_data[result.name] = std::make_pair(1, result); + } + + void Profiler::WriteProfile(const ProfileResult& result) + { + std::stringstream json; + json << std::setprecision(9) << std::fixed; + json << ",\n{\n"; + json << "\t\"type\" : \"function\"," << '\n'; + json << "\t\"name\" : \"" << result.name << "\"," << '\n'; + json << "\t\"thread id\" : " << result.thread_id << "," << '\n'; + json << "\t\"average duration\" : \"" << result.elapsed_time.count() << "ms\"\n"; + json << "}"; + m_output_stream << json.str(); + } + + void Profiler::EndRuntimeSession() + { + std::lock_guard lock(m_mutex); + if(!m_runtime_session_began) + return; + for(auto& [_, pair] : m_profile_data) + WriteProfile(pair.second); + WriteFooter(); + m_output_stream.close(); + m_profile_data.clear(); + m_runtime_session_began = false; + } + + Profiler::~Profiler() + { + if(!m_runtime_session_began) + return; + EndRuntimeSession(); + s_instance = nullptr; + } +} diff --git a/runtime/Sources/Core/SDLManager.cpp b/runtime/Sources/Core/SDLManager.cpp new file mode 100644 index 0000000..8239119 --- /dev/null +++ b/runtime/Sources/Core/SDLManager.cpp @@ -0,0 +1,234 @@ +#include +#include +#include +#include + +namespace mlx +{ + #if SDL_BYTEORDER == SDL_BIG_ENDIAN + constexpr const std::uint32_t rmask = 0xff000000; + constexpr const std::uint32_t gmask = 0x00ff0000; + constexpr const std::uint32_t bmask = 0x0000ff00; + constexpr const std::uint32_t amask = 0x000000ff; + #else + constexpr const std::uint32_t rmask = 0x000000ff; + constexpr const std::uint32_t gmask = 0x0000ff00; + constexpr const std::uint32_t bmask = 0x00ff0000; + constexpr const std::uint32_t amask = 0xff000000; + #endif + + namespace Internal + { + struct WindowInfos + { + SDL_Window* window; + SDL_Surface* icon; + }; + } + + SDLManager* SDLManager::s_instance = nullptr; + + SDLManager::SDLManager() + { + MLX_PROFILE_FUNCTION(); + s_instance = this; + + m_drop_sdl_responsability = SDL_WasInit(SDL_INIT_VIDEO); + if(m_drop_sdl_responsability) // is case the mlx is running in a sandbox like MacroUnitTester where SDL is already init + return; + SDL_SetMemoryFunctions(MemManager::Get().Malloc, MemManager::Get().Calloc, MemManager::Get().Realloc, MemManager::Get().Free); + + #ifdef FORCE_WAYLAND + SDL_SetHint(SDL_HINT_VIDEODRIVER, "wayland,x11"); + #endif + + if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS | SDL_INIT_TIMER) != 0) + FatalError("SDL: unable to init all subsystems; %", SDL_GetError()); + DebugLog("SDL Manager initialized"); + } + + Handle SDLManager::CreateWindow(const std::string& title, std::size_t w, std::size_t h, bool hidden, std::int32_t& id, bool is_resizable) + { + Internal::WindowInfos* infos = new Internal::WindowInfos; + Verify(infos != nullptr, "SDL: window allocation failed"); + + std::uint32_t flags = 
SDL_WINDOW_VULKAN; + if(hidden) + flags |= SDL_WINDOW_HIDDEN; + else + flags |= SDL_WINDOW_SHOWN; + if(is_resizable) + flags |= SDL_WINDOW_RESIZABLE; + + infos->window = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, w, h, flags); + if(!infos->window) + FatalError("SDL: unable to open a new window; %", SDL_GetError()); + infos->icon = SDL_CreateRGBSurfaceFrom(static_cast(logo_mlx), logo_mlx_width, logo_mlx_height, 32, 4 * logo_mlx_width, rmask, gmask, bmask, amask); + SDL_SetWindowIcon(infos->window, infos->icon); + + m_windows_registry.insert(infos); + + id = SDL_GetWindowID(infos->window); + + return infos; + } + + void SDLManager::DestroyWindow(Handle window) noexcept + { + Verify(m_windows_registry.find(window) != m_windows_registry.end(), "SDL: cannot destroy window; unknown window pointer"); + + Internal::WindowInfos* infos = static_cast(window); + if(infos->window != nullptr) + SDL_DestroyWindow(infos->window); + if(infos->icon != nullptr) + SDL_FreeSurface(infos->icon); + + m_windows_registry.erase(infos); + delete infos; + } + + VkSurfaceKHR SDLManager::CreateVulkanSurface(Handle window, VkInstance instance) const noexcept + { + VkSurfaceKHR surface; + if(!SDL_Vulkan_CreateSurface(static_cast(window)->window, instance, &surface)) + FatalError("SDL: could not create a Vulkan surface; %", SDL_GetError()); + return surface; + } + + std::vector SDLManager::GetRequiredVulkanInstanceExtentions(Handle window) const noexcept + { + std::uint32_t count; + if(!SDL_Vulkan_GetInstanceExtensions(static_cast(window)->window, &count, nullptr)) + FatalError("SDL Manager: could not retrieve Vulkan instance extensions"); + std::vector extensions(count); + if(!SDL_Vulkan_GetInstanceExtensions(static_cast(window)->window, &count, extensions.data())) + FatalError("SDL Manager: could not retrieve Vulkan instance extensions"); + extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME); + return extensions; + } + + Vec2ui SDLManager::GetVulkanDrawableSize(Handle window) const noexcept + { + Vec2i extent; + SDL_Vulkan_GetDrawableSize(static_cast(window)->window, &extent.x, &extent.y); + return Vec2ui{ extent }; + } + + void SDLManager::MoveMouseOnWindow(Handle window, int x, int y) const noexcept + { + SDL_WarpMouseInWindow(static_cast(window)->window, x, y); + SDL_PumpEvents(); + } + + void SDLManager::GetScreenSizeWindowIsOn(Handle window, int* x, int* y) const noexcept + { + SDL_DisplayMode DM; + SDL_GetDesktopDisplayMode(SDL_GetWindowDisplayIndex(static_cast(window)->window), &DM); + *x = DM.w; + *y = DM.h; + } + + void SDLManager::SetWindowPosition(Handle window, int x, int y) const noexcept + { + SDL_SetWindowPosition(static_cast(window)->window, x, y); + } + + void SDLManager::HideCursor() noexcept + { + SDL_ShowCursor(SDL_DISABLE); + } + + void SDLManager::ShowCursor() noexcept + { + SDL_ShowCursor(SDL_ENABLE); + } + + std::int32_t SDLManager::GetX() const noexcept + { + int dummy; + int x; + SDL_GetMouseState(&x, &dummy); + return x; + } + + std::int32_t SDLManager::GetY() const noexcept + { + int dummy; + int y; + SDL_GetMouseState(&dummy, &y); + return y; + } + + std::int32_t SDLManager::GetXRel() const noexcept + { + int dummy; + int x; + SDL_GetRelativeMouseState(&x, &dummy); + return x; + } + + std::int32_t SDLManager::GetYRel() const noexcept + { + int dummy; + int y; + SDL_GetRelativeMouseState(&dummy, &y); + return y; + } + + void SDLManager::InputsFetcher(func::function functor) + { + SDL_Event event; + while(SDL_PollEvent(&event)) + { + std::uint32_t id = 
event.window.windowID; + switch(event.type) + { + case SDL_KEYUP: functor(MLX_KEYUP, id, event.key.keysym.scancode); break; + case SDL_KEYDOWN: functor(MLX_KEYDOWN, id, event.key.keysym.scancode); break; + case SDL_MOUSEBUTTONUP: functor(MLX_MOUSEUP, id, event.button.button); break; + case SDL_MOUSEBUTTONDOWN: functor(MLX_MOUSEDOWN, id, event.button.button); break; + case SDL_MOUSEWHEEL: + { + if(event.wheel.y > 0) // scroll up + functor(MLX_MOUSEWHEEL, id, 1); + else if(event.wheel.y < 0) // scroll down + functor(MLX_MOUSEWHEEL, id, 2); + if(event.wheel.x > 0) // scroll right + functor(MLX_MOUSEWHEEL, id, 3); + else if(event.wheel.x < 0) // scroll left + functor(MLX_MOUSEWHEEL, id, 4); + break; + } + case SDL_WINDOWEVENT: + { + switch(event.window.event) + { + case SDL_WINDOWEVENT_CLOSE: functor(MLX_WINDOW_EVENT, id, 0); break; + case SDL_WINDOWEVENT_MOVED: functor(MLX_WINDOW_EVENT, id, 1); break; + case SDL_WINDOWEVENT_MINIMIZED: functor(MLX_WINDOW_EVENT, id, 2); break; + case SDL_WINDOWEVENT_MAXIMIZED: functor(MLX_WINDOW_EVENT, id, 3); break; + case SDL_WINDOWEVENT_ENTER: functor(MLX_WINDOW_EVENT, id, 4); break; + case SDL_WINDOWEVENT_FOCUS_GAINED: functor(MLX_WINDOW_EVENT, id, 5); break; + case SDL_WINDOWEVENT_LEAVE: functor(MLX_WINDOW_EVENT, id, 6); break; + case SDL_WINDOWEVENT_FOCUS_LOST: functor(MLX_WINDOW_EVENT, id, 7); break; + + default : break; + } + break; + } + + default: break; + } + } + } + + SDLManager::~SDLManager() + { + if(m_drop_sdl_responsability) + return; + SDL_QuitSubSystem(SDL_INIT_VIDEO | SDL_INIT_TIMER | SDL_INIT_EVENTS); + SDL_Quit(); + s_instance = nullptr; + DebugLog("SDL Manager uninitialized"); + } +} diff --git a/runtime/Sources/Core/UUID.cpp b/runtime/Sources/Core/UUID.cpp new file mode 100644 index 0000000..cf2855a --- /dev/null +++ b/runtime/Sources/Core/UUID.cpp @@ -0,0 +1,13 @@ +#include + +#include + +namespace mlx +{ + static std::random_device random_device; + static std::mt19937_64 engine(random_device()); + static std::uniform_int_distribution uniform_distribution; + + UUID::UUID() : m_uuid(uniform_distribution(engine)) {} + UUID::UUID(std::uint64_t uuid) : m_uuid(uuid) {} +} diff --git a/runtime/Sources/Graphics/Font.cpp b/runtime/Sources/Graphics/Font.cpp new file mode 100644 index 0000000..339aa62 --- /dev/null +++ b/runtime/Sources/Graphics/Font.cpp @@ -0,0 +1,70 @@ +#include + +#include +#include + +#define STBRP_ASSERT(x) mlx::Assert(x, "internal stb assertion " #x) +#define STB_RECT_PACK_IMPLEMENTATION +#include + +#define STB_TRUETYPE_IMPLEMENTATION +#define STB_malloc(x, u) ((void)(u), mlx::MemManager::Get().Malloc(x)) +#define STB_free(x, u) ((void)(u), mlx::MemManager::Get().Free(x)) +#include + +namespace mlx +{ + void Font::BuildFont() + { + MLX_PROFILE_FUNCTION(); + std::vector file_bytes; + if(std::holds_alternative(m_build_data)) + { + std::ifstream file(std::get(m_build_data), std::ios::binary); + if(!file.is_open()) + { + Error("Font: cannot open font file, %", m_name); + return; + } + std::ifstream::pos_type file_size = std::filesystem::file_size(std::get(m_build_data)); + file.seekg(0, std::ios::beg); + file_bytes.resize(file_size); + file.read(reinterpret_cast(file_bytes.data()), file_size); + file.close(); + } + + CPUBuffer bitmap(RANGE * RANGE); + + stbtt_pack_context pc; + stbtt_PackBegin(&pc, bitmap.GetData(), RANGE, RANGE, RANGE, 1, nullptr); + if(std::holds_alternative(m_build_data)) + stbtt_PackFontRange(&pc, file_bytes.data(), 0, m_scale, 32, 96, m_cdata.data()); + else + stbtt_PackFontRange(&pc, 
std::get>(m_build_data).data(), 0, m_scale, 32, 96, m_cdata.data()); + stbtt_PackEnd(&pc); + + // TODO : find better solution; No, using VK_FORMAT_R8_SRGB does not work + CPUBuffer vulkan_bitmap(RANGE * RANGE * 4); + for(int i = 0, j = 0; i < RANGE * RANGE; i++, j += 4) + { + vulkan_bitmap.GetData()[j + 0] = bitmap.GetData()[i]; + vulkan_bitmap.GetData()[j + 1] = bitmap.GetData()[i]; + vulkan_bitmap.GetData()[j + 2] = bitmap.GetData()[i]; + vulkan_bitmap.GetData()[j + 3] = bitmap.GetData()[i]; + } + + #ifdef DEBUG + m_atlas.Init(vulkan_bitmap, RANGE, RANGE, VK_FORMAT_R8G8B8A8_SRGB, false, m_name + "_font_atlas_" + std::to_string(m_scale)); + #else + m_atlas.Init(vulkan_bitmap, RANGE, RANGE, VK_FORMAT_R8G8B8A8_SRGB, false, {}); + #endif + + DebugLog("Font: loaded % with a scale of %", m_name, m_scale); + } + + void Font::Destroy() + { + m_atlas.Destroy(); + DebugLog("Font: unloaded % with a scale of %", m_name, m_scale); + } +} diff --git a/runtime/Sources/Graphics/Mesh.cpp b/runtime/Sources/Graphics/Mesh.cpp new file mode 100644 index 0000000..751d12e --- /dev/null +++ b/runtime/Sources/Graphics/Mesh.cpp @@ -0,0 +1,34 @@ +#include +#include +#include + +namespace mlx +{ + void Mesh::Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn) const noexcept + { + MLX_PROFILE_FUNCTION(); + for(std::size_t i = 0; i < m_sub_meshes.size(); i++) + Draw(cmd, drawcalls, polygondrawn, i); + } + + void Mesh::Draw(VkCommandBuffer cmd, std::size_t& drawcalls, std::size_t& polygondrawn, std::size_t submesh_index) const noexcept + { + MLX_PROFILE_FUNCTION(); + Verify(submesh_index < m_sub_meshes.size(), "invalid submesh index"); + m_sub_meshes[submesh_index].vbo.Bind(cmd); + m_sub_meshes[submesh_index].ibo.Bind(cmd); + mlx::RenderCore::Get().vkCmdDrawIndexed(cmd, static_cast(m_sub_meshes[submesh_index].ibo.GetSize() / sizeof(std::uint32_t)), 1, 0, 0, 0); + polygondrawn += m_sub_meshes[submesh_index].triangle_count; + drawcalls++; + } + + Mesh::~Mesh() + { + MLX_PROFILE_FUNCTION(); + for(auto& mesh : m_sub_meshes) + { + mesh.vbo.Destroy(); + mesh.ibo.Destroy(); + } + } +} diff --git a/runtime/Sources/Graphics/PutPixelManager.cpp b/runtime/Sources/Graphics/PutPixelManager.cpp new file mode 100644 index 0000000..9d4d575 --- /dev/null +++ b/runtime/Sources/Graphics/PutPixelManager.cpp @@ -0,0 +1,40 @@ +#include + +#include +#include + +namespace mlx +{ + NonOwningPtr PutPixelManager::DrawPixel(int x, int y, std::uint64_t draw_layer, std::uint32_t color) + { + Verify((bool)p_renderer, "invalid renderer pointer"); + + VkExtent2D extent; + if(p_renderer->GetWindow()) + extent = kvfGetSwapchainImagesSize(p_renderer->GetSwapchain().Get()); + else if(p_renderer->GetRenderTarget()) + extent = VkExtent2D{ .width = p_renderer->GetRenderTarget()->GetWidth(), .height = p_renderer->GetRenderTarget()->GetHeight() }; + else + FatalError("a renderer was created without window nor render target attached (wtf)"); + + #ifdef DEBUG + auto res = m_textures.try_emplace(draw_layer, CPUBuffer{}, extent.width, extent.height, VK_FORMAT_R8G8B8A8_SRGB, false, "mlx_put_pixel_layer_" + std::to_string(draw_layer)); + #else + auto res = m_textures.try_emplace(draw_layer, CPUBuffer{}, extent.width, extent.height, VK_FORMAT_R8G8B8A8_SRGB, false, std::string_view{}); + #endif + if(res.second) + res.first->second.Clear(VK_NULL_HANDLE, Vec4f{ 0.0f }); + res.first->second.SetPixel(x, y, color); + return (res.second ? 
&res.first->second : nullptr); + } + + void PutPixelManager::ResetRenderData() + { + m_textures.clear(); + } + + PutPixelManager::~PutPixelManager() + { + ResetRenderData(); + } +} diff --git a/runtime/Sources/Graphics/Scene.cpp b/runtime/Sources/Graphics/Scene.cpp new file mode 100644 index 0000000..c070e64 --- /dev/null +++ b/runtime/Sources/Graphics/Scene.cpp @@ -0,0 +1,131 @@ +#include +#include +#include +#include + +namespace mlx +{ + Sprite& Scene::CreateSprite(NonOwningPtr texture) noexcept + { + MLX_PROFILE_FUNCTION(); + Verify((bool)texture, "Scene: invalid texture (internal mlx issue, please report to devs)"); + + for(auto& drawable : m_drawables) + { + if(!drawable || drawable->GetType() != DrawableType::Sprite) + continue; + if(texture->GetWidth() == static_cast(drawable.get())->GetTexture()->GetWidth() && texture->GetHeight() == static_cast(drawable.get())->GetTexture()->GetHeight()) + { + std::shared_ptr new_sprite = std::make_shared(drawable->GetMesh(), texture); + m_drawables.push_back(new_sprite); + return *new_sprite; + } + } + + std::shared_ptr sprite = std::make_shared(texture); + m_drawables.push_back(sprite); + return *sprite; + } + + NonOwningPtr Scene::GetSpriteFromTexturePositionScaleRotation(NonOwningPtr texture, const Vec2f& position, float scale, float rotation) const + { + MLX_PROFILE_FUNCTION(); + auto it = std::find_if(m_drawables.begin(), m_drawables.end(), [&texture, &position, scale, rotation](std::shared_ptr drawable) + { + if(!drawable || drawable->GetType() != DrawableType::Sprite) + return false; + return static_cast(drawable.get())->GetTexture() == texture && + drawable->GetPosition() == position && + drawable->GetScale() == Vec2f{ scale, scale } && + drawable->GetRotation().ToEulerAngles() == EulerAnglesf{ 0.0f, 0.0f, rotation }; + }); + return static_cast(it != m_drawables.end() ? 
it->get() : nullptr); + } + + void Scene::TryEraseSpriteFromTexture(NonOwningPtr texture) + { + MLX_PROFILE_FUNCTION(); + auto it = m_drawables.begin(); + do + { + it = std::find_if(m_drawables.begin(), m_drawables.end(), [&texture](std::shared_ptr drawable) + { + if(!drawable || drawable->GetType() != DrawableType::Sprite) + return false; + return static_cast(drawable.get())->GetTexture() == texture; + }); + if(it != m_drawables.end()) + m_drawables.erase(it); + } while(it != m_drawables.end()); + } + + bool Scene::IsTextureAtGivenDrawLayer(NonOwningPtr texture, std::uint64_t draw_layer) const + { + MLX_PROFILE_FUNCTION(); + if(draw_layer >= m_drawables.size()) + return false; + if(!m_drawables[draw_layer] || m_drawables[draw_layer]->GetType() != DrawableType::Sprite) + return false; + return static_cast(m_drawables[draw_layer].get())->GetTexture() == texture; + } + + Text& Scene::CreateText(const std::string& text) noexcept + { + MLX_PROFILE_FUNCTION(); + + Assert((bool)p_bound_font, "no font bound"); + + for(auto& drawable : m_drawables) + { + if(!drawable || drawable->GetType() != DrawableType::Text) + continue; + if(text == static_cast(drawable.get())->GetText() && p_bound_font == static_cast(drawable.get())->GetFont()) + { + std::shared_ptr new_text = std::make_shared(text, p_bound_font, drawable->GetMesh()); + m_drawables.push_back(new_text); + return *new_text; + } + } + + std::shared_ptr new_text = std::make_shared(text, p_bound_font); + m_drawables.push_back(new_text); + return *new_text; + } + + NonOwningPtr Scene::GetTextFromPositionAndColor(const std::string& text, const Vec2f& position, const Vec4f& color) const + { + MLX_PROFILE_FUNCTION(); + auto it = std::find_if(m_drawables.begin(), m_drawables.end(), [&text, &position, &color](std::shared_ptr drawable) + { + if(!drawable || drawable->GetType() != DrawableType::Text) + return false; + return static_cast(drawable.get())->GetText() == text && drawable->GetPosition() == position && drawable->GetColor() == color; + }); + return static_cast(it != m_drawables.end() ? 
it->get() : nullptr); + } + + bool Scene::IsTextAtGivenDrawLayer(const std::string& text, std::uint64_t draw_layer) const + { + MLX_PROFILE_FUNCTION(); + if(draw_layer >= m_drawables.size()) + return false; + if(!m_drawables[draw_layer] || m_drawables[draw_layer]->GetType() != DrawableType::Text) + return false; + Text* ptr = static_cast(m_drawables[draw_layer].get()); + return ptr->GetText() == text && ptr->GetFont() == p_bound_font; + } + + void Scene::BringToDrawLayer(NonOwningPtr drawable, std::uint64_t draw_layer) + { + MLX_PROFILE_FUNCTION(); + if(draw_layer < m_drawables.size()) + return; + auto it = std::find_if(m_drawables.begin(), m_drawables.end(), [&drawable](std::shared_ptr drawable_ptr) + { + return drawable_ptr.get() == drawable.Get(); + }); + if(it == m_drawables.end()) + return; + std::swap(*it, *(m_drawables.begin() + draw_layer)); + } +} diff --git a/runtime/Sources/Graphics/Sprite.cpp b/runtime/Sources/Graphics/Sprite.cpp new file mode 100644 index 0000000..3fd9ae0 --- /dev/null +++ b/runtime/Sources/Graphics/Sprite.cpp @@ -0,0 +1,56 @@ +#include +#include +#include +#include +#include + +namespace mlx +{ + std::shared_ptr CreateQuad(float x, float y, float width, float height) + { + MLX_PROFILE_FUNCTION(); + std::vector data(4); + + data[0].position = Vec4f(x, y, 0.0f, 1.0f); + data[0].uv = Vec2f(1.0f, 1.0f); + + data[1].position = Vec4f(x + width, y, 0.0f, 1.0f); + data[1].uv = Vec2f(0.0f, 1.0f); + + data[2].position = Vec4f(x + width, y + height, 0.0f, 1.0f); + data[2].uv = Vec2f(0.0f, 0.0f); + + data[3].position = Vec4f(x, y + height, 0.0f, 1.0f); + data[3].uv = Vec2f(1.0f, 0.0f); + + std::vector indices{ + 0, + 1, + 2, + 2, + 3, + 0, + }; + + std::shared_ptr mesh = std::make_shared(); + mesh->AddSubMesh({ std::move(data), std::move(indices) }); + return mesh; + } + + Sprite::Sprite(NonOwningPtr texture) : Drawable(DrawableType::Sprite) + { + MLX_PROFILE_FUNCTION(); + Verify((bool)texture, "Sprite: invalid texture (internal mlx issue, please report to devs)"); + p_mesh = CreateQuad(0, 0, texture->GetWidth(), texture->GetHeight()); + p_texture = texture; + } + + Sprite::Sprite(std::shared_ptr mesh, NonOwningPtr texture) : Drawable(DrawableType::Sprite) + { + MLX_PROFILE_FUNCTION(); + Verify((bool)texture, "Sprite: invalid texture (internal mlx issue, please report to devs)"); + Verify((bool)mesh, "Sprite: invalid mesh (internal mlx issue, please report to devs)"); + p_mesh = mesh; + p_texture = texture; + } +} diff --git a/runtime/Sources/Graphics/Text.cpp b/runtime/Sources/Graphics/Text.cpp new file mode 100644 index 0000000..90033d5 --- /dev/null +++ b/runtime/Sources/Graphics/Text.cpp @@ -0,0 +1,59 @@ +#include + +#include + +namespace mlx +{ + Text::Text(const std::string& text, std::shared_ptr font) : Drawable(DrawableType::Text) + { + MLX_PROFILE_FUNCTION(); + + Assert(font != nullptr, "invalid font"); + + std::vector vertex_data; + std::vector index_data; + + float stb_x = 0.0f; + float stb_y = 0.0f; + + const auto& char_data = font->GetCharData(); + for(char c : text) + { + if(c < 32) + continue; + + stbtt_aligned_quad q; + stbtt_GetPackedQuad(char_data.data(), RANGE, RANGE, c - 32, &stb_x, &stb_y, &q, 1); + + std::size_t index = vertex_data.size(); + + vertex_data.emplace_back(Vec4f{ q.x0, q.y0, 0.0f, 1.0f }, -Vec2f{ q.s0, q.t0 }); + vertex_data.emplace_back(Vec4f{ q.x1, q.y0, 0.0f, 1.0f }, -Vec2f{ q.s1, q.t0 }); + vertex_data.emplace_back(Vec4f{ q.x1, q.y1, 0.0f, 1.0f }, -Vec2f{ q.s1, q.t1 }); + vertex_data.emplace_back(Vec4f{ q.x0, q.y1, 0.0f, 1.0f }, 
-Vec2f{ q.s0, q.t1 }); + + index_data.emplace_back(index + 0); + index_data.emplace_back(index + 1); + index_data.emplace_back(index + 2); + index_data.emplace_back(index + 2); + index_data.emplace_back(index + 3); + index_data.emplace_back(index + 0); + } + + std::shared_ptr mesh = std::make_shared(); + mesh->AddSubMesh({ std::move(vertex_data), std::move(index_data) }); + Init(text, font, mesh); + } + + void Text::Init(const std::string& text, std::shared_ptr font, std::shared_ptr mesh) + { + MLX_PROFILE_FUNCTION(); + + Assert(font != nullptr, "invalid font"); + Assert(mesh != nullptr, "invalid mesh"); + + p_mesh = mesh; + p_font = font; + m_text = text; + } +} diff --git a/runtime/Sources/Platform/Inputs.cpp b/runtime/Sources/Platform/Inputs.cpp new file mode 100644 index 0000000..25ea36f --- /dev/null +++ b/runtime/Sources/Platform/Inputs.cpp @@ -0,0 +1,40 @@ +#include + +#include +#include +#include + +namespace mlx +{ + void Inputs::FetchInputs() + { + SDLManager::Get().InputsFetcher([this](mlx_event_type event, int window_id, int code) + { + if(!m_windows.contains(window_id)) + return; + if(!m_events_hooks.contains(window_id) || !m_events_hooks[window_id][event].hook) + return; + m_events_hooks[window_id][event].hook(code, m_events_hooks[window_id][event].param); + }); + } + + std::int32_t Inputs::GetX() const noexcept + { + return SDLManager::Get().GetX(); + } + + std::int32_t Inputs::GetY() const noexcept + { + return SDLManager::Get().GetY(); + } + + std::int32_t Inputs::GetXRel() const noexcept + { + return SDLManager::Get().GetXRel(); + } + + std::int32_t Inputs::GetYRel() const noexcept + { + return SDLManager::Get().GetYRel(); + } +} diff --git a/runtime/Sources/Platform/Window.cpp b/runtime/Sources/Platform/Window.cpp new file mode 100644 index 0000000..53bd6c3 --- /dev/null +++ b/runtime/Sources/Platform/Window.cpp @@ -0,0 +1,21 @@ +#include + +#include +#include + +namespace mlx +{ + Window::Window(std::size_t w, std::size_t h, const std::string& title, bool is_resizable, bool hidden) : m_name(title), m_width(w), m_height(h) + { + p_window = SDLManager::Get().CreateWindow(title, w, h, hidden, m_id, is_resizable); + } + + void Window::Destroy() noexcept + { + if(p_window != nullptr) + { + SDLManager::Get().DestroyWindow(p_window); + p_window = nullptr; + } + } +} diff --git a/runtime/Sources/Renderer/Buffer.cpp b/runtime/Sources/Renderer/Buffer.cpp new file mode 100644 index 0000000..95a70c3 --- /dev/null +++ b/runtime/Sources/Renderer/Buffer.cpp @@ -0,0 +1,228 @@ +#include +#include +#include + +namespace mlx +{ + void GPUBuffer::Init(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, CPUBuffer data, [[maybe_unused]] std::string_view debug_name) + { + MLX_PROFILE_FUNCTION(); + VmaAllocationCreateInfo alloc_info{}; + alloc_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + alloc_info.usage = VMA_MEMORY_USAGE_AUTO; + + if(type == BufferType::Constant) + { + if(data.Empty()) + { + Warning("Vulkan: trying to create constant buffer without data (constant buffers cannot be modified after creation)"); + return; + } + m_usage = usage | VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + } + else if(type == BufferType::HighDynamic) + m_usage = usage; + else // LowDynamic or Staging + m_usage = usage | VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + + CreateBuffer(size, m_usage, alloc_info, std::move(debug_name)); + + if(!data.Empty()) + { + if(p_map != nullptr) + std::memcpy(p_map, data.GetData(), data.GetSize()); + } + if(type == BufferType::Constant || type == 
BufferType::LowDynamic) + PushToGPU(); + } + + void GPUBuffer::CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VmaAllocationCreateInfo alloc_info, [[maybe_unused]] std::string_view debug_name) + { + MLX_PROFILE_FUNCTION(); + VkBufferCreateInfo bufferInfo{}; + bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + bufferInfo.size = size; + bufferInfo.usage = usage; + bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + + #ifdef DEBUG + m_debug_name = debug_name; + std::string alloc_name{ debug_name }; + if(usage & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) + alloc_name.append("_index_buffer"); + else if(usage & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) + alloc_name.append("_vertex_buffer"); + else if(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) + alloc_name.append("_uniform_buffer"); + else + alloc_name.append("_buffer"); + m_allocation = RenderCore::Get().GetAllocator().CreateBuffer(&bufferInfo, &alloc_info, m_buffer, alloc_name.c_str()); + #else + m_allocation = RenderCore::Get().GetAllocator().CreateBuffer(&bufferInfo, &alloc_info, m_buffer, nullptr); + #endif + if(alloc_info.flags != 0) + RenderCore::Get().GetAllocator().MapMemory(m_allocation, &p_map); + m_size = size; + } + + bool GPUBuffer::CopyFrom(const GPUBuffer& buffer) noexcept + { + MLX_PROFILE_FUNCTION(); + if(!(m_usage & VK_BUFFER_USAGE_TRANSFER_DST_BIT)) + { + Error("Vulkan: buffer cannot be the destination of a copy because it does not have the correct usage flag"); + return false; + } + if(!(buffer.m_usage & VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) + { + Error("Vulkan: buffer cannot be the source of a copy because it does not have the correct usage flag"); + return false; + } + + VkCommandBuffer cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + kvfCopyBufferToBuffer(cmd, m_buffer, buffer.Get(), m_size); + kvfEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice()); + kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence); + kvfDestroyFence(RenderCore::Get().GetDevice(), fence); + return true; + } + + void GPUBuffer::PushToGPU() noexcept + { + MLX_PROFILE_FUNCTION(); + VmaAllocationCreateInfo alloc_info{}; + alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + + GPUBuffer new_buffer; + new_buffer.m_usage = (this->m_usage & 0xFFFFFFFC) | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + + #ifdef DEBUG + std::string new_name = m_debug_name + "_gpu"; + new_buffer.CreateBuffer(m_size, new_buffer.m_usage, alloc_info, new_name); + #else + new_buffer.CreateBuffer(m_size, new_buffer.m_usage, alloc_info, {}); + #endif + + if(new_buffer.CopyFrom(*this)) + Swap(new_buffer); + new_buffer.Destroy(); + DebugLog("Vulkan: pushed buffer to GPU memory"); + } + + void GPUBuffer::Destroy() noexcept + { + MLX_PROFILE_FUNCTION(); + if(m_buffer == VK_NULL_HANDLE) + return; + RenderCore::Get().GetAllocator().UnmapMemory(m_allocation); + #ifdef DEBUG + std::string alloc_name{ m_debug_name }; + if(m_usage & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) + alloc_name.append("_index_buffer"); + else if(m_usage & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) + alloc_name.append("_vertex_buffer"); + else if(m_usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) + alloc_name.append("_uniform_buffer"); + else + alloc_name.append("_buffer"); + RenderCore::Get().GetAllocator().DestroyBuffer(m_allocation, m_buffer, alloc_name.c_str()); + #else + RenderCore::Get().GetAllocator().DestroyBuffer(m_allocation, m_buffer, nullptr); + #endif + m_buffer = 
VK_NULL_HANDLE; + } + + void GPUBuffer::Swap(GPUBuffer& buffer) noexcept + { + MLX_PROFILE_FUNCTION(); + std::swap(m_buffer, buffer.m_buffer); + std::swap(m_allocation, buffer.m_allocation); + std::swap(m_size, buffer.m_size); + std::swap(m_offset, buffer.m_offset); + std::swap(p_map, buffer.p_map); + std::swap(m_usage, buffer.m_usage); + } + + void VertexBuffer::SetData(CPUBuffer data) + { + MLX_PROFILE_FUNCTION(); + if(data.GetSize() > m_size) + { + Error("Vulkan: trying to store too much data in a vertex buffer (% bytes in % bytes)", data.GetSize(), m_size); + return; + } + if(data.Empty()) + { + Warning("Vulkan: cannot set empty data in a vertex buffer"); + return; + } + GPUBuffer staging; + #ifdef DEBUG + staging.Init(BufferType::Staging, data.GetSize(), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, data, m_debug_name); + #else + staging.Init(BufferType::Staging, data.GetSize(), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, data, {}); + #endif + CopyFrom(staging); + staging.Destroy(); + } + + void IndexBuffer::SetData(CPUBuffer data) + { + MLX_PROFILE_FUNCTION(); + if(data.GetSize() > m_size) + { + Error("Vulkan: trying to store too much data in an index buffer (% bytes in % bytes)", data.GetSize(), m_size); + return; + } + if(data.Empty()) + { + Warning("Vulkan: cannot set empty data in an index buffer"); + return; + } + GPUBuffer staging; + #ifdef DEBUG + staging.Init(BufferType::Staging, data.GetSize(), VK_BUFFER_USAGE_INDEX_BUFFER_BIT, data, m_debug_name); + #else + staging.Init(BufferType::Staging, data.GetSize(), VK_BUFFER_USAGE_INDEX_BUFFER_BIT, data, {}); + #endif + CopyFrom(staging); + staging.Destroy(); + } + + void UniformBuffer::Init(std::uint32_t size, [[maybe_unused]] std::string_view debug_name) + { + MLX_PROFILE_FUNCTION(); + for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + #ifdef DEBUG + m_buffers[i].Init(BufferType::HighDynamic, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, {}, std::string{ debug_name } + '_' + std::to_string(i)); + #else + m_buffers[i].Init(BufferType::HighDynamic, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, {}, {}); + #endif + m_maps[i] = m_buffers[i].GetMap(); + if(m_maps[i] == nullptr) + FatalError("Vulkan: unable to map a uniform buffer"); + } + } + + void UniformBuffer::SetData(CPUBuffer data, std::size_t frame_index) + { + MLX_PROFILE_FUNCTION(); + if(data.GetSize() != m_buffers[frame_index].GetSize()) + { + Error("Vulkan: invalid data size to update a uniform buffer, % != %", data.GetSize(), m_buffers[frame_index].GetSize()); + return; + } + if(m_maps[frame_index] != nullptr) + std::memcpy(m_maps[frame_index], data.GetData(), data.GetSize()); + } + + void UniformBuffer::Destroy() noexcept + { + MLX_PROFILE_FUNCTION(); + for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + m_buffers[i].Destroy(); + } +} diff --git a/runtime/Sources/Renderer/Descriptor.cpp b/runtime/Sources/Renderer/Descriptor.cpp new file mode 100644 index 0000000..6436770 --- /dev/null +++ b/runtime/Sources/Renderer/Descriptor.cpp @@ -0,0 +1,271 @@ +#include + +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + constexpr std::size_t MAX_SETS_PER_POOL = MAX_FRAMES_IN_FLIGHT * 1024; + + void TransitionImageToCorrectLayout(Image& image, VkCommandBuffer cmd) + { + MLX_PROFILE_FUNCTION(); + if(!image.IsInit()) + return; + if(image.GetType() == ImageType::Color) + image.TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, cmd); + else + Error("Vulkan: cannot transition descriptor image layout, unknown image type"); + } + + void DescriptorPool::Init() noexcept + { +
MLX_PROFILE_FUNCTION(); + VkDescriptorPoolSize pool_sizes[] = { + { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, MAX_SETS_PER_POOL }, + { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, MAX_SETS_PER_POOL }, + { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, MAX_SETS_PER_POOL } + }; + + VkDescriptorPoolCreateInfo pool_info{}; + pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; + pool_info.poolSizeCount = sizeof(pool_sizes) / sizeof(pool_sizes[0]); + pool_info.pPoolSizes = pool_sizes; + pool_info.maxSets = MAX_SETS_PER_POOL; + pool_info.flags = 0; + kvfCheckVk(RenderCore::Get().vkCreateDescriptorPool(RenderCore::Get().GetDevice(), &pool_info, nullptr, &m_pool)); + m_allocation_count = 0; + } + + void DescriptorPool::Destroy() noexcept + { + MLX_PROFILE_FUNCTION(); + if(m_pool == VK_NULL_HANDLE) + return; + for(auto& set : m_free_sets) + kvfDestroyDescriptorSetLayout(RenderCore::Get().GetDevice(), set->m_set_layout); + for(auto& set : m_used_sets) + kvfDestroyDescriptorSetLayout(RenderCore::Get().GetDevice(), set->m_set_layout); + RenderCore::Get().vkDestroyDescriptorPool(RenderCore::Get().GetDevice(), m_pool, nullptr); + m_pool = VK_NULL_HANDLE; + m_allocation_count = 0; + m_free_sets.clear(); + m_used_sets.clear(); + } + + std::shared_ptr DescriptorPool::RequestDescriptorSet(const ShaderSetLayout& layout, ShaderType shader_type) + { + MLX_PROFILE_FUNCTION(); + auto it = std::find_if(m_free_sets.begin(), m_free_sets.end(), [&](std::shared_ptr set) + { + return shader_type == set->GetShaderType() && layout == set->GetShaderLayout(); + }); + if(it != m_free_sets.end()) + { + std::shared_ptr set = *it; + m_free_sets.erase(it); + m_used_sets.push_back(set); + return set; + } + + std::array vulkan_sets; + + VkShaderStageFlagBits vulkan_shader_stage; + switch(shader_type) + { + case ShaderType::Vertex: vulkan_shader_stage = VK_SHADER_STAGE_VERTEX_BIT; break; + case ShaderType::Fragment: vulkan_shader_stage = VK_SHADER_STAGE_FRAGMENT_BIT; break; + + default : FatalError("wtf"); break; + } + + std::vector bindings(layout.binds.size()); + for(std::size_t i = 0; i < layout.binds.size(); i++) + { + bindings[i].binding = layout.binds[i].first; + bindings[i].descriptorCount = 1; + bindings[i].descriptorType = layout.binds[i].second; + bindings[i].pImmutableSamplers = nullptr; + bindings[i].stageFlags = vulkan_shader_stage; + } + VkDescriptorSetLayout vulkan_layout = kvfCreateDescriptorSetLayout(RenderCore::Get().GetDevice(), bindings.data(), bindings.size()); + + for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + VkDescriptorSetAllocateInfo alloc_info = {}; + alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + alloc_info.descriptorPool = m_pool; + alloc_info.descriptorSetCount = 1; + alloc_info.pSetLayouts = &vulkan_layout; + VkDescriptorSet vulkan_set; + kvfCheckVk(RenderCore::Get().vkAllocateDescriptorSets(RenderCore::Get().GetDevice(), &alloc_info, &vulkan_set)); + m_allocation_count++; + vulkan_sets[i] = vulkan_set; + } + + std::shared_ptr set(new DescriptorSet(*this, vulkan_layout, layout, std::move(vulkan_sets), shader_type)); + m_used_sets.push_back(set); + return set; + } + + void DescriptorPool::ReturnDescriptorSet(std::shared_ptr set) + { + auto it = std::find_if(m_used_sets.begin(), m_used_sets.end(), [&](std::shared_ptr rhs_set) + { + return set == rhs_set; + }); + if(it == m_used_sets.end()) + return; + m_used_sets.erase(it); + m_free_sets.push_back(set); + } + + DescriptorPool& DescriptorPoolManager::GetAvailablePool() + { + MLX_PROFILE_FUNCTION(); + for(auto& pool : m_pools) + { + 
if(pool.GetNumberOfSetsAllocated() < MAX_SETS_PER_POOL) + return pool; + } + m_pools.emplace_back().Init(); + return m_pools.back(); + } + + void DescriptorPoolManager::Destroy() + { + MLX_PROFILE_FUNCTION(); + for(auto& pool : m_pools) + pool.Destroy(); + m_pools.clear(); + } + + DescriptorSet::DescriptorSet(DescriptorPool& pool, VkDescriptorSetLayout vulkan_layout, const ShaderSetLayout& layout, std::array vulkan_sets, ShaderType shader_type) : + m_shader_layout(layout), + m_sets(std::move(vulkan_sets)), + m_set_layout(vulkan_layout), + m_shader_type(shader_type), + m_pool(pool) + { + MLX_PROFILE_FUNCTION(); + for(auto& [binding, type] : layout.binds) + { + m_descriptors.emplace_back(); + m_descriptors.back().type = type; + m_descriptors.back().binding = binding; + } + } + + void DescriptorSet::SetImage(std::size_t i, std::uint32_t binding, class Image& image) + { + MLX_PROFILE_FUNCTION(); + Verify(m_sets[i] != VK_NULL_HANDLE, "invalid descriptor"); + auto it = std::find_if(m_descriptors.begin(), m_descriptors.end(), [=](Descriptor descriptor) + { + return binding == descriptor.binding; + }); + if(it == m_descriptors.end()) + { + Warning("Vulkan: cannot update descriptor set image; invalid binding"); + return; + } + if(it->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) + { + Error("Vulkan: trying to bind an image to the wrong descriptor"); + return; + } + it->image_ptr = ℑ + } + + void DescriptorSet::SetStorageBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer) + { + MLX_PROFILE_FUNCTION(); + Verify(m_sets[i] != VK_NULL_HANDLE, "invalid descriptor"); + auto it = std::find_if(m_descriptors.begin(), m_descriptors.end(), [=](Descriptor descriptor) + { + return binding == descriptor.binding; + }); + if(it == m_descriptors.end()) + { + Warning("Vulkan: cannot update descriptor set buffer; invalid binding"); + return; + } + if(it->type != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) + { + Error("Vulkan: trying to bind a buffer to the wrong descriptor"); + return; + } + it->storage_buffer_ptr = &buffer; + } + + void DescriptorSet::SetUniformBuffer(std::size_t i, std::uint32_t binding, class GPUBuffer& buffer) + { + MLX_PROFILE_FUNCTION(); + Verify(m_sets[i] != VK_NULL_HANDLE, "invalid descriptor"); + auto it = std::find_if(m_descriptors.begin(), m_descriptors.end(), [=](Descriptor descriptor) + { + return binding == descriptor.binding; + }); + if(it == m_descriptors.end()) + { + Warning("Vulkan: cannot update descriptor set buffer; invalid binding"); + return; + } + if(it->type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) + { + Error("Vulkan: trying to bind a buffer to the wrong descriptor"); + return; + } + it->uniform_buffer_ptr = &buffer; + } + + void DescriptorSet::Update(std::size_t i, VkCommandBuffer cmd) noexcept + { + MLX_PROFILE_FUNCTION(); + Verify(m_sets[i] != VK_NULL_HANDLE, "invalid descriptor"); + std::vector writes; + std::vector buffer_infos; + std::vector image_infos; + for(auto& descriptor : m_descriptors) + { + if(descriptor.image_ptr) + { + TransitionImageToCorrectLayout(*descriptor.image_ptr, cmd); + VkDescriptorImageInfo info{}; + info.sampler = descriptor.image_ptr->GetSampler(); + info.imageLayout = descriptor.image_ptr->GetLayout(); + info.imageView = descriptor.image_ptr->GetImageView(); + image_infos.push_back(info); + writes.push_back(kvfWriteImageToDescriptorSet(RenderCore::Get().GetDevice(), m_sets[i], &image_infos.back(), descriptor.binding)); + } + else if(descriptor.uniform_buffer_ptr) + { + VkDescriptorBufferInfo info{}; + info.buffer = 
descriptor.uniform_buffer_ptr->Get(); + info.offset = descriptor.uniform_buffer_ptr->GetOffset(); + info.range = VK_WHOLE_SIZE; + buffer_infos.push_back(info); + writes.push_back(kvfWriteUniformBufferToDescriptorSet(RenderCore::Get().GetDevice(), m_sets[i], &buffer_infos.back(), descriptor.binding)); + } + else if(descriptor.storage_buffer_ptr) + { + VkDescriptorBufferInfo info{}; + info.buffer = descriptor.storage_buffer_ptr->Get(); + info.offset = descriptor.storage_buffer_ptr->GetOffset(); + info.range = VK_WHOLE_SIZE; + buffer_infos.push_back(info); + writes.push_back(kvfWriteStorageBufferToDescriptorSet(RenderCore::Get().GetDevice(), m_sets[i], &buffer_infos.back(), descriptor.binding)); + } + } + RenderCore::Get().vkUpdateDescriptorSets(RenderCore::Get().GetDevice(), writes.size(), writes.data(), 0, nullptr); + } + + void DescriptorSet::ReturnDescriptorSetToPool() + { + m_pool.ReturnDescriptorSet(shared_from_this()); + } +} diff --git a/runtime/Sources/Renderer/Image.cpp b/runtime/Sources/Renderer/Image.cpp new file mode 100644 index 0000000..f4f1276 --- /dev/null +++ b/runtime/Sources/Renderer/Image.cpp @@ -0,0 +1,313 @@ +#include +#include +#include +#include +#include +#include +#include + +#define STB_IMAGE_IMPLEMENTATION + +#define STBI_ASSERT(x) (mlx::Assert(x, "internal stb assertion " #x)) +#define STBI_MALLOC(x) (mlx::MemManager::Get().Malloc(x)) +#define STBI_REALLOC(p, x) (mlx::MemManager::Get().Realloc(p, x)) +#define STBI_FREE(x) (mlx::MemManager::Get().Free(x)) + +#ifdef MLX_COMPILER_GCC + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstringop-overflow" + #include + #pragma GCC diagnostic pop +#else + #include +#endif + +#ifdef IMAGE_OPTIMIZED + #define TILING VK_IMAGE_TILING_OPTIMAL +#else + #define TILING VK_IMAGE_TILING_LINEAR +#endif + +namespace mlx +{ + void Image::Init(ImageType type, std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, bool is_multisampled, [[maybe_unused]] std::string_view debug_name) + { + MLX_PROFILE_FUNCTION(); + m_type = type; + m_width = width; + m_height = height; + m_format = format; + m_tiling = tiling; + m_is_multisampled = is_multisampled; + #ifdef DEBUG + m_debug_name = std::move(debug_name); + #endif + + VmaAllocationCreateInfo alloc_info{}; + alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + + VkImageCreateInfo image_info{}; + image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + image_info.imageType = VK_IMAGE_TYPE_2D; + image_info.extent.width = width; + image_info.extent.height = height; + image_info.extent.depth = 1; + image_info.mipLevels = 1; + image_info.arrayLayers = 1; + image_info.format = format; + image_info.tiling = tiling; + image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + image_info.usage = usage; + image_info.samples = (m_is_multisampled ? 
VK_SAMPLE_COUNT_4_BIT : VK_SAMPLE_COUNT_1_BIT); + image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + #ifdef DEBUG + m_allocation = RenderCore::Get().GetAllocator().CreateImage(&image_info, &alloc_info, m_image, m_debug_name.c_str()); + #else + m_allocation = RenderCore::Get().GetAllocator().CreateImage(&image_info, &alloc_info, m_image, nullptr); + #endif + } + + void Image::CreateImageView(VkImageViewType type, VkImageAspectFlags aspect_flags, int layer_count) noexcept + { + MLX_PROFILE_FUNCTION(); + m_image_view = kvfCreateImageView(RenderCore::Get().GetDevice(), m_image, m_format, type, aspect_flags, layer_count); + #ifdef MLX_HAS_DEBUG_UTILS_FUNCTIONS + VkDebugUtilsObjectNameInfoEXT name_info{}; + name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + name_info.objectType = VK_OBJECT_TYPE_IMAGE_VIEW; + name_info.objectHandle = reinterpret_cast(m_image_view); + name_info.pObjectName = m_debug_name.c_str(); + RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + #endif + } + + void Image::CreateSampler() noexcept + { + MLX_PROFILE_FUNCTION(); + m_sampler = kvfCreateSampler(RenderCore::Get().GetDevice(), VK_FILTER_NEAREST, VK_SAMPLER_ADDRESS_MODE_REPEAT, VK_SAMPLER_MIPMAP_MODE_NEAREST); + } + + void Image::TransitionLayout(VkImageLayout new_layout, VkCommandBuffer cmd) + { + MLX_PROFILE_FUNCTION(); + if(new_layout == m_layout) + return; + bool is_single_time_cmd_buffer = (cmd == VK_NULL_HANDLE); + if(is_single_time_cmd_buffer) + cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfTransitionImageLayout(RenderCore::Get().GetDevice(), m_image, KVF_IMAGE_COLOR, cmd, m_format, m_layout, new_layout, is_single_time_cmd_buffer); + m_layout = new_layout; + } + + void Image::Clear(VkCommandBuffer cmd, Vec4f color) + { + MLX_PROFILE_FUNCTION(); + VkImageSubresourceRange subresource_range{}; + subresource_range.baseMipLevel = 0; + subresource_range.layerCount = 1; + subresource_range.levelCount = 1; + subresource_range.baseArrayLayer = 0; + + bool is_single_time_cmd_buffer = (cmd == VK_NULL_HANDLE); + if(is_single_time_cmd_buffer) + { + cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + } + + VkImageLayout old_layout = m_layout; + TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cmd); + subresource_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + VkClearColorValue clear_color = VkClearColorValue({ { color.x, color.y, color.z, color.w } }); + RenderCore::Get().vkCmdClearColorImage(cmd, m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource_range); + TransitionLayout(old_layout, cmd); + + if(is_single_time_cmd_buffer) + { + kvfEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice()); + kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence); + kvfDestroyFence(RenderCore::Get().GetDevice(), fence); + } + } + + void Image::DestroySampler() noexcept + { + MLX_PROFILE_FUNCTION(); + if(m_sampler != VK_NULL_HANDLE) + kvfDestroySampler(RenderCore::Get().GetDevice(), m_sampler); + m_sampler = VK_NULL_HANDLE; + } + + void Image::DestroyImageView() noexcept + { + MLX_PROFILE_FUNCTION(); + if(m_image_view != VK_NULL_HANDLE) + kvfDestroyImageView(RenderCore::Get().GetDevice(), m_image_view); + m_image_view = VK_NULL_HANDLE; + } + + void Image::Destroy() noexcept + { + MLX_PROFILE_FUNCTION(); + DestroySampler(); + DestroyImageView(); + + if(m_image != 
VK_NULL_HANDLE) + { + #ifdef DEBUG + RenderCore::Get().GetAllocator().DestroyImage(m_allocation, m_image, m_debug_name.c_str()); + #else + RenderCore::Get().GetAllocator().DestroyImage(m_allocation, m_image, nullptr); + #endif + } + m_image = VK_NULL_HANDLE; + m_layout = VK_IMAGE_LAYOUT_UNDEFINED; + m_width = 0; + m_height = 0; + m_is_multisampled = false; + } + + void Texture::Init(CPUBuffer pixels, std::uint32_t width, std::uint32_t height, VkFormat format, bool is_multisampled, [[maybe_unused]] std::string_view debug_name) + { + MLX_PROFILE_FUNCTION(); + Image::Init(ImageType::Color, width, height, format, TILING, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, is_multisampled, std::move(debug_name)); + Image::CreateImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); + Image::CreateSampler(); + if(pixels) + { + GPUBuffer staging_buffer; + std::size_t size = width * height * kvfFormatSize(format); + staging_buffer.Init(BufferType::Staging, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, std::move(pixels), debug_name); + VkCommandBuffer cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cmd); + kvfCopyBufferToImage(cmd, Image::Get(), staging_buffer.Get(), staging_buffer.GetOffset(), VK_IMAGE_ASPECT_COLOR_BIT, { width, height, 1 }); + kvfEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice()); + kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence); + kvfDestroyFence(RenderCore::Get().GetDevice(), fence); + staging_buffer.Destroy(); + } + TransitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + } + + void Texture::Destroy() noexcept + { + if(m_staging_buffer.has_value()) + m_staging_buffer->Destroy(); + Image::Destroy(); + } + + void Texture::SetPixel(int x, int y, std::uint32_t color) noexcept + { + MLX_PROFILE_FUNCTION(); + if(x < 0 || y < 0 || static_cast(x) > m_width || static_cast(y) > m_height) + return; + if(!m_staging_buffer.has_value()) + OpenCPUBuffer(); + m_cpu_buffer[(y * m_width) + x] = color; + m_has_been_modified = true; + } + + int Texture::GetPixel(int x, int y) noexcept + { + MLX_PROFILE_FUNCTION(); + if(x < 0 || y < 0 || static_cast(x) > m_width || static_cast(y) > m_height) + return 0; + if(!m_staging_buffer.has_value()) + OpenCPUBuffer(); + std::uint32_t color = m_cpu_buffer[(y * m_width) + x]; + std::uint8_t* bytes = reinterpret_cast(&color); + std::uint8_t tmp = bytes[0]; + bytes[0] = bytes[2]; + bytes[2] = tmp; + return *reinterpret_cast(bytes); + } + + void Texture::Update(VkCommandBuffer cmd) + { + MLX_PROFILE_FUNCTION(); + if(!m_has_been_modified) + return; + std::memcpy(m_staging_buffer->GetMap(), m_cpu_buffer.data(), m_cpu_buffer.size() * kvfFormatSize(m_format)); + + VkImageLayout old_layout = m_layout; + TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cmd); + kvfCopyBufferToImage(cmd, Image::Get(), m_staging_buffer->Get(), m_staging_buffer->GetOffset(), VK_IMAGE_ASPECT_COLOR_BIT, { m_width, m_height, 1 }); + TransitionLayout(old_layout, cmd); + + m_has_been_modified = false; + } + + void Texture::OpenCPUBuffer() + { + MLX_PROFILE_FUNCTION(); + if(m_staging_buffer.has_value()) + return; + #ifdef DEBUG + DebugLog("Texture: enabling CPU mapping for '%'", m_debug_name); + #endif + m_staging_buffer.emplace(); + std::size_t size = m_width * m_height * 
kvfFormatSize(m_format); + #ifdef DEBUG + m_staging_buffer->Init(BufferType::Staging, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, {}, m_debug_name); + #else + m_staging_buffer->Init(BufferType::Staging, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, {}, {}); + #endif + + VkImageLayout old_layout = m_layout; + VkCommandBuffer cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + TransitionLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, cmd); + kvfCopyImageToBuffer(cmd, m_staging_buffer->Get(), m_image, m_staging_buffer->GetOffset(), VK_IMAGE_ASPECT_COLOR_BIT, { m_width, m_height, 1 }); + TransitionLayout(old_layout, cmd); + kvfEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice()); + kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence); + kvfDestroyFence(RenderCore::Get().GetDevice(), fence); + + m_cpu_buffer.resize(m_width * m_height); + std::memcpy(m_cpu_buffer.data(), m_staging_buffer->GetMap(), m_cpu_buffer.size()); + } + + Texture* StbTextureLoad(const std::filesystem::path& file, int* w, int* h) + { + using namespace std::literals; + MLX_PROFILE_FUNCTION(); + std::string filename = file.string(); + + if(file.stem() == "terracotta.pie") + Message("banana, banana, banana, banana, terracotta banana terracotta, terracotta pie"); + + if(!std::filesystem::exists(file)) + { + Error("Image: file not found %", file); + return nullptr; + } + if(stbi_is_hdr(filename.c_str())) + { + Error("Texture: unsupported image format from % (HDR image)", file); + return nullptr; + } + + Vec2i size; + int channels; + + std::uint8_t* data = stbi_load(filename.c_str(), &size.x, &size.y, &channels, 4); + CallOnExit defer([=]() { stbi_image_free(data); }); + + CPUBuffer buffer(size.x * size.y * 4); + std::memcpy(buffer.GetData(), data, buffer.GetSize()); + + if(w != nullptr) + *w = size.x; + if(h != nullptr) + *h = size.y; + + Texture* texture; + try { texture = new Texture(std::move(buffer), size.x, size.y, VK_FORMAT_R8G8B8A8_SRGB, false, std::move(filename)); } + catch(...) 
{ return nullptr; } + return texture; + } +} diff --git a/runtime/Sources/Renderer/Memory.cpp b/runtime/Sources/Renderer/Memory.cpp new file mode 100644 index 0000000..6243bab --- /dev/null +++ b/runtime/Sources/Renderer/Memory.cpp @@ -0,0 +1,177 @@ +#include +#define VMA_IMPLEMENTATION +#ifdef MLX_COMPILER_CLANG + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Weverything" + #include + #pragma clang diagnostic pop +#elif defined(MLX_COMPILER_GCC) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" + #pragma GCC diagnostic ignored "-Wmissing-field-initializers" + #pragma GCC diagnostic ignored "-Wunused-parameter" + #pragma GCC diagnostic ignored "-Wunused-variable" + #pragma GCC diagnostic ignored "-Wparentheses" + #include + #pragma GCC diagnostic pop +#else + #include +#endif + +#include + +namespace mlx +{ + void GPUAllocator::Init() noexcept + { + MLX_PROFILE_FUNCTION(); + VmaVulkanFunctions vma_vulkan_func{}; + vma_vulkan_func.vkAllocateMemory = RenderCore::Get().vkAllocateMemory; + vma_vulkan_func.vkBindBufferMemory = RenderCore::Get().vkBindBufferMemory; + vma_vulkan_func.vkBindImageMemory = RenderCore::Get().vkBindImageMemory; + vma_vulkan_func.vkCreateBuffer = RenderCore::Get().vkCreateBuffer; + vma_vulkan_func.vkCreateImage = RenderCore::Get().vkCreateImage; + vma_vulkan_func.vkDestroyBuffer = RenderCore::Get().vkDestroyBuffer; + vma_vulkan_func.vkDestroyImage = RenderCore::Get().vkDestroyImage; + vma_vulkan_func.vkFlushMappedMemoryRanges = RenderCore::Get().vkFlushMappedMemoryRanges; + vma_vulkan_func.vkFreeMemory = RenderCore::Get().vkFreeMemory; + vma_vulkan_func.vkGetBufferMemoryRequirements = RenderCore::Get().vkGetBufferMemoryRequirements; + vma_vulkan_func.vkGetImageMemoryRequirements = RenderCore::Get().vkGetImageMemoryRequirements; + vma_vulkan_func.vkGetPhysicalDeviceMemoryProperties = RenderCore::Get().vkGetPhysicalDeviceMemoryProperties; + vma_vulkan_func.vkGetPhysicalDeviceProperties = RenderCore::Get().vkGetPhysicalDeviceProperties; + vma_vulkan_func.vkInvalidateMappedMemoryRanges = RenderCore::Get().vkInvalidateMappedMemoryRanges; + vma_vulkan_func.vkMapMemory = RenderCore::Get().vkMapMemory; + vma_vulkan_func.vkUnmapMemory = RenderCore::Get().vkUnmapMemory; + vma_vulkan_func.vkCmdCopyBuffer = RenderCore::Get().vkCmdCopyBuffer; + + VmaAllocatorCreateInfo allocator_create_info{}; + allocator_create_info.vulkanApiVersion = VK_API_VERSION_1_0; + allocator_create_info.physicalDevice = RenderCore::Get().GetPhysicalDevice(); + allocator_create_info.device = RenderCore::Get().GetDevice(); + allocator_create_info.instance = RenderCore::Get().GetInstance(); + allocator_create_info.pVulkanFunctions = &vma_vulkan_func; + + kvfCheckVk(vmaCreateAllocator(&allocator_create_info, &m_allocator)); + DebugLog("Graphics Allocator: created new allocator"); + } + + VmaAllocation GPUAllocator::CreateBuffer(const VkBufferCreateInfo* binfo, const VmaAllocationCreateInfo* vinfo, VkBuffer& buffer, const char* name) noexcept + { + MLX_PROFILE_FUNCTION(); + VmaAllocation allocation; + kvfCheckVk(vmaCreateBuffer(m_allocator, binfo, vinfo, &buffer, &allocation, nullptr)); + if(name != nullptr) + { + #ifdef MLX_HAS_DEBUG_UTILS_FUNCTIONS + VkDebugUtilsObjectNameInfoEXT name_info{}; + name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + name_info.objectType = VK_OBJECT_TYPE_BUFFER; + name_info.objectHandle = reinterpret_cast(buffer); + name_info.pObjectName = name; + 
RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + #endif + vmaSetAllocationName(m_allocator, allocation, name); + } + DebugLog("Graphics Allocator: created new buffer '%'", name); + m_active_buffers_allocations++; + return allocation; + } + + void GPUAllocator::DestroyBuffer(VmaAllocation allocation, VkBuffer buffer, const char* name) noexcept + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().WaitDeviceIdle(); + vmaDestroyBuffer(m_allocator, buffer, allocation); + if(name != nullptr) + DebugLog("Graphics Allocator: destroyed buffer '%'", name); + else + DebugLog("Graphics Allocator: destroyed buffer"); + m_active_buffers_allocations--; + } + + VmaAllocation GPUAllocator::CreateImage(const VkImageCreateInfo* iminfo, const VmaAllocationCreateInfo* vinfo, VkImage& image, const char* name) noexcept + { + MLX_PROFILE_FUNCTION(); + VmaAllocation allocation; + kvfCheckVk(vmaCreateImage(m_allocator, iminfo, vinfo, &image, &allocation, nullptr)); + if(name != nullptr) + { + #ifdef MLX_HAS_DEBUG_UTILS_FUNCTIONS + VkDebugUtilsObjectNameInfoEXT name_info{}; + name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + name_info.objectType = VK_OBJECT_TYPE_IMAGE; + name_info.objectHandle = reinterpret_cast(image); + name_info.pObjectName = name; + RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + #endif + vmaSetAllocationName(m_allocator, allocation, name); + } + DebugLog("Graphics Allocator: created new image '%'", name); + m_active_images_allocations++; + return allocation; + } + + void GPUAllocator::DestroyImage(VmaAllocation allocation, VkImage image, const char* name) noexcept + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().WaitDeviceIdle(); + vmaDestroyImage(m_allocator, image, allocation); + if(name != nullptr) + DebugLog("Graphics Allocator: destroyed image '%'", name); + else + DebugLog("Graphics Allocator: destroyed image"); + m_active_images_allocations--; + } + + void GPUAllocator::MapMemory(VmaAllocation allocation, void** data) noexcept + { + MLX_PROFILE_FUNCTION(); + kvfCheckVk(vmaMapMemory(m_allocator, allocation, data)); + } + + void GPUAllocator::UnmapMemory(VmaAllocation allocation) noexcept + { + MLX_PROFILE_FUNCTION(); + vmaUnmapMemory(m_allocator, allocation); + } + + void GPUAllocator::DumpMemoryToJson() + { + static std::uint32_t id = 0; + std::string name("memory_dump"); + name.append(std::to_string(id) + ".json"); + std::ofstream file(name); + if(!file.is_open()) + { + Error("Graphics Allocator: unable to dump memory to a json file"); + return; + } + char* str = nullptr; + vmaBuildStatsString(m_allocator, &str, true); + file << str; + vmaFreeStatsString(m_allocator, str); + file.close(); + id++; + } + + void GPUAllocator::Flush(VmaAllocation allocation, VkDeviceSize size, VkDeviceSize offset) noexcept + { + MLX_PROFILE_FUNCTION(); + vmaFlushAllocation(m_allocator, allocation, offset, size); + } + + void GPUAllocator::Destroy() noexcept + { + MLX_PROFILE_FUNCTION(); + if(m_active_images_allocations != 0) + Error("Graphics Allocator: some user-dependent allocations were not freed before destroying the display (% active allocations). You may not have destroyed all the MLX resources you've created", m_active_images_allocations); + else if(m_active_buffers_allocations != 0) + Error("Graphics Allocator: some MLX-dependent allocations were not freed before destroying the display (% active allocations). 
This is an internal MLX error; please report it, as it should not happen", m_active_buffers_allocations); + if(m_active_images_allocations < 0 || m_active_buffers_allocations < 0) + Warning("Graphics Allocator: the impossible happened, the MLX has freed more allocations than it has made (wtf)"); + vmaDestroyAllocator(m_allocator); + m_active_buffers_allocations = 0; + m_active_images_allocations = 0; + DebugLog("Vulkan: destroyed a graphics allocator"); + } +} diff --git a/runtime/Sources/Renderer/Pipelines/Graphics.cpp b/runtime/Sources/Renderer/Pipelines/Graphics.cpp new file mode 100644 index 0000000..d7fba28 --- /dev/null +++ b/runtime/Sources/Renderer/Pipelines/Graphics.cpp @@ -0,0 +1,229 @@ +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + void GraphicPipeline::Init(const GraphicPipelineDescriptor& descriptor, [[maybe_unused]] std::string_view debug_name) + { + MLX_PROFILE_FUNCTION(); + if(!descriptor.vertex_shader || !descriptor.fragment_shader) + FatalError("Vulkan: invalid shaders"); + + m_attachments = descriptor.color_attachments; + p_vertex_shader = descriptor.vertex_shader; + p_fragment_shader = descriptor.fragment_shader; + p_renderer = descriptor.renderer; + + #ifdef DEBUG + m_debug_name = debug_name; + #endif + + std::vector push_constants; + std::vector set_layouts; + push_constants.insert(push_constants.end(), p_vertex_shader->GetPipelineLayout().push_constants.begin(), p_vertex_shader->GetPipelineLayout().push_constants.end()); + push_constants.insert(push_constants.end(), p_fragment_shader->GetPipelineLayout().push_constants.begin(), p_fragment_shader->GetPipelineLayout().push_constants.end()); + set_layouts.insert(set_layouts.end(), p_vertex_shader->GetPipelineLayout().set_layouts.begin(), p_vertex_shader->GetPipelineLayout().set_layouts.end()); + set_layouts.insert(set_layouts.end(), p_fragment_shader->GetPipelineLayout().set_layouts.begin(), p_fragment_shader->GetPipelineLayout().set_layouts.end()); + m_pipeline_layout = kvfCreatePipelineLayout(RenderCore::Get().GetDevice(), set_layouts.data(), set_layouts.size(), push_constants.data(), push_constants.size()); + + TransitionAttachments(); + CreateFramebuffers(m_attachments, descriptor.clear_color_attachments); + + KvfGraphicsPipelineBuilder* builder = kvfCreateGPipelineBuilder(); + kvfGPipelineBuilderAddShaderStage(builder, p_vertex_shader->GetShaderStage(), p_vertex_shader->GetShaderModule(), "main"); + kvfGPipelineBuilderAddShaderStage(builder, p_fragment_shader->GetShaderStage(), p_fragment_shader->GetShaderModule(), "main"); + kvfGPipelineBuilderSetInputTopology(builder, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST); + kvfGPipelineBuilderSetCullMode(builder, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE); + kvfGPipelineBuilderDisableDepthTest(builder); + kvfGPipelineBuilderSetPolygonMode(builder, VK_POLYGON_MODE_FILL, 1.0f); + kvfGPipelineBuilderSetMultisampling(builder, VK_SAMPLE_COUNT_1_BIT); + kvfGPipelineBuilderEnableAlphaBlending(builder); + + if(!descriptor.no_vertex_inputs) + { + VkVertexInputBindingDescription binding_description = Vertex::GetBindingDescription(); + auto attributes_description = Vertex::GetAttributeDescriptions(); + kvfGPipelineBuilderSetVertexInputs(builder, binding_description, attributes_description.data(), attributes_description.size()); + } + + m_pipeline = kvfCreateGraphicsPipeline(RenderCore::Get().GetDevice(), VK_NULL_HANDLE, m_pipeline_layout, builder, m_renderpass); + #ifdef DEBUG + DebugLog("Vulkan: graphics pipeline created %", m_debug_name); + #endif + 
kvfDestroyGPipelineBuilder(builder); + + #ifdef MLX_HAS_DEBUG_UTILS_FUNCTIONS + VkDebugUtilsObjectNameInfoEXT name_info{}; + name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + name_info.objectType = VK_OBJECT_TYPE_PIPELINE; + name_info.objectHandle = reinterpret_cast(m_pipeline); + name_info.pObjectName = m_debug_name.c_str(); + RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + + name_info.objectType = VK_OBJECT_TYPE_RENDER_PASS; + name_info.objectHandle = reinterpret_cast(m_renderpass); + RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + + name_info.objectType = VK_OBJECT_TYPE_SHADER_MODULE; + name_info.objectHandle = reinterpret_cast(p_vertex_shader->GetShaderModule()); + RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + + name_info.objectHandle = reinterpret_cast(p_fragment_shader->GetShaderModule()); + RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + + name_info.objectType = VK_OBJECT_TYPE_FRAMEBUFFER; + for(VkFramebuffer fb : m_framebuffers) + { + name_info.objectHandle = reinterpret_cast(fb); + RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info); + } + #endif + } + + bool GraphicPipeline::BindPipeline(VkCommandBuffer cmd, std::size_t framebuffer_index, std::array clear) noexcept + { + MLX_PROFILE_FUNCTION(); + TransitionAttachments(cmd); + VkFramebuffer fb = m_framebuffers[framebuffer_index]; + VkExtent2D fb_extent = kvfGetFramebufferSize(fb); + + VkViewport viewport{}; + viewport.x = 0.0f; + viewport.y = 0.0f; + viewport.width = fb_extent.width; + viewport.height = fb_extent.height; + viewport.minDepth = 0.0f; + viewport.maxDepth = 1.0f; + RenderCore::Get().vkCmdSetViewport(cmd, 0, 1, &viewport); + + VkRect2D scissor{}; + scissor.offset = { 0, 0 }; + scissor.extent = fb_extent; + RenderCore::Get().vkCmdSetScissor(cmd, 0, 1, &scissor); + + for(std::size_t i = 0; i < m_clears.size(); i++) + { + m_clears[i].color.float32[0] = clear[0]; + m_clears[i].color.float32[1] = clear[1]; + m_clears[i].color.float32[2] = clear[2]; + m_clears[i].color.float32[3] = clear[3]; + } + + kvfBeginRenderPass(m_renderpass, cmd, fb, fb_extent, m_clears.data(), m_clears.size()); + RenderCore::Get().vkCmdBindPipeline(cmd, GetPipelineBindPoint(), GetPipeline()); + return true; + } + + void GraphicPipeline::EndPipeline(VkCommandBuffer cmd) noexcept + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().vkCmdEndRenderPass(cmd); + } + + void GraphicPipeline::Destroy() noexcept + { + MLX_PROFILE_FUNCTION(); + p_vertex_shader.reset(); + p_fragment_shader.reset(); + for(auto fb : m_framebuffers) + { + kvfDestroyFramebuffer(RenderCore::Get().GetDevice(), fb); + #ifdef DEBUG + DebugLog("Vulkan: framebuffer destroyed in %", m_debug_name); + #endif + } + m_framebuffers.clear(); + + kvfDestroyPipelineLayout(RenderCore::Get().GetDevice(), m_pipeline_layout); + m_pipeline_layout = VK_NULL_HANDLE; + #ifdef DEBUG + DebugLog("Vulkan: graphics pipeline layout destroyed %", m_debug_name); + #endif + + kvfDestroyRenderPass(RenderCore::Get().GetDevice(), m_renderpass); + m_renderpass = VK_NULL_HANDLE; + #ifdef DEBUG + DebugLog("Vulkan: renderpass destroyed for %", m_debug_name); + #endif + + kvfDestroyPipeline(RenderCore::Get().GetDevice(), m_pipeline); + m_pipeline = VK_NULL_HANDLE; + #ifdef DEBUG + DebugLog("Vulkan: graphics pipeline destroyed %", m_debug_name); + #endif + + p_renderer = nullptr; + 
m_clears.clear(); + m_attachments.clear(); + } + + void GraphicPipeline::CreateFramebuffers(const std::vector>& render_targets, bool clear_attachments) + { + MLX_PROFILE_FUNCTION(); + std::vector attachments; + std::vector attachment_views; + std::vector dependencies; + + if(p_renderer) + { + attachments.push_back(kvfBuildSwapchainAttachmentDescription(p_renderer->GetSwapchain().Get(), clear_attachments)); + attachment_views.push_back(p_renderer->GetSwapchain().GetSwapchainImages()[0].GetImageView()); + } + + for(NonOwningPtr image : render_targets) + { + attachments.push_back(kvfBuildAttachmentDescription(KVF_IMAGE_COLOR, image->GetFormat(), image->GetLayout(), image->GetLayout(), clear_attachments, VK_SAMPLE_COUNT_1_BIT)); + attachment_views.push_back(image->GetImageView()); + } + + VkSubpassDependency& dependency = dependencies.emplace_back(); + dependency.srcSubpass = VK_SUBPASS_EXTERNAL; + dependency.dstSubpass = 0; + dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + dependency.srcAccessMask = 0; + dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + dependency.dependencyFlags = 0; + + m_renderpass = kvfCreateRenderPassWithSubpassDependencies(RenderCore::Get().GetDevice(), attachments.data(), attachments.size(), GetPipelineBindPoint(), dependencies.data(), dependencies.size()); + m_clears.clear(); + m_clears.resize(attachments.size()); + #ifdef DEBUG + DebugLog("Vulkan: renderpass created for %", m_debug_name); + #endif + + if(p_renderer) + { + for(const Image& image : p_renderer->GetSwapchain().GetSwapchainImages()) + { + attachment_views[0] = image.GetImageView(); + m_framebuffers.push_back(kvfCreateFramebuffer(RenderCore::Get().GetDevice(), m_renderpass, attachment_views.data(), attachment_views.size(), { .width = image.GetWidth(), .height = image.GetHeight() })); + #ifdef DEBUG + DebugLog("Vulkan: framebuffer created for %", m_debug_name); + #endif + } + } + for(NonOwningPtr image : render_targets) + { + m_framebuffers.push_back(kvfCreateFramebuffer(RenderCore::Get().GetDevice(), m_renderpass, attachment_views.data(), attachment_views.size(), { .width = image->GetWidth(), .height = image->GetHeight() })); + #ifdef DEBUG + DebugLog("Vulkan: framebuffer created for %", m_debug_name); + #endif + } + } + + void GraphicPipeline::TransitionAttachments(VkCommandBuffer cmd) + { + MLX_PROFILE_FUNCTION(); + for(NonOwningPtr image : m_attachments) + { + if(!image->IsInit()) + continue; + image->TransitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, cmd); + } + } +} diff --git a/runtime/Sources/Renderer/Pipelines/Shader.cpp b/runtime/Sources/Renderer/Pipelines/Shader.cpp new file mode 100644 index 0000000..5a92e35 --- /dev/null +++ b/runtime/Sources/Renderer/Pipelines/Shader.cpp @@ -0,0 +1,66 @@ +#include +#include +#include + +namespace mlx +{ + Shader::Shader(const std::vector& bytecode, ShaderType type, ShaderLayout layout) : m_layout(std::move(layout)), m_bytecode(bytecode) + { + MLX_PROFILE_FUNCTION(); + switch(type) + { + case ShaderType::Vertex : m_stage = VK_SHADER_STAGE_VERTEX_BIT; break; + case ShaderType::Fragment : m_stage = VK_SHADER_STAGE_FRAGMENT_BIT; break; + + default : FatalError("wtf"); break; + } + m_module = kvfCreateShaderModule(RenderCore::Get().GetDevice(), reinterpret_cast(m_bytecode.data()), m_bytecode.size() / 4); + DebugLog("Vulkan: shader module created"); + + GeneratePipelineLayout(m_layout); + } + + void 
Shader::GeneratePipelineLayout(ShaderLayout layout) + { + MLX_PROFILE_FUNCTION(); + for(auto& [n, set] : layout.set_layouts) + { + std::vector bindings(set.binds.size()); + for(std::size_t i = 0; i < set.binds.size(); i++) + { + bindings[i].binding = set.binds[i].first; + bindings[i].descriptorCount = 1; + bindings[i].descriptorType = set.binds[i].second; + bindings[i].pImmutableSamplers = nullptr; + bindings[i].stageFlags = m_stage; + } + m_set_layouts.emplace_back(kvfCreateDescriptorSetLayout(RenderCore::Get().GetDevice(), bindings.data(), bindings.size())); + DebugLog("Vulkan: descriptor set layout created"); + m_pipeline_layout_part.set_layouts.push_back(m_set_layouts.back()); + } + + std::size_t i = 0; + m_pipeline_layout_part.push_constants.resize(layout.push_constants.size()); + for(const auto& pc : layout.push_constants) + { + VkPushConstantRange push_constant_range = {}; + push_constant_range.offset = pc.offset; + push_constant_range.size = pc.size; + push_constant_range.stageFlags = m_stage; + m_pipeline_layout_part.push_constants[i] = push_constant_range; + i++; + } + } + + Shader::~Shader() + { + MLX_PROFILE_FUNCTION(); + kvfDestroyShaderModule(RenderCore::Get().GetDevice(), m_module); + DebugLog("Vulkan: shader module destroyed"); + for(auto& layout : m_set_layouts) + { + kvfDestroyDescriptorSetLayout(RenderCore::Get().GetDevice(), layout); + DebugLog("Vulkan: descriptor set layout destroyed"); + } + } +} diff --git a/runtime/Sources/Renderer/RenderCore.cpp b/runtime/Sources/Renderer/RenderCore.cpp new file mode 100644 index 0000000..e4e6aa4 --- /dev/null +++ b/runtime/Sources/Renderer/RenderCore.cpp @@ -0,0 +1,219 @@ +#include +#include +#include + +#define KVF_IMPLEMENTATION +#ifdef DEBUG + #define KVF_ENABLE_VALIDATION_LAYERS +#endif + +#define KVF_ASSERT(x) (mlx::Assert(x, "internal kvf assertion " #x)) +#define KVF_MALLOC(x) (mlx::MemManager::Get().Malloc(x)) +#define KVF_REALLOC(p, x) (mlx::MemManager::Get().Realloc(p, x)) +#define KVF_FREE(x) (mlx::MemManager::Get().Free(x)) + +#if defined(MLX_COMPILER_GCC) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wmissing-field-initializers" + #include + #pragma GCC diagnostic pop +#elif defined(MLX_COMPILER_CLANG) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wmissing-field-initializers" + #include + #pragma clang diagnostic pop +#else + #include +#endif + +#include +#include +#include +#include + +namespace mlx +{ + static std::unique_ptr loader; + + void ErrorCallback(const char* message) noexcept + { + Logs::Report(LogType::FatalError, 0, "", "", message); + std::cout << std::endl; + } + + void ValidationErrorCallback(const char* message) noexcept + { + Logs::Report(LogType::Error, 0, "", "", message); + std::cout << std::endl; + } + + void WarningCallback(const char* message) noexcept + { + Logs::Report(LogType::Warning, 0, "", "", message); + std::cout << std::endl; + } + + RenderCore* RenderCore::s_instance = nullptr; + + RenderCore::RenderCore() + { + if(s_instance != nullptr) + return; + s_instance = this; + + loader = std::make_unique(); + + LoadKVFGlobalVulkanFunctionPointers(); + + kvfSetErrorCallback(&ErrorCallback); + kvfSetWarningCallback(&WarningCallback); + kvfSetValidationErrorCallback(&ValidationErrorCallback); + kvfSetValidationWarningCallback(&WarningCallback); + + Window window(1, 1, "", true); + std::vector instance_extensions = window.GetRequiredVulkanInstanceExtentions(); + #ifdef MLX_PLAT_MACOS + 
instance_extensions.push_back(VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME); + #endif + + m_instance = kvfCreateInstance(instance_extensions.data(), instance_extensions.size()); + DebugLog("Vulkan: instance created"); + + loader->LoadInstance(m_instance); + LoadKVFInstanceVulkanFunctionPointers(); + + VkSurfaceKHR surface = window.CreateVulkanSurface(m_instance); + + m_physical_device = kvfPickGoodDefaultPhysicalDevice(m_instance, surface); + + // just for style + VkPhysicalDeviceProperties props; + vkGetPhysicalDeviceProperties(m_physical_device, &props); + DebugLog("Vulkan: physical device picked '%'", props.deviceName); + + const char* device_extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + VkPhysicalDeviceFeatures features{}; + vkGetPhysicalDeviceFeatures(m_physical_device, &features); + m_device = kvfCreateDevice(m_physical_device, device_extensions, sizeof(device_extensions) / sizeof(device_extensions[0]), &features); + DebugLog("Vulkan: logical device created"); + + loader->LoadDevice(m_device); + LoadKVFDeviceVulkanFunctionPointers(); + + vkDestroySurfaceKHR(m_instance, surface, nullptr); + + m_allocator.Init(); + } + +#undef MLX_LOAD_FUNCTION +#define MLX_LOAD_FUNCTION(fn) pfns.fn = this->fn + + void RenderCore::LoadKVFGlobalVulkanFunctionPointers() const noexcept + { + KvfGlobalVulkanFunctions pfns; + MLX_LOAD_FUNCTION(vkCreateInstance); + MLX_LOAD_FUNCTION(vkEnumerateInstanceExtensionProperties); + MLX_LOAD_FUNCTION(vkEnumerateInstanceLayerProperties); + MLX_LOAD_FUNCTION(vkGetInstanceProcAddr); + kvfPassGlobalVulkanFunctionPointers(&pfns); + } + + void RenderCore::LoadKVFInstanceVulkanFunctionPointers() const noexcept + { + KvfInstanceVulkanFunctions pfns; + MLX_LOAD_FUNCTION(vkCreateDevice); + MLX_LOAD_FUNCTION(vkDestroyInstance); + MLX_LOAD_FUNCTION(vkEnumerateDeviceExtensionProperties); + MLX_LOAD_FUNCTION(vkEnumeratePhysicalDevices); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceFeatures); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceFormatProperties); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceImageFormatProperties); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceMemoryProperties); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceProperties); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties); + MLX_LOAD_FUNCTION(vkDestroySurfaceKHR); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceSurfaceCapabilitiesKHR); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceSurfacePresentModesKHR); + MLX_LOAD_FUNCTION(vkGetPhysicalDeviceSurfaceSupportKHR); + kvfPassInstanceVulkanFunctionPointers(&pfns); + } + + void RenderCore::LoadKVFDeviceVulkanFunctionPointers() const noexcept + { + KvfDeviceVulkanFunctions pfns; + MLX_LOAD_FUNCTION(vkAllocateCommandBuffers); + MLX_LOAD_FUNCTION(vkAllocateDescriptorSets); + MLX_LOAD_FUNCTION(vkBeginCommandBuffer); + MLX_LOAD_FUNCTION(vkCmdBeginRenderPass); + MLX_LOAD_FUNCTION(vkCmdCopyBuffer); + MLX_LOAD_FUNCTION(vkCmdCopyBufferToImage); + MLX_LOAD_FUNCTION(vkCmdCopyImage); + MLX_LOAD_FUNCTION(vkCmdCopyImageToBuffer); + MLX_LOAD_FUNCTION(vkCmdEndRenderPass); + MLX_LOAD_FUNCTION(vkCmdPipelineBarrier); + MLX_LOAD_FUNCTION(vkCreateBuffer); + MLX_LOAD_FUNCTION(vkCreateCommandPool); + MLX_LOAD_FUNCTION(vkCreateDescriptorPool); + MLX_LOAD_FUNCTION(vkCreateDescriptorSetLayout); + MLX_LOAD_FUNCTION(vkCreateFence); + MLX_LOAD_FUNCTION(vkCreateFramebuffer); + MLX_LOAD_FUNCTION(vkCreateGraphicsPipelines); + MLX_LOAD_FUNCTION(vkCreateImage); + MLX_LOAD_FUNCTION(vkCreateImageView); + MLX_LOAD_FUNCTION(vkCreatePipelineLayout); + 
MLX_LOAD_FUNCTION(vkCreateRenderPass); + MLX_LOAD_FUNCTION(vkCreateSampler); + MLX_LOAD_FUNCTION(vkCreateSemaphore); + MLX_LOAD_FUNCTION(vkCreateShaderModule); + MLX_LOAD_FUNCTION(vkDestroyBuffer); + MLX_LOAD_FUNCTION(vkDestroyCommandPool); + MLX_LOAD_FUNCTION(vkDestroyDescriptorPool); + MLX_LOAD_FUNCTION(vkDestroyDescriptorSetLayout); + MLX_LOAD_FUNCTION(vkDestroyDevice); + MLX_LOAD_FUNCTION(vkDestroyFence); + MLX_LOAD_FUNCTION(vkDestroyFramebuffer); + MLX_LOAD_FUNCTION(vkDestroyImage); + MLX_LOAD_FUNCTION(vkDestroyImageView); + MLX_LOAD_FUNCTION(vkDestroyPipeline); + MLX_LOAD_FUNCTION(vkDestroyPipelineLayout); + MLX_LOAD_FUNCTION(vkDestroyRenderPass); + MLX_LOAD_FUNCTION(vkDestroySampler); + MLX_LOAD_FUNCTION(vkDestroySemaphore); + MLX_LOAD_FUNCTION(vkDestroyShaderModule); + MLX_LOAD_FUNCTION(vkDeviceWaitIdle); + MLX_LOAD_FUNCTION(vkEndCommandBuffer); + MLX_LOAD_FUNCTION(vkGetDeviceQueue); + MLX_LOAD_FUNCTION(vkGetImageSubresourceLayout); + MLX_LOAD_FUNCTION(vkQueueSubmit); + MLX_LOAD_FUNCTION(vkResetCommandBuffer); + MLX_LOAD_FUNCTION(vkResetDescriptorPool); + MLX_LOAD_FUNCTION(vkResetEvent); + MLX_LOAD_FUNCTION(vkResetFences); + MLX_LOAD_FUNCTION(vkUpdateDescriptorSets); + MLX_LOAD_FUNCTION(vkWaitForFences); + MLX_LOAD_FUNCTION(vkCreateSwapchainKHR); + MLX_LOAD_FUNCTION(vkDestroySwapchainKHR); + MLX_LOAD_FUNCTION(vkGetSwapchainImagesKHR); + MLX_LOAD_FUNCTION(vkQueuePresentKHR); + kvfPassDeviceVulkanFunctionPointers(m_physical_device, m_device, &pfns); + } + +#undef MLX_LOAD_FUNCTION + + RenderCore::~RenderCore() + { + if(s_instance == nullptr) + return; + WaitDeviceIdle(); + m_descriptor_pool_manager.Destroy(); + m_allocator.Destroy(); + kvfDestroyDevice(m_device); + DebugLog("Vulkan: logical device destroyed"); + kvfDestroyInstance(m_instance); + DebugLog("Vulkan: instance destroyed"); + loader.reset(); + + s_instance = nullptr; + } +} diff --git a/runtime/Sources/Renderer/RenderPasses/2DPass.cpp b/runtime/Sources/Renderer/RenderPasses/2DPass.cpp new file mode 100644 index 0000000..0d1de8e --- /dev/null +++ b/runtime/Sources/Renderer/RenderPasses/2DPass.cpp @@ -0,0 +1,143 @@ +#include +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + struct DrawableData + { + Mat4f model_matrix; + Vec4f color; + }; + + void Render2DPass::Init() + { + MLX_PROFILE_FUNCTION(); + + ShaderLayout vertex_shader_layout( + { + { 0, + ShaderSetLayout({ + { 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER } + }) + } + }, { ShaderPushConstantLayout({ 0, sizeof(DrawableData) }) } + ); + std::vector vertex_shader_code = { + #include + }; + p_vertex_shader = std::make_shared(vertex_shader_code, ShaderType::Vertex, std::move(vertex_shader_layout)); + ShaderLayout fragment_shader_layout( + { + { 1, + ShaderSetLayout({ + { 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER } + }) + } + }, {} + ); + std::vector fragment_shader_code = { + #include + }; + p_fragment_shader = std::make_shared(fragment_shader_code, ShaderType::Fragment, std::move(fragment_shader_layout)); + + func::function functor = [this](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + m_pipeline.Destroy(); + }; + EventBus::RegisterListener({ functor, "mlx_2d_render_pass_" + std::to_string(reinterpret_cast(this)) }); + + p_viewer_data_set = RenderCore::Get().GetDescriptorPoolManager().GetAvailablePool().RequestDescriptorSet(p_vertex_shader->GetShaderLayout().set_layouts[0].second, ShaderType::Vertex); + p_texture_set = 
RenderCore::Get().GetDescriptorPoolManager().GetAvailablePool().RequestDescriptorSet(p_fragment_shader->GetShaderLayout().set_layouts[0].second, ShaderType::Fragment); + + p_viewer_data_buffer = std::make_shared(); + p_viewer_data_buffer->Init(sizeof(ViewerData), "mlx_2d_pass_viewer_data"); + + for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + p_viewer_data_set->SetUniformBuffer(i, 0, p_viewer_data_buffer->Get(i)); + p_viewer_data_set->Update(i); + } + } + + void Render2DPass::Pass(Scene& scene, Renderer& renderer, Texture& render_target) + { + MLX_PROFILE_FUNCTION(); + if(m_pipeline.GetPipeline() == VK_NULL_HANDLE) + { + GraphicPipelineDescriptor pipeline_descriptor; + pipeline_descriptor.vertex_shader = p_vertex_shader; + pipeline_descriptor.fragment_shader = p_fragment_shader; + pipeline_descriptor.color_attachments = { &render_target }; + pipeline_descriptor.clear_color_attachments = false; + #ifdef DEBUG + if(renderer.GetWindow()) + m_pipeline.Init(pipeline_descriptor, "mlx_2D_pass_" + renderer.GetWindow()->GetName()); + else + m_pipeline.Init(pipeline_descriptor, "mlx_2D_pass"); + #else + m_pipeline.Init(pipeline_descriptor, {}); + #endif + } + + std::uint32_t frame_index = renderer.GetCurrentFrameIndex(); + + ViewerData viewer_data; + viewer_data.projection_matrix = Mat4f::Ortho(0.0f, render_target.GetWidth(), render_target.GetHeight(), 0.0f, -1.0f, 1.0f); + static CPUBuffer buffer(sizeof(ViewerData)); + std::memcpy(buffer.GetData(), &viewer_data, buffer.GetSize()); + p_viewer_data_buffer->SetData(buffer, frame_index); + + VkCommandBuffer cmd = renderer.GetActiveCommandBuffer(); + + const auto& drawables = scene.GetDrawables(); + + for(auto& drawable : drawables) + { + // Check every textures and update modified ones to GPU before starting the render pass + if(!drawable->IsSetInit()) + drawable->UpdateDescriptorSet(p_texture_set); + drawable->Update(cmd); + } + + m_pipeline.BindPipeline(cmd, 0, {}); + for(auto& drawable : drawables) + { + DrawableData drawable_data; + drawable_data.color = drawable->GetColor(); + drawable_data.model_matrix = Mat4f::Identity(); + drawable_data.model_matrix.ApplyTranslation(Vec3f{ -drawable->GetCenter() / 2.0f, 0.0f }); + drawable_data.model_matrix.ApplyRotation(drawable->GetRotation()); + drawable_data.model_matrix.ApplyTranslation(Vec3f{ drawable->GetPosition() + drawable->GetCenter(), 0.0f }); + drawable_data.model_matrix.ApplyScale(Vec3f{ drawable->GetScale(), 1.0f }); + + drawable->Bind(frame_index, cmd); + + std::array sets = { p_viewer_data_set->GetSet(frame_index), drawable->GetSet(frame_index) }; + + RenderCore::Get().vkCmdPushConstants(cmd, m_pipeline.GetPipelineLayout(), VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(DrawableData), &drawable_data); + RenderCore::Get().vkCmdBindDescriptorSets(cmd, m_pipeline.GetPipelineBindPoint(), m_pipeline.GetPipelineLayout(), 0, sets.size(), sets.data(), 0, nullptr); + + drawable->GetMesh()->Draw(cmd, renderer.GetDrawCallsCounterRef(), renderer.GetPolygonDrawnCounterRef()); + } + m_pipeline.EndPipeline(cmd); + } + + void Render2DPass::Destroy() + { + MLX_PROFILE_FUNCTION(); + m_pipeline.Destroy(); + p_vertex_shader.reset(); + p_fragment_shader.reset(); + p_viewer_data_set->ReturnDescriptorSetToPool(); + p_viewer_data_set.reset(); + p_viewer_data_buffer->Destroy(); + p_texture_set->ReturnDescriptorSetToPool(); + p_texture_set.reset(); + } +} diff --git a/runtime/Sources/Renderer/RenderPasses/FinalPass.cpp b/runtime/Sources/Renderer/RenderPasses/FinalPass.cpp new file mode 100644 index 0000000..0a897a8 
--- /dev/null +++ b/runtime/Sources/Renderer/RenderPasses/FinalPass.cpp @@ -0,0 +1,90 @@ +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + void FinalPass::Init() + { + MLX_PROFILE_FUNCTION(); + ShaderLayout vertex_shader_layout( + {}, {} + ); + std::vector vertex_shader_code = { + #include + }; + p_vertex_shader = std::make_shared(vertex_shader_code, ShaderType::Vertex, std::move(vertex_shader_layout)); + ShaderLayout fragment_shader_layout( + { + { 0, + ShaderSetLayout({ + { 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER } + }) + } + }, {} + ); + std::vector fragment_shader_code = { + #include + }; + p_fragment_shader = std::make_shared(fragment_shader_code, ShaderType::Fragment, std::move(fragment_shader_layout)); + + func::function functor = [this](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + m_pipeline.Destroy(); + }; + EventBus::RegisterListener({ functor, "mlx_final_pass_" + std::to_string(reinterpret_cast(this)) }); + + p_set = RenderCore::Get().GetDescriptorPoolManager().GetAvailablePool().RequestDescriptorSet(p_fragment_shader->GetShaderLayout().set_layouts[0].second, ShaderType::Fragment); + } + + void FinalPass::Pass([[maybe_unused]] Scene& scene, Renderer& renderer, Texture& render_target, NonOwningPtr final_target) + { + MLX_PROFILE_FUNCTION(); + if(m_pipeline.GetPipeline() == VK_NULL_HANDLE) + { + GraphicPipelineDescriptor pipeline_descriptor; + pipeline_descriptor.vertex_shader = p_vertex_shader; + pipeline_descriptor.fragment_shader = p_fragment_shader; + if(final_target) + pipeline_descriptor.color_attachments = { final_target }; + else + pipeline_descriptor.renderer = &renderer; + pipeline_descriptor.no_vertex_inputs = true; + #ifdef DEBUG + if(final_target) + m_pipeline.Init(pipeline_descriptor, "mlx_final_pass"); + else + m_pipeline.Init(pipeline_descriptor, "mlx_final_pass_" + renderer.GetWindow()->GetName()); + #else + m_pipeline.Init(pipeline_descriptor, {}); + #endif + } + + VkCommandBuffer cmd = renderer.GetActiveCommandBuffer(); + + p_set->SetImage(renderer.GetCurrentFrameIndex(), 0, render_target); + p_set->Update(renderer.GetCurrentFrameIndex(), cmd); + + m_pipeline.BindPipeline(cmd, renderer.GetSwapchain().GetImageIndex(), { 0.0f, 0.0f, 0.0f, 1.0f }); + VkDescriptorSet set = p_set->GetSet(renderer.GetCurrentFrameIndex()); + RenderCore::Get().vkCmdBindDescriptorSets(cmd, m_pipeline.GetPipelineBindPoint(), m_pipeline.GetPipelineLayout(), 0, 1, &set, 0, nullptr); + RenderCore::Get().vkCmdDraw(cmd, 3, 1, 0, 0); + renderer.GetDrawCallsCounterRef()++; + renderer.GetPolygonDrawnCounterRef()++; + m_pipeline.EndPipeline(cmd); + } + + void FinalPass::Destroy() + { + MLX_PROFILE_FUNCTION(); + m_pipeline.Destroy(); + p_vertex_shader.reset(); + p_fragment_shader.reset(); + p_set->ReturnDescriptorSetToPool(); + p_set.reset(); + } +} diff --git a/runtime/Sources/Renderer/RenderPasses/Passes.cpp b/runtime/Sources/Renderer/RenderPasses/Passes.cpp new file mode 100644 index 0000000..41987d7 --- /dev/null +++ b/runtime/Sources/Renderer/RenderPasses/Passes.cpp @@ -0,0 +1,50 @@ +#include +#include +#include +#include + +namespace mlx +{ + void RenderPasses::Init(NonOwningPtr render_target) + { + p_render_target = render_target; + + m_2Dpass.Init(); + m_final.Init(); + func::function functor = [this](const EventBase& event) + { + if(event.What() == Event::ResizeEventCode) + m_main_render_texture.Destroy(); + }; + EventBus::RegisterListener({ functor, "mlx_render_passes" }); + } + + void RenderPasses::Pass(Scene& scene, 
Renderer& renderer, const Vec4f& clear_color) + { + if(!m_main_render_texture.IsInit()) + { + VkExtent2D extent; + if(p_render_target) + extent = VkExtent2D{ .width = p_render_target->GetWidth(), .height = p_render_target->GetHeight() }; + else + extent = kvfGetSwapchainImagesSize(renderer.GetSwapchain().Get()); + #ifdef DEBUG + m_main_render_texture.Init({}, extent.width, extent.height, VK_FORMAT_R8G8B8A8_SRGB, false, "mlx_renderpasses_target"); + #else + m_main_render_texture.Init({}, extent.width, extent.height, VK_FORMAT_R8G8B8A8_SRGB, false, {}); + #endif + m_main_render_texture.TransitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + } + m_main_render_texture.Clear(renderer.GetActiveCommandBuffer(), clear_color); + + m_2Dpass.Pass(scene, renderer, m_main_render_texture); + m_final.Pass(scene, renderer, m_main_render_texture, p_render_target); + } + + void RenderPasses::Destroy() + { + m_2Dpass.Destroy(); + m_final.Destroy(); + m_main_render_texture.Destroy(); + } +} diff --git a/runtime/Sources/Renderer/Renderer.cpp b/runtime/Sources/Renderer/Renderer.cpp new file mode 100644 index 0000000..8c18706 --- /dev/null +++ b/runtime/Sources/Renderer/Renderer.cpp @@ -0,0 +1,96 @@ +#include +#include +#include +#include +#include +#include + +namespace mlx +{ + namespace Internal + { + struct FrameBeginEventBroadcast : public EventBase + { + Event What() const override { return Event::FrameBeginEventCode; } + }; + } + + void Renderer::Init(NonOwningPtr window) + { + MLX_PROFILE_FUNCTION(); + p_window = window; + m_swapchain.Init(p_window); + for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + m_image_available_semaphores[i] = kvfCreateSemaphore(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: image available semaphore created"); + m_render_finished_semaphores[i] = kvfCreateSemaphore(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: render finished semaphore created"); + m_cmd_buffers[i] = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: command buffer created"); + m_cmd_fences[i] = kvfCreateFence(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: fence created"); + } + } + + void Renderer::Init(NonOwningPtr render_target) + { + MLX_PROFILE_FUNCTION(); + p_render_target = render_target; + for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + m_image_available_semaphores[i] = kvfCreateSemaphore(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: image available semaphore created"); + m_render_finished_semaphores[i] = kvfCreateSemaphore(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: render finished semaphore created"); + m_cmd_buffers[i] = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: command buffer created"); + m_cmd_fences[i] = kvfCreateFence(RenderCore::Get().GetDevice()); + DebugLog("Vulkan: fence created"); + } + } + + void Renderer::BeginFrame() + { + MLX_PROFILE_FUNCTION(); + kvfWaitForFence(RenderCore::Get().GetDevice(), m_cmd_fences[m_current_frame_index]); + if(p_window) + m_swapchain.AquireFrame(m_image_available_semaphores[m_current_frame_index]); + RenderCore::Get().vkResetCommandBuffer(m_cmd_buffers[m_current_frame_index], 0); + kvfBeginCommandBuffer(m_cmd_buffers[m_current_frame_index], 0); + m_drawcalls = 0; + m_polygons_drawn = 0; + EventBus::SendBroadcast(Internal::FrameBeginEventBroadcast{}); + } + + void Renderer::EndFrame() + { + MLX_PROFILE_FUNCTION(); + VkPipelineStageFlags wait_stages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT }; + 
kvfEndCommandBuffer(m_cmd_buffers[m_current_frame_index]); + if(p_window) + kvfSubmitCommandBuffer(RenderCore::Get().GetDevice(), m_cmd_buffers[m_current_frame_index], KVF_GRAPHICS_QUEUE, m_render_finished_semaphores[m_current_frame_index], m_image_available_semaphores[m_current_frame_index], m_cmd_fences[m_current_frame_index], wait_stages); + else + kvfSubmitCommandBuffer(RenderCore::Get().GetDevice(), m_cmd_buffers[m_current_frame_index], KVF_GRAPHICS_QUEUE, VK_NULL_HANDLE, VK_NULL_HANDLE, m_cmd_fences[m_current_frame_index], wait_stages); + if(p_window) + m_swapchain.Present(m_render_finished_semaphores[m_current_frame_index]); + m_current_frame_index = (m_current_frame_index + 1) % MAX_FRAMES_IN_FLIGHT; + } + + void Renderer::Destroy() noexcept + { + MLX_PROFILE_FUNCTION(); + RenderCore::Get().WaitDeviceIdle(); + for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) + { + kvfDestroySemaphore(RenderCore::Get().GetDevice(), m_image_available_semaphores[i]); + DebugLog("Vulkan: image available semaphore destroyed"); + kvfDestroySemaphore(RenderCore::Get().GetDevice(), m_render_finished_semaphores[i]); + DebugLog("Vulkan: render finished semaphore destroyed"); + kvfDestroyFence(RenderCore::Get().GetDevice(), m_cmd_fences[i]); + DebugLog("Vulkan: fence destroyed"); + } + if(p_window) + m_swapchain.Destroy(); + } +} diff --git a/runtime/Sources/Renderer/SceneRenderer.cpp b/runtime/Sources/Renderer/SceneRenderer.cpp new file mode 100644 index 0000000..4daf819 --- /dev/null +++ b/runtime/Sources/Renderer/SceneRenderer.cpp @@ -0,0 +1,26 @@ +#include +#include +#include +#include +#include + +namespace mlx +{ + void SceneRenderer::Init(NonOwningPtr render_target) + { + MLX_PROFILE_FUNCTION(); + m_passes.Init(render_target); + } + + void SceneRenderer::Render(Scene& scene, Renderer& renderer) + { + MLX_PROFILE_FUNCTION(); + m_passes.Pass(scene, renderer, scene.GetClearColor()); + } + + void SceneRenderer::Destroy() + { + MLX_PROFILE_FUNCTION(); + m_passes.Destroy(); + } +} diff --git a/runtime/Sources/Renderer/Swapchain.cpp b/runtime/Sources/Renderer/Swapchain.cpp new file mode 100644 index 0000000..d598120 --- /dev/null +++ b/runtime/Sources/Renderer/Swapchain.cpp @@ -0,0 +1,107 @@ +#include + +#include +#include +#include +#include +#include + +namespace mlx +{ + namespace Internal + { + struct ResizeEventBroadcast : public EventBase + { + Event What() const override { return Event::ResizeEventCode; } + }; + } + + void Swapchain::Init(NonOwningPtr window) + { + p_window = window; + CreateSwapchain(); + } + + void Swapchain::AquireFrame(VkSemaphore signal) + { + if(m_resize) + { + RenderCore::Get().WaitDeviceIdle(); + Destroy(); + CreateSwapchain(); + EventBus::SendBroadcast(Internal::ResizeEventBroadcast{}); + } + + VkResult result = RenderCore::Get().vkAcquireNextImageKHR(RenderCore::Get().GetDevice(), m_swapchain, UINT64_MAX, signal, VK_NULL_HANDLE, &m_current_image_index); + if(result == VK_SUBOPTIMAL_KHR) + m_resize = true; // Recreate Swapchain next time + else if(result == VK_ERROR_OUT_OF_DATE_KHR) + { + m_resize = true; + AquireFrame(signal); + } + else if(result != VK_SUCCESS) + FatalError("Vulkan: failed to acquire swapchain image, %", kvfVerbaliseVkResult(result)); + } + + void Swapchain::Present(VkSemaphore wait) noexcept + { + if(!kvfQueuePresentKHR(RenderCore::Get().GetDevice(), wait, m_swapchain, m_current_image_index)) + m_resize = true; + } + + void Swapchain::Destroy() + { + if(m_swapchain == VK_NULL_HANDLE) + return; + RenderCore::Get().WaitDeviceIdle(); + + for(Image& img : 
m_swapchain_images) + img.DestroyImageView(); + m_swapchain_images.clear(); + kvfDestroySwapchainKHR(RenderCore::Get().GetDevice(), m_swapchain); + m_swapchain = VK_NULL_HANDLE; + DebugLog("Vulkan: swapchain destroyed"); + + RenderCore::Get().vkDestroySurfaceKHR(RenderCore::Get().GetInstance(), m_surface, nullptr); + m_surface = VK_NULL_HANDLE; + DebugLog("Vulkan: surface destroyed"); + } + + void Swapchain::CreateSwapchain() + { + VkExtent2D extent; + do + { + Vec2ui size = p_window->GetVulkanDrawableSize(); + extent = { size.x, size.y }; + } while(extent.width == 0 || extent.height == 0); + + m_surface = p_window->CreateVulkanSurface(RenderCore::Get().GetInstance()); + DebugLog("Vulkan: surface created"); + m_swapchain = kvfCreateSwapchainKHR(RenderCore::Get().GetDevice(), RenderCore::Get().GetPhysicalDevice(), m_surface, extent, VK_NULL_HANDLE, false); + + m_images_count = kvfGetSwapchainImagesCount(m_swapchain); + m_min_images_count = kvfGetSwapchainMinImagesCount(m_swapchain); + std::vector tmp(m_images_count); + m_swapchain_images.resize(m_images_count); + RenderCore::Get().vkGetSwapchainImagesKHR(RenderCore::Get().GetDevice(), m_swapchain, &m_images_count, tmp.data()); + VkCommandBuffer cmd = kvfCreateCommandBuffer(RenderCore::Get().GetDevice()); + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + for(std::size_t i = 0; i < m_images_count; i++) + { + #ifdef DEBUG + m_swapchain_images[i].Init(tmp[i], kvfGetSwapchainImagesFormat(m_swapchain), extent.width, extent.height, VK_IMAGE_LAYOUT_UNDEFINED, "mlx_swapchain_image_" + std::to_string(i)); + #else + m_swapchain_images[i].Init(tmp[i], kvfGetSwapchainImagesFormat(m_swapchain), extent.width, extent.height, VK_IMAGE_LAYOUT_UNDEFINED, {}); + #endif + m_swapchain_images[i].TransitionLayout(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, cmd); + m_swapchain_images[i].CreateImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); + } + kvfEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice()); + kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence); + kvfDestroyFence(RenderCore::Get().GetDevice(), fence); + DebugLog("Vulkan: swapchain created"); + } +} diff --git a/runtime/Sources/Renderer/Vulkan/VulkanLoader.cpp b/runtime/Sources/Renderer/Vulkan/VulkanLoader.cpp new file mode 100644 index 0000000..4f63d54 --- /dev/null +++ b/runtime/Sources/Renderer/Vulkan/VulkanLoader.cpp @@ -0,0 +1,152 @@ +#include +#include +#include + +#ifdef MLX_PLAT_WINDOWS + __declspec(dllimport) HMODULE __stdcall LoadLibraryA(LPCSTR); + __declspec(dllimport) FARPROC __stdcall GetProcAddress(HMODULE, LPCSTR); + __declspec(dllimport) int __stdcall FreeLibrary(HMODULE); + using LibModule = HMODULE; +#else + using LibModule = Handle; +#endif + +#if defined(MLX_COMPILER_GCC) + #define DISABLE_GCC_PEDANTIC_WARNINGS \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wpedantic\"") + #define RESTORE_GCC_PEDANTIC_WARNINGS \ + _Pragma("GCC diagnostic pop") +#else + #define DISABLE_GCC_PEDANTIC_WARNINGS + #define RESTORE_GCC_PEDANTIC_WARNINGS +#endif + +namespace mlx +{ + namespace Internal + { + static inline PFN_vkVoidFunction vkGetInstanceProcAddrStub(Handle context, const char* name) + { + PFN_vkVoidFunction function = RenderCore::Get().vkGetInstanceProcAddr(static_cast(context), name); + if(!function) + FatalError("Vulkan Loader: could not load '%'", name); + //DebugLog("Vulkan Loader: loaded %", name); + return function; + } + + static inline PFN_vkVoidFunction 
vkGetDeviceProcAddrStub(Handle context, const char* name)
+		{
+			PFN_vkVoidFunction function = RenderCore::Get().vkGetDeviceProcAddr(static_cast<VkDevice>(context), name);
+			if(!function)
+				FatalError("Vulkan Loader: could not load '%'", name);
+			//DebugLog("Vulkan Loader: loaded %", name);
+			return function;
+		}
+
+		static inline LibModule LoadLib(const char* libname)
+		{
+			#ifdef MLX_PLAT_WINDOWS
+				return LoadLibraryA(libname);
+			#else
+				return dlopen(libname, RTLD_NOW | RTLD_LOCAL);
+			#endif
+		}
+
+		static inline void* GetSymbol(LibModule module, const char* name)
+		{
+			#ifdef MLX_PLAT_WINDOWS
+				return (void*)(void(*)(void))GetProcAddress(module, name);
+			#else
+				return dlsym(module, name);
+			#endif
+		}
+	}
+
+	VulkanLoader::VulkanLoader()
+	{
+		#if defined(MLX_PLAT_WINDOWS)
+			std::array<const char*, 1> libnames{
+				"vulkan-1.dll"
+			};
+		#elif defined(MLX_PLAT_MACOS)
+			std::array<const char*, 6> libnames{
+				"libvulkan.dylib",
+				"libvulkan.1.dylib",
+				"libMoltenVK.dylib",
+				"vulkan.framework/vulkan",
+				"MoltenVK.framework/MoltenVK",
+				"/usr/local/lib/libvulkan.dylib",
+			};
+		#else
+			std::array<const char*, 2> libnames{
+				"libvulkan.so.1",
+				"libvulkan.so"
+			};
+		#endif
+
+		for(auto libname : libnames)
+		{
+			p_module = Internal::LoadLib(libname);
+			if(p_module != nullptr)
+			{
+				DISABLE_GCC_PEDANTIC_WARNINGS
+				RenderCore::Get().vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(Internal::GetSymbol(p_module, "vkGetInstanceProcAddr"));
+				RESTORE_GCC_PEDANTIC_WARNINGS
+				if(RenderCore::Get().vkGetInstanceProcAddr)
+				{
+					DebugLog("Vulkan Loader: libvulkan loaded using '%'", libname);
+					break;
+				}
+			}
+		}
+		if(!p_module || !RenderCore::Get().vkGetInstanceProcAddr)
+			FatalError("Vulkan Loader: failed to load libvulkan");
+		LoadGlobalFunctions(nullptr, Internal::vkGetInstanceProcAddrStub);
+	}
+
+	void VulkanLoader::LoadInstance(VkInstance instance)
+	{
+		LoadInstanceFunctions(instance, Internal::vkGetInstanceProcAddrStub);
+	}
+
+	void VulkanLoader::LoadDevice(VkDevice device)
+	{
+		LoadDeviceFunctions(device, Internal::vkGetDeviceProcAddrStub);
+	}
+
+	void VulkanLoader::LoadGlobalFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept
+	{
+		#define MLX_VULKAN_GLOBAL_FUNCTION(fn) RenderCore::Get().fn = reinterpret_cast(load(context, #fn));
+		#include
+		#undef MLX_VULKAN_GLOBAL_FUNCTION
+		DebugLog("Vulkan Loader: global functions loaded");
+	}
+
+	void VulkanLoader::LoadInstanceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept
+	{
+		#define MLX_VULKAN_INSTANCE_FUNCTION(fn) RenderCore::Get().fn = reinterpret_cast(load(context, #fn));
+		#include
+		#undef MLX_VULKAN_INSTANCE_FUNCTION
+		DebugLog("Vulkan Loader: instance functions loaded");
+	}
+
+	void VulkanLoader::LoadDeviceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept
+	{
+		#define MLX_VULKAN_DEVICE_FUNCTION(fn) RenderCore::Get().fn = reinterpret_cast(load(context, #fn));
+		#include
+		#undef MLX_VULKAN_DEVICE_FUNCTION
+		DebugLog("Vulkan Loader: device functions loaded");
+	}
+
+	VulkanLoader::~VulkanLoader()
+	{
+		#ifdef MLX_PLAT_WINDOWS
+			FreeLibrary(p_module);
+		#else
+			dlclose(p_module);
+		#endif
+		p_module = nullptr;
+		DebugLog("Vulkan Loader: libvulkan unloaded");
+	}
+}
diff --git a/runtime/Sources/Renderer/Vulkan/VulkanLoader.h b/runtime/Sources/Renderer/Vulkan/VulkanLoader.h
new file mode 100644
index 0000000..76ce045
--- /dev/null
+++ b/runtime/Sources/Renderer/Vulkan/VulkanLoader.h
@@ -0,0 +1,43 @@
+#ifndef __MLX_VULKAN_LOADER__
+#define __MLX_VULKAN_LOADER__
+
+#include
+
+#ifdef MLX_PLAT_WINDOWS
+	typedef const 
char* LPCSTR; + typedef struct HINSTANCE__* HINSTANCE; + typedef HINSTANCE HMODULE; + #if defined(_MINWINDEF_) + /* minwindef.h defines FARPROC, and attempting to redefine it may conflict with -Wstrict-prototypes */ + #elif defined(_WIN64) + typedef __int64 (__stdcall* FARPROC)(void); + #else + typedef int (__stdcall* FARPROC)(void); + #endif +#endif + +namespace mlx +{ + class VulkanLoader + { + public: + VulkanLoader(); + void LoadInstance(VkInstance instance); + void LoadDevice(VkDevice device); + ~VulkanLoader(); + + private: + void LoadGlobalFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept; + void LoadInstanceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept; + void LoadDeviceFunctions(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) noexcept; + + private: + #ifdef MLX_PLAT_WINDOWS + HMODULE p_module = nullptr; + #else + Handle p_module = nullptr; + #endif + }; +} + +#endif diff --git a/scripts/fetch_dependencies.sh b/scripts/fetch_dependencies.sh index d40d1b2..ba8a0aa 100755 --- a/scripts/fetch_dependencies.sh +++ b/scripts/fetch_dependencies.sh @@ -1,16 +1,5 @@ #!/bin/bash -# Update volk -rm -f ../third_party/volk.c -rm -f ../third_party/volk.h -tag_name=$(curl -sL https://api.github.com/repos/zeux/Volk/releases/latest | jq -r '.tag_name') -wget https://api.github.com/repos/zeux/volk/zipball/$tag_name -O volk.zip -unzip -o volk.zip -d ../third_party/ -mv ../third_party/zeux-volk*/volk.h ../third_party -mv ../third_party/zeux-volk*/volk.c ../third_party -rm -rf ../third_party/zeux-volk* -rm volk.zip - # Update VMA rm -f ../third_party/vma.h tag_name=$(curl -sL https://api.github.com/repos/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/releases/latest | jq -r '.tag_name') @@ -29,3 +18,9 @@ mv ../third_party/Vulkan-Headers-main/include/vulkan ../third_party/ mv ../third_party/Vulkan-Headers-main/include/vk_video ../third_party/ rm -rf ../third_party/Vulkan-Headers-main rm vulkan-headers.zip + +# Update KVF +rm -f ../third_party/kvf.h +git clone https://github.com/Kbz-8/KVF.git ../third_party/KVF/ +mv ../third_party/KVF/kvf.h ../third_party/kvf.h +rm -rf ../third_party/KVF diff --git a/src/core/UUID.cpp b/src/core/UUID.cpp deleted file mode 100644 index c7f2bb2..0000000 --- a/src/core/UUID.cpp +++ /dev/null @@ -1,25 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* UUID.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/06 11:26:37 by maldavid #+# #+# */ -/* Updated: 2024/01/06 11:28:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include - -namespace mlx -{ - static std::random_device random_device; - static std::mt19937_64 engine(random_device()); - static std::uniform_int_distribution uniform_distribution; - - UUID::UUID() : _uuid(uniform_distribution(engine)) {} - UUID::UUID(std::uint64_t uuid) : _uuid(uuid) {} -} diff --git a/src/core/UUID.h b/src/core/UUID.h deleted file mode 100644 index b26df3b..0000000 --- a/src/core/UUID.h +++ /dev/null @@ -1,33 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* UUID.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/06 11:13:23 by maldavid #+# #+# */ -/* Updated: 2024/01/07 01:44:21 by maldavid ### 
########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_UUID__ -#define __MLX_UUID__ - -#include - -namespace mlx -{ - class UUID - { - public: - UUID(); - UUID(std::uint64_t uuid); - - inline operator std::uint64_t() const { return _uuid; } - - private: - std::uint64_t _uuid; - }; -} - -#endif diff --git a/src/core/application.cpp b/src/core/application.cpp deleted file mode 100644 index 92bdd27..0000000 --- a/src/core/application.cpp +++ /dev/null @@ -1,129 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* application.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 22:10:52 by maldavid #+# #+# */ -/* Updated: 2024/10/19 10:49:21 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "application.h" -#include -#include -#include -#include -#include -#include -#include -#include - -namespace mlx::core -{ - static bool __drop_sdl_responsability = false; - Application::Application() : _fps(), _in(std::make_unique()) - { - _fps.init(); - __drop_sdl_responsability = SDL_WasInit(SDL_INIT_VIDEO); - if(__drop_sdl_responsability) // is case the mlx is running in a sandbox like MacroUnitTester where SDL is already init - return; - SDL_SetMemoryFunctions(MemManager::malloc, MemManager::calloc, MemManager::realloc, MemManager::free); - - /* Remove this comment if you want to prioritise Wayland over X11/XWayland, at your own risks */ - //SDL_SetHint(SDL_HINT_VIDEODRIVER, "wayland,x11"); - - if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS | SDL_INIT_TIMER) != 0) - error::report(e_kind::fatal_error, "SDL error : unable to init all subsystems : %s", SDL_GetError()); - } - - void Application::run() noexcept - { - _in->run(); - while(_in->isRunning()) - { - if(!_fps.update()) - continue; - _in->update(); - - if(_loop_hook) - _loop_hook(_param); - - for(auto& gs : _graphics) - { - if(gs) - gs->render(); - } - } - - Render_Core::get().getSingleTimeCmdManager().updateSingleTimesCmdBuffersSubmitState(); - - for(auto& gs : _graphics) - { - if(!gs) - continue; - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - gs->getRenderer().getCmdBuffer(i).waitForExecution(); - } - } - - void* Application::newTexture(int w, int h) - { - MLX_PROFILE_FUNCTION(); - #ifdef DEBUG - _textures.emplace_front().create(nullptr, w, h, VK_FORMAT_R8G8B8A8_UNORM, "__mlx_unamed_user_texture"); - #else - _textures.emplace_front().create(nullptr, w, h, VK_FORMAT_R8G8B8A8_UNORM, nullptr); - #endif - return &_textures.front(); - } - - void* Application::newStbTexture(char* file, int* w, int* h) - { - MLX_PROFILE_FUNCTION(); - _textures.emplace_front(stbTextureLoad(file, w, h)); - return &_textures.front(); - } - - void Application::destroyTexture(void* ptr) - { - MLX_PROFILE_FUNCTION(); - vkDeviceWaitIdle(Render_Core::get().getDevice().get()); // TODO : synchronize with another method than waiting for GPU to be idle - if(ptr == nullptr) - { - core::error::report(e_kind::error, "invalid image ptr (NULL)"); - return; - } - - auto it = std::find_if(_textures.begin(), _textures.end(), [=](const Texture& texture) { return &texture == ptr; }); - if(it == _textures.end()) - { - core::error::report(e_kind::error, "invalid image ptr"); - return; - } - Texture* texture = static_cast(ptr); - if(!texture->isInit()) - core::error::report(e_kind::error, "trying to destroy a texture 
that has already been destroyed"); - else - texture->destroy(); - for(auto& gs : _graphics) - { - if(gs) - gs->tryEraseTextureFromManager(texture); - } - _textures.erase(it); - } - - Application::~Application() - { - TextLibrary::get().clearLibrary(); - TextLibrary::get().reset(); - FontLibrary::get().clearLibrary(); - FontLibrary::get().reset(); - if(__drop_sdl_responsability) - return; - SDL_QuitSubSystem(SDL_INIT_VIDEO | SDL_INIT_TIMER | SDL_INIT_EVENTS); - SDL_Quit(); - } -} diff --git a/src/core/application.h b/src/core/application.h deleted file mode 100644 index fbb9ad0..0000000 --- a/src/core/application.h +++ /dev/null @@ -1,81 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* application.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 21:49:46 by maldavid #+# #+# */ -/* Updated: 2024/09/12 01:30:35 by tdelage ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_APPLICATION__ -#define __MLX_APPLICATION__ - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include - -namespace mlx::core -{ - class Application - { - public: - Application(); - - inline void getMousePos(int* x, int* y) noexcept; - inline void mouseMove(void* win, int x, int y) noexcept; - - inline void onEvent(void* win, int event, int (*funct_ptr)(int, void*), void* param) noexcept; - - inline void getScreenSize(void* win, int* w, int* h) noexcept; - - inline void setFPSCap(std::uint32_t fps) noexcept; - - inline void* newGraphicsSuport(std::size_t w, std::size_t h, const char* title); - inline void clearGraphicsSupport(void* win); - inline void destroyGraphicsSupport(void* win); - inline void setWindowPosition(void *win, int x, int y); - - inline void pixelPut(void* win, int x, int y, std::uint32_t color) const noexcept; - inline void stringPut(void* win, int x, int y, std::uint32_t color, char* str); - - void* newTexture(int w, int h); - void* newStbTexture(char* file, int* w, int* h); // stb textures are format managed by stb image (png, jpg, bpm, ...) 
- inline void texturePut(void* win, void* img, int x, int y); - inline int getTexturePixel(void* img, int x, int y); - inline void setTexturePixel(void* img, int x, int y, std::uint32_t color); - void destroyTexture(void* ptr); - - inline void loopHook(int (*f)(void*), void* param); - inline void loopEnd() noexcept; - - inline void loadFont(void* win, const std::filesystem::path& filepath, float scale); - - void run() noexcept; - - ~Application(); - - private: - FpsManager _fps; - std::list _textures; - std::vector> _graphics; - std::function _loop_hook; - std::unique_ptr _in; - void* _param = nullptr; - }; -} - -#include - -#endif // __MLX_APPLICATION__ diff --git a/src/core/application.inl b/src/core/application.inl deleted file mode 100644 index be5e7c4..0000000 --- a/src/core/application.inl +++ /dev/null @@ -1,213 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* application.inl :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 21:49:46 by maldavid #+# #+# */ -/* Updated: 2023/04/02 14:56:27 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include - -#define CHECK_WINDOW_PTR(win) \ - if(win == nullptr) \ - { \ - core::error::report(e_kind::error, "invalid window ptr (NULL)"); \ - return; \ - } \ - else if(*static_cast(win) < 0 || *static_cast(win) > static_cast(_graphics.size()))\ - { \ - core::error::report(e_kind::error, "invalid window ptr"); \ - return; \ - } else {} - -#define CHECK_IMAGE_PTR(img, retval) \ - if(img == nullptr) \ - { \ - core::error::report(e_kind::error, "invalid image ptr (NULL)"); \ - retval; \ - } \ - else if(std::find_if(_textures.begin(), _textures.end(), [=](const Texture& texture) \ - { \ - return &texture == img; \ - }) == _textures.end()) \ - { \ - core::error::report(e_kind::error, "invalid image ptr"); \ - retval; \ - } else {} - -namespace mlx::core -{ - void Application::getMousePos(int* x, int* y) noexcept - { - *x = _in->getX(); - *y = _in->getY(); - } - - void Application::mouseMove(void* win, int x, int y) noexcept - { - CHECK_WINDOW_PTR(win); - if(!_graphics[*static_cast(win)]->hasWindow()) - { - error::report(e_kind::warning, "trying to move the mouse relative to a window that is targeting an image and not a real window, this is not allowed (move ignored)"); - return; - } - SDL_WarpMouseInWindow(_graphics[*static_cast(win)]->getWindow()->getNativeWindow(), x, y); - SDL_PumpEvents(); - } - - void Application::onEvent(void* win, int event, int (*funct_ptr)(int, void*), void* param) noexcept - { - CHECK_WINDOW_PTR(win); - if(!_graphics[*static_cast(win)]->hasWindow()) - { - error::report(e_kind::warning, "trying to add event hook for a window that is targeting an image and not a real window, this is not allowed (hook ignored)"); - return; - } - _in->onEvent(_graphics[*static_cast(win)]->getWindow()->getID(), event, funct_ptr, param); - } - - void Application::setWindowPosition(void* win, int x, int y) - { - CHECK_WINDOW_PTR(win); - if(!_graphics[*static_cast(win)]->hasWindow()) - { - error::report(e_kind::warning, "trying to move a window that is targeting an image and not a real window, this is not allowed"); - return; - } - SDL_SetWindowPosition(_graphics[*static_cast(win)]->getWindow()->getNativeWindow(), x, y); - } - - void Application::getScreenSize(void* win, int* w, int* h) noexcept - { - CHECK_WINDOW_PTR(win); - 
SDL_DisplayMode DM; - SDL_GetDesktopDisplayMode(SDL_GetWindowDisplayIndex(_graphics[*static_cast(win)]->getWindow()->getNativeWindow()), &DM); - *w = DM.w; - *h = DM.h; - } - - void Application::setFPSCap(std::uint32_t fps) noexcept - { - _fps.setMaxFPS(fps); - } - - void* Application::newGraphicsSuport(std::size_t w, std::size_t h, const char* title) - { - MLX_PROFILE_FUNCTION(); - auto it = std::find_if(_textures.begin(), _textures.end(), [=](const Texture& texture) - { - return &texture == reinterpret_cast(const_cast(title)); - }); - if(it != _textures.end()) - _graphics.emplace_back(std::make_unique(w, h, reinterpret_cast(const_cast(title)), _graphics.size())); - else - { - if(title == NULL) - { - core::error::report(e_kind::fatal_error, "invalid window title (NULL)"); - return nullptr; - } - _graphics.emplace_back(std::make_unique(w, h, title, _graphics.size())); - _in->addWindow(_graphics.back()->getWindow()); - } - return static_cast(&_graphics.back()->getID()); - } - - void Application::clearGraphicsSupport(void* win) - { - MLX_PROFILE_FUNCTION(); - CHECK_WINDOW_PTR(win); - _graphics[*static_cast(win)]->clearRenderData(); - } - - void Application::destroyGraphicsSupport(void* win) - { - MLX_PROFILE_FUNCTION(); - CHECK_WINDOW_PTR(win); - _graphics[*static_cast(win)].reset(); - } - - void Application::pixelPut(void* win, int x, int y, std::uint32_t color) const noexcept - { - MLX_PROFILE_FUNCTION(); - CHECK_WINDOW_PTR(win); - _graphics[*static_cast(win)]->pixelPut(x, y, color); - } - - void Application::stringPut(void* win, int x, int y, std::uint32_t color, char* str) - { - MLX_PROFILE_FUNCTION(); - CHECK_WINDOW_PTR(win); - if(str == nullptr) - { - core::error::report(e_kind::error, "wrong text (NULL)"); - return; - } - if(std::strlen(str) == 0) - { - core::error::report(e_kind::warning, "trying to put an empty text"); - return; - } - _graphics[*static_cast(win)]->stringPut(x, y, color, str); - } - - void Application::loadFont(void* win, const std::filesystem::path& filepath, float scale) - { - MLX_PROFILE_FUNCTION(); - CHECK_WINDOW_PTR(win); - _graphics[*static_cast(win)]->loadFont(filepath, scale); - } - - void Application::texturePut(void* win, void* img, int x, int y) - { - MLX_PROFILE_FUNCTION(); - CHECK_WINDOW_PTR(win); - CHECK_IMAGE_PTR(img, return); - Texture* texture = static_cast(img); - if(!texture->isInit()) - core::error::report(e_kind::error, "trying to put a texture that has been destroyed"); - else - _graphics[*static_cast(win)]->texturePut(texture, x, y); - } - - int Application::getTexturePixel(void* img, int x, int y) - { - MLX_PROFILE_FUNCTION(); - CHECK_IMAGE_PTR(img, return 0); - Texture* texture = static_cast(img); - if(!texture->isInit()) - { - core::error::report(e_kind::error, "trying to get a pixel from texture that has been destroyed"); - return 0; - } - return texture->getPixel(x, y); - } - - void Application::setTexturePixel(void* img, int x, int y, std::uint32_t color) - { - MLX_PROFILE_FUNCTION(); - CHECK_IMAGE_PTR(img, return); - Texture* texture = static_cast(img); - if(!texture->isInit()) - core::error::report(e_kind::error, "trying to set a pixel on texture that has been destroyed"); - else - texture->setPixel(x, y, color); - } - - void Application::loopHook(int (*f)(void*), void* param) - { - _loop_hook = f; - _param = param; - } - - void Application::loopEnd() noexcept - { - _in->finish(); - } -} diff --git a/src/core/bridge.cpp b/src/core/bridge.cpp deleted file mode 100644 index debed1c..0000000 --- a/src/core/bridge.cpp +++ /dev/null @@ 
-1,312 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* bridge.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 17:35:20 by maldavid #+# #+# */ -/* Updated: 2024/09/12 01:29:33 by tdelage ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include "errors.h" -#include "application.h" -#include -#include -#include -#include -#include - -static void* __mlx_ptr = nullptr; - -#define MLX_CHECK_APPLICATION_POINTER(ptr) \ - if(ptr != __mlx_ptr || ptr == NULL) \ - mlx::core::error::report(e_kind::fatal_error, "invalid mlx pointer passed to '%s'", MLX_FUNC_SIG); \ - else {} // just to avoid issues with possible if-else statements outside this macro - -extern "C" -{ - void* mlx_init() - { - if(__mlx_ptr != nullptr) - { - mlx::core::error::report(e_kind::error, "MLX cannot be initialized multiple times"); - return NULL; // not nullptr for the C compatibility - } - mlx::MemManager::get(); // just to initialize the C garbage collector - mlx::core::Application* app = new mlx::core::Application; - mlx::Render_Core::get().init(); - if(app == nullptr) - mlx::core::error::report(e_kind::fatal_error, "Tout a pété"); - __mlx_ptr = static_cast(app); - return __mlx_ptr; - } - - void* mlx_new_window(void* mlx, int w, int h, const char* title) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if(w <= 0 || h <= 0) - { - mlx::core::error::report(e_kind::fatal_error, "invalid window size (%d x %d)", w, h); - return NULL; // not nullptr for the C compatibility - } - return static_cast(mlx)->newGraphicsSuport(w, h, title); - } - - int mlx_loop_hook(void* mlx, int (*f)(void*), void* param) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->loopHook(f, param); - return 0; - } - - void mlx_set_window_position(void *mlx, void *win, int x, int y) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->setWindowPosition(win, x, y); - } - - int mlx_loop(void* mlx) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->run(); - return 0; - } - - int mlx_loop_end(void* mlx) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->loopEnd(); - return 0; - } - - int mlx_mouse_show() - { - SDL_ShowCursor(SDL_ENABLE); - return 0; - } - - int mlx_mouse_hide() - { - SDL_ShowCursor(SDL_DISABLE); - return 0; - } - - int mlx_mouse_move(void* mlx, void* win, int x, int y) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->mouseMove(win, x, y); - return 0; - } - - int mlx_mouse_get_pos(void* mlx, int* x, int* y) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->getMousePos(x, y); - return 0; - } - - int mlx_on_event(void* mlx, void* win, mlx_event_type event, int (*funct_ptr)(int, void*), void* param) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->onEvent(win, static_cast(event), funct_ptr, param); - return 0; - } - - void* mlx_new_image(void* mlx, int width, int height) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if (width <= 0 || height <= 0) - mlx::core::error::report(e_kind::fatal_error, "invalid image size (%d x %d)", width, height); - return static_cast(mlx)->newTexture(width, height); - } - - int mlx_get_image_pixel(void* mlx, void* img, int x, int y) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - int color = static_cast(mlx)->getTexturePixel(img, x, y); - unsigned char color_bits[4]; - color_bits[0] = (color & 0x000000FF); - color_bits[1] = (color & 0x0000FF00) >> 
8; - color_bits[2] = (color & 0x00FF0000) >> 16; - color_bits[3] = (color & 0xFF000000) >> 24; - return *reinterpret_cast(color_bits); - } - - void mlx_set_image_pixel(void* mlx, void* img, int x, int y, int color) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - unsigned char color_bits[4]; - color_bits[0] = (color & 0x00FF0000) >> 16; - color_bits[1] = (color & 0x0000FF00) >> 8; - color_bits[2] = (color & 0x000000FF); - color_bits[3] = (color & 0xFF000000) >> 24; - static_cast(mlx)->setTexturePixel(img, x, y, *reinterpret_cast(color_bits)); - } - - int mlx_put_image_to_window(void* mlx, void* win, void* img, int x, int y) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->texturePut(win, img, x, y); - return 0; - } - - int mlx_destroy_image(void* mlx, void* img) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->destroyTexture(img); - return 0; - } - - void* mlx_png_file_to_image(void* mlx, char* filename, int* width, int* height) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if (filename == nullptr) - mlx::core::error::report(e_kind::fatal_error, "PNG loader : filename is NULL"); - std::filesystem::path file(filename); - if(file.extension() != ".png") - { - mlx::core::error::report(e_kind::error, "PNG loader : not a png file '%s'", filename); - return nullptr; - } - return static_cast(mlx)->newStbTexture(filename, width, height); - } - - void* mlx_jpg_file_to_image(void* mlx, char* filename, int* width, int* height) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if (filename == nullptr) - mlx::core::error::report(e_kind::fatal_error, "JPG loader : filename is NULL"); - std::filesystem::path file(filename); - if(file.extension() != ".jpg" && file.extension() != ".jpeg") - { - mlx::core::error::report(e_kind::error, "JPG loader : not a jpg file '%s'", filename); - return nullptr; - } - return static_cast(mlx)->newStbTexture(filename, width, height); - } - - void* mlx_bmp_file_to_image(void* mlx, char* filename, int* width, int* height) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if (filename == nullptr) - mlx::core::error::report(e_kind::fatal_error, "BMP loader : filename is NULL"); - std::filesystem::path file(filename); - if(file.extension() != ".bmp" && file.extension() != ".dib") - { - mlx::core::error::report(e_kind::error, "BMP loader : not a bmp file '%s'", filename); - return nullptr; - } - return static_cast(mlx)->newStbTexture(filename, width, height); - } - - int mlx_pixel_put(void* mlx, void* win, int x, int y, int color) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - unsigned char color_bits[4]; - color_bits[0] = (color & 0x00FF0000) >> 16; - color_bits[1] = (color & 0x0000FF00) >> 8; - color_bits[2] = (color & 0x000000FF); - color_bits[3] = (color & 0xFF000000) >> 24; - static_cast(mlx)->pixelPut(win, x, y, *reinterpret_cast(color_bits)); - return 0; - } - - int mlx_string_put(void* mlx, void* win, int x, int y, int color, char* str) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - unsigned char color_bits[4]; - color_bits[0] = (color & 0x00FF0000) >> 16; - color_bits[1] = (color & 0x0000FF00) >> 8; - color_bits[2] = (color & 0x000000FF); - color_bits[3] = (color & 0xFF000000) >> 24; - static_cast(mlx)->stringPut(win, x, y, *reinterpret_cast(color_bits), str); - return 0; - } - - void mlx_set_font(void* mlx, void* win, char* filepath) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if (filepath == nullptr) - { - mlx::core::error::report(e_kind::error, "Font loader : filepath is NULL"); - return; - } - std::filesystem::path file(filepath); - if(std::strcmp(filepath, 
"default") != 0 && file.extension() != ".ttf" && file.extension() != ".tte") - { - mlx::core::error::report(e_kind::error, "TTF loader : not a truetype font file '%s'", filepath); - return; - } - if(std::strcmp(filepath, "default") == 0) - static_cast(mlx)->loadFont(win, file, 6.f); - else - static_cast(mlx)->loadFont(win, file, 16.f); - } - - void mlx_set_font_scale(void* mlx, void* win, char* filepath, float scale) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if (filepath == nullptr) - { - mlx::core::error::report(e_kind::error, "Font loader : filepath is NULL"); - return; - } - std::filesystem::path file(filepath); - if(std::strcmp(filepath, "default") != 0 && file.extension() != ".ttf" && file.extension() != ".tte") - { - mlx::core::error::report(e_kind::error, "TTF loader : not a truetype font file '%s'", filepath); - return; - } - static_cast(mlx)->loadFont(win, file, scale); - } - - int mlx_clear_window(void* mlx, void* win) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->clearGraphicsSupport(win); - return 0; - } - - int mlx_destroy_window(void* mlx, void* win) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->destroyGraphicsSupport(win); - return 0; - } - - int mlx_destroy_display(void* mlx) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - delete static_cast(mlx); - mlx::Render_Core::get().destroy(); - __mlx_ptr = nullptr; - return 0; - } - - int mlx_get_screens_size(void* mlx, void* win, int* w, int* h) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - static_cast(mlx)->getScreenSize(win, w, h); - return 0; - } - - int mlx_set_fps_goal(void* mlx, int fps) - { - MLX_CHECK_APPLICATION_POINTER(mlx); - if(fps < 0) - { - mlx::core::error::report(e_kind::error, "You cannot set a negative FPS cap (nice try)"); - fps = -fps; - } - if(fps == 0) - { - mlx::core::error::report(e_kind::error, "You cannot set a FPS cap to 0 (nice try)"); - return 0; - } - static_cast(mlx)->setFPSCap(static_cast(fps)); - return 0; - } -} diff --git a/src/core/errors.cpp b/src/core/errors.cpp deleted file mode 100644 index 63aa607..0000000 --- a/src/core/errors.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* errors.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 17:48:06 by maldavid #+# #+# */ -/* Updated: 2024/01/05 20:41:17 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include - -#include "errors.h" - -constexpr const int BUFFER_SIZE = 4096; - -namespace mlx::core::error -{ - void report(e_kind kind, std::string msg, ...) 
- { - char buffer[BUFFER_SIZE]; - - va_list al; - va_start(al, msg); - std::vsnprintf(buffer, BUFFER_SIZE, msg.c_str(), al); - va_end(al); - - switch(kind) - { - case e_kind::message: std::cout << "\033[1;34m[MacroLibX] Message : \033[1;0m" << buffer << std::endl; break; - case e_kind::warning: std::cout << "\033[1;35m[MacroLibX] Warning : \033[1;0m" << buffer << std::endl; break; - case e_kind::error: std::cerr << "\033[1;31m[MacroLibX] Error : \033[1;0m" << buffer << std::endl; break; - case e_kind::fatal_error: - std::cerr << "\033[1;31m[MacroLibX] Fatal Error : \033[1;0m" << buffer << std::endl; - std::exit(EXIT_FAILURE); - break; - } - } -} diff --git a/src/core/errors.h b/src/core/errors.h deleted file mode 100644 index 9bfde94..0000000 --- a/src/core/errors.h +++ /dev/null @@ -1,32 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* errors.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 17:42:32 by maldavid #+# #+# */ -/* Updated: 2023/12/27 17:21:07 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_ERRORS__ -#define __MLX_ERRORS__ - -#include -#include - -enum class e_kind -{ - message, - warning, - error, - fatal_error -}; - -namespace mlx::core::error -{ - void report(e_kind kind, std::string msg, ...); -} - -#endif // __MLX_ERRORS__ diff --git a/src/core/fps.cpp b/src/core/fps.cpp deleted file mode 100644 index eae6a8b..0000000 --- a/src/core/fps.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* fps.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 14:56:17 by maldavid #+# #+# */ -/* Updated: 2024/03/25 16:44:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include - -namespace mlx -{ - void FpsManager::init() - { - _timer = SDL_GetTicks64(); - _fps_before = static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()); - _fps_now = static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()); - } - - bool FpsManager::update() - { - using namespace std::chrono_literals; - _fps_now = static_cast(std::chrono::duration_cast(std::chrono::high_resolution_clock::now().time_since_epoch()).count()); - - if(SDL_GetTicks64() - _timer > 1000) - _timer += 1000; - - _fps_elapsed_time = _fps_now - _fps_before; - if(_fps_elapsed_time >= _ns) - { - _fps_before += _ns; - return true; - } - std::this_thread::sleep_for(std::chrono::duration(_ns - 1)); - return false; - } -} diff --git a/src/core/fps.h b/src/core/fps.h deleted file mode 100644 index 4433f07..0000000 --- a/src/core/fps.h +++ /dev/null @@ -1,41 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* fps.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 14:53:30 by maldavid #+# #+# */ -/* Updated: 2024/01/18 15:16:06 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_FPS__ -#define __MLX_FPS__ - -#include - -namespace mlx -{ - class 
FpsManager - { - public: - FpsManager() = default; - - void init(); - bool update(); - inline void setMaxFPS(std::uint32_t fps) noexcept { _max_fps = fps; _ns = 1000000000.0 / fps; } - - ~FpsManager() = default; - - private: - double _ns = 1000000000.0 / 1'337'000.0; - std::uint64_t _timer = 0; - std::uint64_t _fps_before = 0; - std::uint64_t _fps_now = 0; - std::uint32_t _max_fps = 1'337'000; - std::uint32_t _fps_elapsed_time = 0; - }; -} - -#endif diff --git a/src/core/graphics.cpp b/src/core/graphics.cpp deleted file mode 100644 index 0ecf4be..0000000 --- a/src/core/graphics.cpp +++ /dev/null @@ -1,91 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* graphics.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 15:13:55 by maldavid #+# #+# */ -/* Updated: 2024/04/22 17:34:27 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -namespace mlx -{ - GraphicsSupport::GraphicsSupport(std::size_t w, std::size_t h, Texture* render_target, int id) : - _window(nullptr), - _renderer(std::make_unique()), - _width(w), - _height(h), - _id(id), - _has_window(false) - { - MLX_PROFILE_FUNCTION(); - _renderer->setWindow(nullptr); - _renderer->init(render_target); - _pixel_put_pipeline.init(w, h, *_renderer); - _text_manager.init(*_renderer); - } - - GraphicsSupport::GraphicsSupport(std::size_t w, std::size_t h, std::string title, int id) : - _window(std::make_shared(w, h, title)), - _renderer(std::make_unique()), - _width(w), - _height(h), - _id(id), - _has_window(true) - { - MLX_PROFILE_FUNCTION(); - _renderer->setWindow(_window.get()); - _renderer->init(nullptr); - _pixel_put_pipeline.init(w, h, *_renderer); - _text_manager.init(*_renderer); - } - - void GraphicsSupport::render() noexcept - { - MLX_PROFILE_FUNCTION(); - if(!_renderer->beginFrame()) - return; - _proj = glm::ortho(0, _width, 0, _height); - _renderer->getUniformBuffer()->setData(sizeof(_proj), &_proj); - - std::array sets = { - _renderer->getVertDescriptorSet().get(), - VK_NULL_HANDLE - }; - - for(auto& data : _drawlist) - data->render(sets, *_renderer); - - _pixel_put_pipeline.render(sets, *_renderer); - - _renderer->endFrame(); - - for(auto& data : _drawlist) - data->resetUpdate(); - - #ifdef GRAPHICS_MEMORY_DUMP - // dump memory to file every two seconds - static std::uint64_t timer = SDL_GetTicks64(); - if(SDL_GetTicks64() - timer > 2000) - { - Render_Core::get().getAllocator().dumpMemoryToJson(); - timer += 2000; - } - #endif - } - - GraphicsSupport::~GraphicsSupport() - { - MLX_PROFILE_FUNCTION(); - vkDeviceWaitIdle(Render_Core::get().getDevice().get()); - _text_manager.destroy(); - _pixel_put_pipeline.destroy(); - _renderer->destroy(); - if(_window) - _window->destroy(); - } -} diff --git a/src/core/graphics.h b/src/core/graphics.h deleted file mode 100644 index 0966527..0000000 --- a/src/core/graphics.h +++ /dev/null @@ -1,83 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* graphics.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 14:49:49 by maldavid #+# #+# */ -/* Updated: 2024/03/24 14:43:09 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_GRAPHICS__ -#define __MLX_GRAPHICS__ - -#include 
-#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - class GraphicsSupport : public NonCopyable - { - public: - GraphicsSupport(std::size_t w, std::size_t h, Texture* render_target, int id); - GraphicsSupport(std::size_t w, std::size_t h, std::string title, int id); - - inline int& getID() noexcept; - inline std::shared_ptr getWindow(); - - void render() noexcept; - - inline void clearRenderData() noexcept; - inline void pixelPut(int x, int y, std::uint32_t color) noexcept; - inline void stringPut(int x, int y, std::uint32_t color, std::string str); - inline void texturePut(Texture* texture, int x, int y); - inline void loadFont(const std::filesystem::path& filepath, float scale); - inline void tryEraseTextureFromManager(Texture* texture) noexcept; - - inline bool hasWindow() const noexcept { return _has_window; } - - inline Renderer& getRenderer() { return *_renderer; } - - ~GraphicsSupport(); - - private: - PixelPutPipeline _pixel_put_pipeline; - - std::vector _drawlist; - - TextManager _text_manager; - TextureManager _texture_manager; - - glm::mat4 _proj = glm::mat4(1.0); - - std::shared_ptr _window; - std::unique_ptr _renderer; - - std::size_t _width = 0; - std::size_t _height = 0; - - int _id; - - bool _has_window; - }; -} - -#include - -#endif diff --git a/src/core/graphics.inl b/src/core/graphics.inl deleted file mode 100644 index 6335cbf..0000000 --- a/src/core/graphics.inl +++ /dev/null @@ -1,80 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* graphics.inl :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 15:13:55 by maldavid #+# #+# */ -/* Updated: 2023/04/02 15:26:16 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include - -namespace mlx -{ - int& GraphicsSupport::getID() noexcept { return _id; } - std::shared_ptr GraphicsSupport::getWindow() { return _window; } - - void GraphicsSupport::clearRenderData() noexcept - { - MLX_PROFILE_FUNCTION(); - _drawlist.clear(); - _pixel_put_pipeline.clear(); - _text_manager.clear(); - _texture_manager.clear(); - } - - void GraphicsSupport::pixelPut(int x, int y, std::uint32_t color) noexcept - { - MLX_PROFILE_FUNCTION(); - _pixel_put_pipeline.setPixel(x, y, color); - } - - void GraphicsSupport::stringPut(int x, int y, std::uint32_t color, std::string str) - { - MLX_PROFILE_FUNCTION(); - std::pair res = _text_manager.registerText(x, y, color, str); - if(!res.second) // if this is not a completly new text draw - { - auto it = std::find(_drawlist.begin(), _drawlist.end(), res.first); - if(it != _drawlist.end()) - _drawlist.erase(it); - } - _drawlist.push_back(res.first); - } - - void GraphicsSupport::texturePut(Texture* texture, int x, int y) - { - MLX_PROFILE_FUNCTION(); - auto res = _texture_manager.registerTexture(texture, x, y); - if(!res.second) // if this is not a completly new texture draw - { - auto it = std::find(_drawlist.begin(), _drawlist.end(), res.first); - if(it != _drawlist.end()) - _drawlist.erase(it); - } - _drawlist.push_back(res.first); - } - - void GraphicsSupport::loadFont(const std::filesystem::path& filepath, float scale) - { - MLX_PROFILE_FUNCTION(); - _text_manager.loadFont(*_renderer, filepath, scale); - } - - void GraphicsSupport::tryEraseTextureFromManager(Texture* texture) noexcept - { - 
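// [Editor's illustrative sketch, not part of the original sources] The
// stringPut()/texturePut() helpers above share a "re-register and move to the
// back" idiom, so the most recent put call is drawn last (on top). A minimal
// standalone version of that idiom, with hypothetical names, might look like:
#include <algorithm>
#include <utility>
#include <vector>

template <typename Resource>
void push_to_back_of_drawlist(std::vector<Resource*>& drawlist,
                              std::pair<Resource*, bool> registration)
{
	// registration.second == false means the manager already knew this
	// resource, so any earlier occurrence is removed before re-appending it.
	if(!registration.second)
	{
		auto it = std::find(drawlist.begin(), drawlist.end(), registration.first);
		if(it != drawlist.end())
			drawlist.erase(it);
	}
	drawlist.push_back(registration.first);
}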
MLX_PROFILE_FUNCTION(); - for(auto it = _drawlist.begin(); it != _drawlist.end();) - { - if(_texture_manager.isTextureKnown(texture)) - it = _drawlist.erase(it); - else - ++it; - } - _texture_manager.eraseTextures(texture); - } -} diff --git a/src/core/memory.cpp b/src/core/memory.cpp deleted file mode 100644 index 2e2ee51..0000000 --- a/src/core/memory.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* memory.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/07 16:32:01 by kbz_8 #+# #+# */ -/* Updated: 2023/12/11 15:25:02 by kbz_8 ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include - -namespace mlx -{ - void* MemManager::malloc(std::size_t size) - { - void* ptr = std::malloc(size); - if(ptr != nullptr) - _blocks.push_back(ptr); - return ptr; - } - - void* MemManager::calloc(std::size_t n, std::size_t size) - { - void* ptr = std::calloc(n, size); - if(ptr != nullptr) - _blocks.push_back(ptr); - return ptr; - } - - void* MemManager::realloc(void* ptr, std::size_t size) - { - void* ptr2 = std::realloc(ptr, size); - if(ptr2 != nullptr) - _blocks.push_back(ptr2); - auto it = std::find(_blocks.begin(), _blocks.end(), ptr); - if(it != _blocks.end()) - _blocks.erase(it); - return ptr2; - } - - void MemManager::free(void* ptr) - { - auto it = std::find(_blocks.begin(), _blocks.end(), ptr); - if(it == _blocks.end()) - { - core::error::report(e_kind::error, "Memory Manager : trying to free a pointer not allocated by the memory manager"); - return; - } - std::free(*it); - _blocks.erase(it); - } - - MemManager::~MemManager() - { - std::for_each(_blocks.begin(), _blocks.end(), [](void* ptr) - { - std::free(ptr); - }); - } -} diff --git a/src/core/memory.h b/src/core/memory.h deleted file mode 100644 index 6d61b88..0000000 --- a/src/core/memory.h +++ /dev/null @@ -1,41 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* memory.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/07 16:31:51 by kbz_8 #+# #+# */ -/* Updated: 2023/12/11 19:47:13 by kbz_8 ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_MEMORY__ -#define __MLX_MEMORY__ - -#include -#include -#include - -namespace mlx -{ - class MemManager : public Singleton - { - friend class Singleton; - - public: - static void* malloc(std::size_t size); - static void* calloc(std::size_t n, std::size_t size); - static void* realloc(void* ptr, std::size_t size); - static void free(void* ptr); - - private: - MemManager() = default; - ~MemManager(); - - private: - inline static std::list _blocks; - }; -} - -#endif diff --git a/src/core/profiler.cpp b/src/core/profiler.cpp deleted file mode 100644 index 43bc96d..0000000 --- a/src/core/profiler.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* profiler.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/10 13:56:21 by maldavid #+# #+# */ -/* Updated: 2024/01/10 18:17:35 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include 
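// [Editor's illustrative sketch, not part of the original sources] The
// MemManager deleted above is essentially a leak guard around malloc/free.
// A self-contained, non-singleton version of the same idea (hypothetical
// class name) could read:
#include <algorithm>
#include <cstdlib>
#include <list>

class LeakGuard
{
public:
	void* allocate(std::size_t size)
	{
		void* ptr = std::malloc(size);
		if(ptr != nullptr)
			_blocks.push_back(ptr); // remember every pointer handed out
		return ptr;
	}

	void release(void* ptr)
	{
		auto it = std::find(_blocks.begin(), _blocks.end(), ptr);
		if(it == _blocks.end())
			return; // not ours; the real MemManager reports an error here
		std::free(*it);
		_blocks.erase(it);
	}

	~LeakGuard()
	{
		// anything the user forgot to release is freed on destruction
		for(void* ptr : _blocks)
			std::free(ptr);
	}

private:
	std::list<void*> _blocks;
};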
-#include - -namespace mlx -{ - void Profiler::beginRuntimeSession() - { - std::lock_guard lock(_mutex); - if(_runtime_session_began) - return; - _output_stream.open("./runtime_profile.mlx.json", std::ofstream::out | std::ofstream::trunc); - - if(_output_stream.is_open()) - writeHeader(); - else - core::error::report(e_kind::error, "Profiler : cannot open runtime profile file"); - _runtime_session_began = true; - } - - void Profiler::appendProfileData(ProfileResult&& result) - { - std::lock_guard lock(_mutex); - auto it = _profile_data.find(result.name); - if(it != _profile_data.end()) - { - result.elapsed_time = (result.elapsed_time + it->second.second.elapsed_time) / it->second.first; - _profile_data[result.name].first++; - _profile_data[result.name].second = result; - } - else - _profile_data[result.name] = std::make_pair(1, result); - } - - void Profiler::writeProfile(const ProfileResult& result) - { - std::stringstream json; - json << std::setprecision(9) << std::fixed; - json << ",\n{\n"; - json << "\t\"type\" : \"function\"," << '\n'; - json << "\t\"name\" : \"" << result.name << "\"," << '\n'; - json << "\t\"thread id\" : " << result.thread_id << "," << '\n'; - json << "\t\"average duration\" : \"" << result.elapsed_time.count() << "ms\"\n"; - json << "}"; - _output_stream << json.str(); - } - - void Profiler::endRuntimeSession() - { - std::lock_guard lock(_mutex); - if(!_runtime_session_began) - return; - for(auto& [_, pair] : _profile_data) - writeProfile(pair.second); - writeFooter(); - _output_stream.close(); - _profile_data.clear(); - _runtime_session_began = false; - } - - Profiler::~Profiler() - { - if(!_runtime_session_began) - return; - endRuntimeSession(); - } -} diff --git a/src/core/profiler.h b/src/core/profiler.h deleted file mode 100644 index 4424238..0000000 --- a/src/core/profiler.h +++ /dev/null @@ -1,146 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* profiler.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/10 13:35:45 by maldavid #+# #+# */ -/* Updated: 2024/03/24 14:41:27 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_PROFILER__ -#define __MLX_PROFILER__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - using FloatingPointMilliseconds = std::chrono::duration; - - struct ProfileResult - { - std::string name; - FloatingPointMilliseconds elapsed_time; - std::thread::id thread_id; - }; - - class Profiler : public Singleton - { - friend class Singleton; - - public: - Profiler(const Profiler&) = delete; - Profiler(Profiler&&) = delete; - - void appendProfileData(ProfileResult&& result); - - private: - Profiler() { beginRuntimeSession(); } - ~Profiler(); - - void beginRuntimeSession(); - void writeProfile(const ProfileResult& result); - void endRuntimeSession(); - inline void writeHeader() - { - _output_stream << "{\"profileData\":[{}"; - _output_stream.flush(); - } - - inline void writeFooter() - { - _output_stream << "]}"; - _output_stream.flush(); - } - - private: - std::unordered_map> _profile_data; - std::ofstream _output_stream; - std::mutex _mutex; - bool _runtime_session_began = false; - }; - - class ProfilerTimer - { - public: - ProfilerTimer(const char* name) : _name(name) - { - _start_timepoint = std::chrono::steady_clock::now(); - } - - inline void stop() - { - 
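// [Editor's illustrative sketch, not part of the original sources] The
// MLX_PROFILE_SCOPE / MLX_PROFILE_FUNCTION macros below rely on this RAII
// pattern, where a stack-allocated timer reports its own lifetime when it is
// destroyed. A stripped-down standalone equivalent (printing instead of
// feeding the Profiler singleton) could be:
#include <chrono>
#include <cstdio>

class ScopeTimer
{
public:
	explicit ScopeTimer(const char* name)
		: _name(name), _start(std::chrono::steady_clock::now()) {}

	~ScopeTimer()
	{
		// measure the scope's lifetime and report it in milliseconds
		const std::chrono::duration<double, std::milli> elapsed =
			std::chrono::steady_clock::now() - _start;
		std::printf("%s took %.3f ms\n", _name, elapsed.count());
	}

private:
	const char* _name;
	std::chrono::steady_clock::time_point _start;
};
// Usage: void foo() { ScopeTimer timer("foo"); /* ...work... */ }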
auto end_timepoint = std::chrono::steady_clock::now(); - auto high_res_start = FloatingPointMilliseconds{ _start_timepoint.time_since_epoch() }; - auto elapsed_time = std::chrono::time_point_cast(end_timepoint).time_since_epoch() - std::chrono::time_point_cast(_start_timepoint).time_since_epoch(); - - Profiler::get().appendProfileData({ _name, elapsed_time, std::this_thread::get_id() }); - - _stopped = true; - } - - ~ProfilerTimer() - { - if(!_stopped) - stop(); - } - - private: - std::chrono::time_point _start_timepoint; - const char* _name; - bool _stopped = false; - }; - - namespace ProfilerUtils - { - template - struct ChangeResult - { - char data[N]; - }; - - template - constexpr auto cleanupOutputString(const char(&expr)[N], const char(&remove)[K]) - { - ChangeResult result = {}; - - std::size_t src_index = 0; - std::size_t dst_index = 0; - while(src_index < N) - { - std::size_t match_index = 0; - while(match_index < K - 1 && src_index + match_index < N - 1 && expr[src_index + match_index] == remove[match_index]) - match_index++; - if(match_index == K - 1) - src_index += match_index; - result.data[dst_index++] = expr[src_index] == '"' ? '\'' : expr[src_index]; - src_index++; - } - return result; - } - } -} - -#ifdef PROFILER - #define MLX_PROFILE_SCOPE_LINE2(name, line) constexpr auto fixedName##line = ::mlx::ProfilerUtils::cleanupOutputString(name, "__cdecl ");\ - ::mlx::ProfilerTimer timer##line(fixedName##line.data) - #define MLX_PROFILE_SCOPE_LINE(name, line) MLX_PROFILE_SCOPE_LINE2(name, line) - #define MLX_PROFILE_SCOPE(name) MLX_PROFILE_SCOPE_LINE(name, __LINE__) - #define MLX_PROFILE_FUNCTION() MLX_PROFILE_SCOPE(MLX_FUNC_SIG) -#else - #define MLX_PROFILE_SCOPE(name) - #define MLX_PROFILE_FUNCTION() -#endif - -#endif diff --git a/src/platform/inputs.cpp b/src/platform/inputs.cpp deleted file mode 100644 index 40adb8f..0000000 --- a/src/platform/inputs.cpp +++ /dev/null @@ -1,151 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* inputs.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/05 16:30:19 by maldavid #+# #+# */ -/* Updated: 2024/02/23 22:27:30 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "inputs.h" -#include -#include - -namespace mlx -{ - void Input::update() - { - MLX_PROFILE_FUNCTION(); - _xRel = 0; - _yRel = 0; - - while(SDL_PollEvent(&_event)) - { - if(_event.type == SDL_MOUSEMOTION) - { - _x = _event.motion.x; - _y = _event.motion.y; - - _xRel = _event.motion.xrel; - _yRel = _event.motion.yrel; - } - - std::uint32_t id = _event.window.windowID; - if(_events_hooks.find(id) == _events_hooks.end()) - continue; - auto& hooks = _events_hooks[id]; - - switch(_event.type) - { - case SDL_KEYDOWN: - { - if(hooks[MLX_KEYDOWN].hook) - hooks[MLX_KEYDOWN].hook(_event.key.keysym.scancode, hooks[MLX_KEYDOWN].param); - break; - } - - case SDL_KEYUP: - { - if(hooks[MLX_KEYUP].hook) - hooks[MLX_KEYUP].hook(_event.key.keysym.scancode, hooks[MLX_KEYUP].param); - break; - } - - case SDL_MOUSEBUTTONDOWN: - { - if(hooks[MLX_MOUSEDOWN].hook) - hooks[MLX_MOUSEDOWN].hook(_event.button.button, hooks[MLX_MOUSEDOWN].param); - break; - } - - case SDL_MOUSEBUTTONUP: - { - if(hooks[MLX_MOUSEUP].hook) - hooks[MLX_MOUSEUP].hook(_event.button.button, hooks[MLX_MOUSEUP].param); - break; - } - - case SDL_MOUSEWHEEL: - { - if(hooks[MLX_MOUSEWHEEL].hook) - { - if(_event.wheel.y > 0) 
// scroll up - hooks[MLX_MOUSEWHEEL].hook(1, hooks[MLX_MOUSEWHEEL].param); - else if(_event.wheel.y < 0) // scroll down - hooks[MLX_MOUSEWHEEL].hook(2, hooks[MLX_MOUSEWHEEL].param); - - if(_event.wheel.x > 0) // scroll right - hooks[MLX_MOUSEWHEEL].hook(3, hooks[MLX_MOUSEWHEEL].param); - else if(_event.wheel.x < 0) // scroll left - hooks[MLX_MOUSEWHEEL].hook(4, hooks[MLX_MOUSEWHEEL].param); - } - break; - } - - case SDL_WINDOWEVENT: - { - auto& win_hook = hooks[MLX_WINDOW_EVENT]; - switch(_event.window.event) - { - case SDL_WINDOWEVENT_CLOSE: - { - if(win_hook.hook) - win_hook.hook(0, win_hook.param); - break; - } - case SDL_WINDOWEVENT_MOVED: - { - if(win_hook.hook) - win_hook.hook(1, win_hook.param); - break; - } - case SDL_WINDOWEVENT_MINIMIZED: - { - if(win_hook.hook) - win_hook.hook(2, win_hook.param); - break; - } - case SDL_WINDOWEVENT_MAXIMIZED: - { - if(win_hook.hook) - win_hook.hook(3, win_hook.param); - break; - } - case SDL_WINDOWEVENT_ENTER: - { - if(win_hook.hook) - win_hook.hook(4, win_hook.param); - break; - } - case SDL_WINDOWEVENT_FOCUS_GAINED: - { - if(win_hook.hook) - win_hook.hook(5, win_hook.param); - break; - } - case SDL_WINDOWEVENT_LEAVE: - { - if(win_hook.hook) - win_hook.hook(6, win_hook.param); - break; - } - case SDL_WINDOWEVENT_FOCUS_LOST: - { - if(win_hook.hook) - win_hook.hook(7, win_hook.param); - break; - } - - default : break; - } - break; - } - - default: break; - } - } - } -} diff --git a/src/platform/inputs.h b/src/platform/inputs.h deleted file mode 100644 index c014f56..0000000 --- a/src/platform/inputs.h +++ /dev/null @@ -1,77 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* inputs.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/05 16:27:35 by maldavid #+# #+# */ -/* Updated: 2024/04/22 17:35:23 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include -#include -#include - -#include - -#include "window.h" - -namespace mlx -{ - struct Hook - { - func::function hook; - void* param = nullptr; - }; - - class Input - { - public: - Input() = default; - - void update(); - - inline bool isMouseMoving() const noexcept { return _xRel || _yRel; } - - inline int getX() const noexcept { return _x; } - inline int getY() const noexcept { return _y; } - - inline int getXRel() const noexcept { return _xRel; } - inline int getYRel() const noexcept { return _yRel; } - - inline bool isRunning() const noexcept { return !_end; } - inline constexpr void run() noexcept { _end = false; } - inline constexpr void finish() noexcept { _end = true; } - - inline void addWindow(std::shared_ptr window) - { - _windows[window->getID()] = window; - _events_hooks[window->getID()] = {}; - } - - inline void onEvent(std::uint32_t id, int event, int (*funct_ptr)(int, void*), void* param) noexcept - { - _events_hooks[id][event].hook = funct_ptr; - _events_hooks[id][event].param = param; - } - - ~Input() = default; - - private: - std::unordered_map> _windows; - std::unordered_map> _events_hooks; - SDL_Event _event; - - int _x = 0; - int _y = 0; - int _xRel = 0; - int _yRel = 0; - - bool _end = false; - }; -} diff --git a/src/platform/window.cpp b/src/platform/window.cpp deleted file mode 100644 index 890bb39..0000000 --- a/src/platform/window.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/* ************************************************************************** 
*/ -/* */ -/* ::: :::::::: */ -/* window.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 17:36:44 by maldavid #+# #+# */ -/* Updated: 2024/10/27 00:31:39 by kiroussa ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include - -#ifndef MLX_WINDOW_CREATE_FLAGS - #define MLX_WINDOW_CREATE_FLAGS SDL_WINDOW_VULKAN | SDL_WINDOW_SHOWN -#endif // MLX_WINDOW_CREATE_FLAGS - -namespace mlx -{ - #if SDL_BYTEORDER == SDL_BIG_ENDIAN - constexpr const std::uint32_t rmask = 0xff000000; - constexpr const std::uint32_t gmask = 0x00ff0000; - constexpr const std::uint32_t bmask = 0x0000ff00; - constexpr const std::uint32_t amask = 0x000000ff; - #else - constexpr const std::uint32_t rmask = 0x000000ff; - constexpr const std::uint32_t gmask = 0x0000ff00; - constexpr const std::uint32_t bmask = 0x00ff0000; - constexpr const std::uint32_t amask = 0xff000000; - #endif - - MLX_Window::MLX_Window(std::size_t w, std::size_t h, const std::string& title) : _width(w), _height(h) - { - if(title.find("vvaas") != std::string::npos) - core::error::report(e_kind::message, "vvaas est mauvais"); - _win = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, w, h, MLX_WINDOW_CREATE_FLAGS); - if(!_win) - core::error::report(e_kind::fatal_error, std::string("unable to open a new window, ") + SDL_GetError()); - _id = SDL_GetWindowID(_win); - _icon = SDL_CreateRGBSurfaceFrom(static_cast(logo_mlx), logo_mlx_width, logo_mlx_height, 32, 4 * logo_mlx_width, rmask, gmask, bmask, amask); - SDL_SetWindowIcon(_win, _icon); - } - - void MLX_Window::destroy() noexcept - { - if(_win != nullptr) - { - SDL_DestroyWindow(_win); - _win = nullptr; - } - if(_icon != nullptr) - { - SDL_FreeSurface(_icon); - _icon = nullptr; - } - } -} diff --git a/src/platform/window.h b/src/platform/window.h deleted file mode 100644 index dfe0924..0000000 --- a/src/platform/window.h +++ /dev/null @@ -1,45 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* window.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/04 21:53:12 by maldavid #+# #+# */ -/* Updated: 2023/12/21 00:24:26 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_WINDOW__ -#define __MLX_WINDOW__ - -#include -#include -#include - -namespace mlx -{ - class MLX_Window - { - public: - MLX_Window(std::size_t w, std::size_t h, const std::string& title); - - inline SDL_Window* getNativeWindow() const noexcept { return _win; } - inline int getWidth() const noexcept { return _width; } - inline int getHeight() const noexcept { return _height; } - inline std::uint32_t getID() const noexcept { return _id; } - - void destroy() noexcept; - - ~MLX_Window() = default; - - private: - SDL_Surface* _icon = nullptr; - SDL_Window* _win = nullptr; - int _width = 0; - int _height = 0; - std::uint32_t _id = -1; - }; -} - -#endif diff --git a/src/renderer/buffers/vk_buffer.cpp b/src/renderer/buffers/vk_buffer.cpp deleted file mode 100644 index 71693cc..0000000 --- a/src/renderer/buffers/vk_buffer.cpp +++ /dev/null @@ -1,167 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_buffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ 
+#+ */ -/* Created: 2022/10/08 18:55:57 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:28:35 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_buffer.h" -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - void Buffer::create(Buffer::kind type, VkDeviceSize size, VkBufferUsageFlags usage, const char* name, const void* data) - { - MLX_PROFILE_FUNCTION(); - _usage = usage; - if(type == Buffer::kind::constant || type == Buffer::kind::dynamic_device_local) - { - if(data == nullptr && type == Buffer::kind::constant) - { - core::error::report(e_kind::warning, "Vulkan : trying to create constant buffer without data (constant buffers cannot be modified after creation)"); - return; - } - _usage |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT; - } - - VmaAllocationCreateInfo alloc_info{}; - alloc_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; - alloc_info.usage = VMA_MEMORY_USAGE_AUTO; - - createBuffer(_usage, alloc_info, size, name); - - if(data != nullptr) - { - void* mapped = nullptr; - mapMem(&mapped); - std::memcpy(mapped, data, size); - unmapMem(); - if(type == Buffer::kind::constant || type == Buffer::kind::dynamic_device_local) - pushToGPU(); - } - } - - void Buffer::destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - if(_is_mapped) - unmapMem(); - if(_buffer != VK_NULL_HANDLE) - Render_Core::get().getAllocator().destroyBuffer(_allocation, _buffer); - _buffer = VK_NULL_HANDLE; - } - - void Buffer::createBuffer(VkBufferUsageFlags usage, VmaAllocationCreateInfo info, VkDeviceSize size, [[maybe_unused]] const char* name) - { - MLX_PROFILE_FUNCTION(); - VkBufferCreateInfo bufferInfo{}; - bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; - bufferInfo.size = size; - bufferInfo.usage = usage; - bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; - - #ifdef DEBUG - _name = name; - std::string alloc_name = _name; - if(usage & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) - alloc_name.append("_index_buffer"); - else if(usage & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) - alloc_name.append("_vertex_buffer"); - else if(!(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) - alloc_name.append("_buffer"); - _allocation = Render_Core::get().getAllocator().createBuffer(&bufferInfo, &info, _buffer, alloc_name.c_str()); - #else - _allocation = Render_Core::get().getAllocator().createBuffer(&bufferInfo, &info, _buffer, nullptr); - #endif - _size = size; - } - - bool Buffer::copyFromBuffer(const Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!(_usage & VK_BUFFER_USAGE_TRANSFER_DST_BIT)) - { - core::error::report(e_kind::error, "Vulkan : buffer cannot be the destination of a copy because it does not have the correct usage flag"); - return false; - } - if(!(buffer._usage & VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) - { - core::error::report(e_kind::error, "Vulkan : buffer cannot be the source of a copy because it does not have the correct usage flag"); - return false; - } - - CmdBuffer& cmd = Render_Core::get().getSingleTimeCmdBuffer(); - cmd.beginRecord(); - - cmd.copyBuffer(*this, const_cast(buffer)); - - cmd.endRecord(); - cmd.submitIdle(); - - return true; - } - - void Buffer::pushToGPU() noexcept - { - MLX_PROFILE_FUNCTION(); - VmaAllocationCreateInfo alloc_info{}; - alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; - - Buffer newBuffer; - newBuffer._usage = (_usage & 0xFFFFFFFC) | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - #ifdef DEBUG - std::string new_name = _name + "_GPU"; - 
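// [Editor's illustrative note, not part of the original sources] This
// pushToGPU() flow is the classic staging-buffer pattern: allocate a
// VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE buffer, copy the host-visible contents
// into it through a single-time command buffer (copyFromBuffer), then swap()
// handles so `this` owns the device-local allocation while the temporary
// carries the old storage into destroy(). The only non-obvious step is the
// usage-flag arithmetic: `(_usage & 0xFFFFFFFC)` clears the two transfer bits
// (TRANSFER_SRC = 0x1, TRANSFER_DST = 0x2) before TRANSFER_DST is re-added.
// Worked example, with _usage == VERTEX_BUFFER (0x80) | TRANSFER_SRC (0x01):
//   (0x81 & 0xFFFFFFFC) | 0x02 == 0x80 | 0x02 == VERTEX_BUFFER | TRANSFER_DST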
newBuffer.createBuffer(newBuffer._usage, alloc_info, _size, new_name.c_str()); - #else - newBuffer.createBuffer(newBuffer._usage, alloc_info, _size, nullptr); - #endif - - if(newBuffer.copyFromBuffer(*this)) // if the copy succeded we swap the buffers, otherwise the new one is deleted - this->swap(newBuffer); - newBuffer.destroy(); - } - - void Buffer::swap(Buffer& buffer) noexcept - { - VkBuffer temp_b = _buffer; - _buffer = buffer._buffer; - buffer._buffer = temp_b; - - VmaAllocation temp_a = buffer._allocation; - buffer._allocation = _allocation; - _allocation = temp_a; - - VkDeviceSize temp_size = buffer._size; - buffer._size = _size; - _size = temp_size; - - VkDeviceSize temp_offset = buffer._offset; - buffer._offset = _offset; - _offset = temp_offset; - - VkBufferUsageFlags temp_u = _usage; - _usage = buffer._usage; - buffer._usage = temp_u; - - #ifdef DEBUG - std::string temp_n = _name; - _name = buffer._name; - buffer._name = temp_n; - #endif - } - - void Buffer::flush(VkDeviceSize size, VkDeviceSize offset) - { - Render_Core::get().getAllocator().flush(_allocation, size, offset); - } -} diff --git a/src/renderer/buffers/vk_buffer.h b/src/renderer/buffers/vk_buffer.h deleted file mode 100644 index f36b27f..0000000 --- a/src/renderer/buffers/vk_buffer.h +++ /dev/null @@ -1,65 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_buffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 23:18:52 by maldavid #+# #+# */ -/* Updated: 2024/01/11 05:16:58 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_BUFFER__ -#define __MLX_VK_BUFFER__ - -#include -#include -#include -#include - -namespace mlx -{ - class Buffer : public CmdResource - { - public: - enum class kind { dynamic, dynamic_device_local, uniform, constant }; - - void create(kind type, VkDeviceSize size, VkBufferUsageFlags usage, const char* name, const void* data = nullptr); - void destroy() noexcept; - - inline void mapMem(void** data) noexcept { Render_Core::get().getAllocator().mapMemory(_allocation, data); _is_mapped = true; } - inline bool isMapped() const noexcept { return _is_mapped; } - inline void unmapMem() noexcept { Render_Core::get().getAllocator().unmapMemory(_allocation); _is_mapped = false; } - - void flush(VkDeviceSize size = VK_WHOLE_SIZE, VkDeviceSize offset = 0); - bool copyFromBuffer(const Buffer& buffer) noexcept; - - inline VkBuffer& operator()() noexcept { return _buffer; } - inline VkBuffer& get() noexcept { return _buffer; } - inline VkDeviceSize getSize() const noexcept { return _size; } - inline VkDeviceSize getOffset() const noexcept { return _offset; } - - protected: - void pushToGPU() noexcept; - void swap(Buffer& buffer) noexcept; - - protected: - VmaAllocation _allocation; - VkBuffer _buffer = VK_NULL_HANDLE; - VkDeviceSize _offset = 0; - VkDeviceSize _size = 0; - - private: - void createBuffer(VkBufferUsageFlags usage, VmaAllocationCreateInfo info, VkDeviceSize size, const char* name); - - private: - #ifdef DEBUG - std::string _name; - #endif - VkBufferUsageFlags _usage = 0; - bool _is_mapped = false; - }; -} - -#endif diff --git a/src/renderer/buffers/vk_ibo.h b/src/renderer/buffers/vk_ibo.h deleted file mode 100644 index 7acc988..0000000 --- a/src/renderer/buffers/vk_ibo.h +++ /dev/null @@ -1,31 +0,0 @@ -/* 
************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_ibo.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/25 15:05:05 by maldavid #+# #+# */ -/* Updated: 2024/01/10 23:05:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_IBO__ -#define __VK_IBO__ - -#include -#include -#include "vk_buffer.h" -#include - -namespace mlx -{ - class C_IBO : public Buffer - { - public: - inline void create(std::uint32_t size, const std::uint16_t* data, const char* name) { Buffer::create(Buffer::kind::constant, size, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, name, data); } - inline void bind(Renderer& renderer) noexcept { renderer.getActiveCmdBuffer().bindIndexBuffer(*this); } - }; -} - -#endif diff --git a/src/renderer/buffers/vk_ubo.cpp b/src/renderer/buffers/vk_ubo.cpp deleted file mode 100644 index 996516c..0000000 --- a/src/renderer/buffers/vk_ubo.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_ubo.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:45:52 by maldavid #+# #+# */ -/* Updated: 2024/01/10 18:30:57 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_ubo.h" -#include -#include -#include - -namespace mlx -{ - void UBO::create(Renderer* renderer, std::uint32_t size, [[maybe_unused]] const char* name) - { - MLX_PROFILE_FUNCTION(); - _renderer = renderer; - - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - { - #ifdef DEBUG - std::string name_frame = name; - name_frame.append(std::to_string(i)); - _buffers[i].create(Buffer::kind::uniform, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, name_frame.c_str()); - #else - _buffers[i].create(Buffer::kind::uniform, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, nullptr); - #endif - _buffers[i].mapMem(&_maps[i]); - if(_maps[i] == nullptr) - core::error::report(e_kind::fatal_error, "Vulkan : unable to map a uniform buffer"); - } - } - - void UBO::setData(std::uint32_t size, const void* data) - { - MLX_PROFILE_FUNCTION(); - std::memcpy(_maps[_renderer->getActiveImageIndex()], data, static_cast(size)); - } - - void UBO::setDynamicData(std::uint32_t size, const void* data) - { - MLX_PROFILE_FUNCTION(); - std::memcpy(_maps[_renderer->getActiveImageIndex()], data, static_cast(size)); - _buffers[_renderer->getActiveImageIndex()].flush(); - } - - unsigned int UBO::getSize() noexcept - { - return _buffers[_renderer->getActiveImageIndex()].getSize(); - } - - unsigned int UBO::getOffset() noexcept - { - return _buffers[_renderer->getActiveImageIndex()].getOffset(); - } - - VkBuffer& UBO::operator()() noexcept - { - return _buffers[_renderer->getActiveImageIndex()].get(); - } - - VkBuffer& UBO::get() noexcept - { - return _buffers[_renderer->getActiveImageIndex()].get(); - } - - void UBO::destroy() noexcept - { - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _buffers[i].destroy(); - } -} diff --git a/src/renderer/buffers/vk_ubo.h b/src/renderer/buffers/vk_ubo.h deleted file mode 100644 index 74a7fd9..0000000 --- a/src/renderer/buffers/vk_ubo.h +++ /dev/null @@ -1,51 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_ubo.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* 
By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:45:29 by maldavid #+# #+# */ -/* Updated: 2023/12/08 19:06:28 by kbz_8 ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_UBO__ -#define __MLX_VK_UBO__ - -#include "vk_buffer.h" -#include -#include -#include - -namespace mlx -{ - class UBO - { - public: - void create(class Renderer* renderer, std::uint32_t size, const char* name); - - void setData(std::uint32_t size, const void* data); - void setDynamicData(std::uint32_t size, const void* data); - - void destroy() noexcept; - - unsigned int getSize() noexcept; - unsigned int getOffset() noexcept; - VkDeviceMemory getDeviceMemory() noexcept; - VkBuffer& operator()() noexcept; - VkBuffer& get() noexcept; - - inline unsigned int getSize(int i) noexcept { return _buffers[i].getSize(); } - inline unsigned int getOffset(int i) noexcept { return _buffers[i].getOffset(); } - inline VkBuffer& operator()(int i) noexcept { return _buffers[i].get(); } - inline VkBuffer& get(int i) noexcept { return _buffers[i].get(); } - - private: - std::array _buffers; - std::array _maps; - class Renderer* _renderer = nullptr; - }; -} - -#endif // __MLX_VK_UBO__ diff --git a/src/renderer/buffers/vk_vbo.cpp b/src/renderer/buffers/vk_vbo.cpp deleted file mode 100644 index ec17a88..0000000 --- a/src/renderer/buffers/vk_vbo.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_vbo.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:28:08 by maldavid #+# #+# */ -/* Updated: 2023/12/12 22:17:14 by kbz_8 ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_vbo.h" -#include - -namespace mlx -{ - void VBO::setData(std::uint32_t size, const void* data) - { - if(size > getSize()) - { - core::error::report(e_kind::error, "Vulkan : trying to store to much data in a vertex buffer (%d bytes in %d bytes)", size, getSize()); - return; - } - - if(data == nullptr) - core::error::report(e_kind::warning, "Vulkan : mapping null data in a vertex buffer"); - - void* temp = nullptr; - mapMem(&temp); - std::memcpy(temp, data, static_cast(size)); - unmapMem(); - } - - void D_VBO::setData(std::uint32_t size, const void* data) - { - if(size > getSize()) - { - core::error::report(e_kind::error, "Vulkan : trying to store to much data in a vertex buffer (%d bytes in %d bytes)", size, getSize()); - return; - } - - if(data == nullptr) - core::error::report(e_kind::warning, "Vulkan : mapping null data in a vertex buffer"); - - Buffer tmp_buf; - #ifdef DEBUG - tmp_buf.create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, "tmp_buffer", data); - #else - tmp_buf.create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT, nullptr, data); - #endif - copyFromBuffer(tmp_buf); - tmp_buf.destroy(); - } -} diff --git a/src/renderer/buffers/vk_vbo.h b/src/renderer/buffers/vk_vbo.h deleted file mode 100644 index 88d0023..0000000 --- a/src/renderer/buffers/vk_vbo.h +++ /dev/null @@ -1,46 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_vbo.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:27:38 
by maldavid #+# #+# */ -/* Updated: 2024/01/10 23:04:40 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_VBO__ -#define __MLX_VK_VBO__ - -#include "vk_buffer.h" -#include -#include - -namespace mlx -{ - class VBO : public Buffer - { - public: - inline void create(std::uint32_t size, const void* data, const char* name) { Buffer::create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, name, data); } - void setData(std::uint32_t size, const void* data); - inline void bind(Renderer& renderer) noexcept { renderer.getActiveCmdBuffer().bindVertexBuffer(*this); } - }; - - class D_VBO : public Buffer - { - public: - inline void create(std::uint32_t size, const void* data, const char* name) { Buffer::create(Buffer::kind::dynamic_device_local, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, name, data); } - void setData(std::uint32_t size, const void* data); - inline void bind(Renderer& renderer) noexcept { renderer.getActiveCmdBuffer().bindVertexBuffer(*this); } - }; - - class C_VBO : public Buffer - { - public: - inline void create(std::uint32_t size, const void* data, const char* name) { Buffer::create(Buffer::kind::constant, size, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, name, data); } - inline void bind(Renderer& renderer) noexcept { renderer.getActiveCmdBuffer().bindVertexBuffer(*this); } - }; -} - -#endif // __MLX_VK_VBO__ diff --git a/src/renderer/command/cmd_manager.cpp b/src/renderer/command/cmd_manager.cpp deleted file mode 100644 index 1e6118e..0000000 --- a/src/renderer/command/cmd_manager.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* cmd_manager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:50:52 by maldavid #+# #+# */ -/* Updated: 2023/12/17 20:10:45 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -namespace mlx -{ - void CmdManager::init() noexcept - { - _cmd_pool.init(); - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _cmd_buffers[i].init(CmdBuffer::kind::long_time, this); - } - - void CmdManager::beginRecord(int active_image_index) - { - _cmd_buffers[active_image_index].beginRecord(); - } - - void CmdManager::endRecord(int active_image_index) - { - _cmd_buffers[active_image_index].endRecord(); - } - - void CmdManager::destroy() noexcept - { - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _cmd_buffers[i].destroy(); - _cmd_pool.destroy(); - } -} diff --git a/src/renderer/command/cmd_manager.h b/src/renderer/command/cmd_manager.h deleted file mode 100644 index ac69876..0000000 --- a/src/renderer/command/cmd_manager.h +++ /dev/null @@ -1,47 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* cmd_manager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:48:52 by maldavid #+# #+# */ -/* Updated: 2024/01/03 15:27:35 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_COMMAND_MANAGER__ -#define __MLX_COMMAND_MANAGER__ - -#include - -#include -#include -#include -#include -#include - -namespace mlx -{ - class CmdManager - { - public: - CmdManager() = default; - - void init() noexcept; - void beginRecord(int 
active_image_index); - void endRecord(int active_image_index); - void destroy() noexcept; - - inline CmdPool& getCmdPool() noexcept { return _cmd_pool; } - inline CmdBuffer& getCmdBuffer(int i) noexcept { return _cmd_buffers[i]; } - - ~CmdManager() = default; - - private: - std::array _cmd_buffers; - CmdPool _cmd_pool; - }; -} - -#endif diff --git a/src/renderer/command/single_time_cmd_manager.cpp b/src/renderer/command/single_time_cmd_manager.cpp deleted file mode 100644 index baf40a3..0000000 --- a/src/renderer/command/single_time_cmd_manager.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* single_time_cmd_manager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/15 19:57:49 by maldavid #+# #+# */ -/* Updated: 2024/10/19 10:47:04 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include - -namespace mlx -{ - void SingleTimeCmdManager::init() noexcept - { - _pool.init(); - for(int i = 0; i < BASE_POOL_SIZE; i++) - { - _buffers.emplace_back(); - _buffers.back().init(CmdBuffer::kind::single_time, &_pool); - } - } - - CmdBuffer& SingleTimeCmdManager::getCmdBuffer() noexcept - { - for(CmdBuffer& buf : _buffers) - { - if(buf.isReadyToBeUsed()) - { - buf.reset(); - return buf; - } - } - _buffers.emplace_back().init(CmdBuffer::kind::single_time, &_pool); - return _buffers.back(); - } - - void SingleTimeCmdManager::updateSingleTimesCmdBuffersSubmitState() noexcept - { - for(CmdBuffer& cmd : _buffers) - cmd.updateSubmitState(); - } - - void SingleTimeCmdManager::waitForAllExecutions() noexcept - { - for(CmdBuffer& cmd : _buffers) - cmd.waitForExecution(); - } - - void SingleTimeCmdManager::destroy() noexcept - { - std::for_each(_buffers.begin(), _buffers.end(), [](CmdBuffer& buf) - { - buf.destroy(); - }); - _pool.destroy(); - _buffers.clear(); - } -} diff --git a/src/renderer/command/single_time_cmd_manager.h b/src/renderer/command/single_time_cmd_manager.h deleted file mode 100644 index 271fdf9..0000000 --- a/src/renderer/command/single_time_cmd_manager.h +++ /dev/null @@ -1,50 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* single_time_cmd_manager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/15 18:25:57 by maldavid #+# #+# */ -/* Updated: 2024/01/07 01:30:19 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_SINGLE_TIME_CMD_MANAGER__ -#define __MLX_SINGLE_TIME_CMD_MANAGER__ - -#include -#include -#include - -namespace mlx -{ - class CmdBuffer; - - class SingleTimeCmdManager - { - friend class Render_Core; - - public: - SingleTimeCmdManager() = default; - - void init() noexcept; - void destroy() noexcept; - - void updateSingleTimesCmdBuffersSubmitState() noexcept; - void waitForAllExecutions() noexcept; - - inline CmdPool& getCmdPool() noexcept { return _pool; } - CmdBuffer& getCmdBuffer() noexcept; - - ~SingleTimeCmdManager() = default; - - inline static constexpr const std::uint8_t BASE_POOL_SIZE = 16; - - private: - std::vector _buffers; - CmdPool _pool; - }; -} - -#endif diff --git a/src/renderer/command/vk_cmd_buffer.cpp b/src/renderer/command/vk_cmd_buffer.cpp deleted file mode 100644 index d1b208b..0000000 --- 
a/src/renderer/command/vk_cmd_buffer.cpp +++ /dev/null @@ -1,368 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_cmd_buffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:26:06 by maldavid #+# #+# */ -/* Updated: 2024/02/25 08:02:26 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_cmd_buffer.h" -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - bool vector_push_back_if_not_found(std::vector& vector, CmdResource* res) - { - auto it = std::find_if(vector.begin(), vector.end(), [=](const CmdResource* vres) - { - return vres->getUUID() == res->getUUID(); - }); - - if(it == vector.end()) - { - vector.push_back(res); - return true; - } - return false; - } - - void CmdBuffer::init(kind type, CmdManager* manager) - { - init(type, &manager->getCmdPool()); - } - - void CmdBuffer::init(kind type, CmdPool* pool) - { - MLX_PROFILE_FUNCTION(); - _type = type; - _pool = pool; - - VkCommandBufferAllocateInfo allocInfo{}; - allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; - allocInfo.commandPool = pool->get(); - allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; - allocInfo.commandBufferCount = 1; - - VkResult res = vkAllocateCommandBuffers(Render_Core::get().getDevice().get(), &allocInfo, &_cmd_buffer); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to allocate command buffer, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new command buffer"); - #endif - - _fence.init(); - _state = state::idle; - } - - void CmdBuffer::beginRecord(VkCommandBufferUsageFlags usage) - { - MLX_PROFILE_FUNCTION(); - if(!isInit()) - core::error::report(e_kind::fatal_error, "Vulkan : begenning record on un uninit command buffer"); - if(_state == state::recording) - return; - - VkCommandBufferBeginInfo beginInfo{}; - beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; - beginInfo.flags = usage; - if(vkBeginCommandBuffer(_cmd_buffer, &beginInfo) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to begin recording command buffer"); - - _state = state::recording; - } - - void CmdBuffer::bindVertexBuffer(Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!isRecording()) - { - core::error::report(e_kind::warning, "Vulkan : trying to bind a vertex buffer to a non recording command buffer"); - return; - } - VkDeviceSize offset[] = { buffer.getOffset() }; - vkCmdBindVertexBuffers(_cmd_buffer, 0, 1, &buffer.get(), offset); - - buffer.recordedInCmdBuffer(); - vector_push_back_if_not_found(_cmd_resources, &buffer); - } - - void CmdBuffer::bindIndexBuffer(Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!isRecording()) - { - core::error::report(e_kind::warning, "Vulkan : trying to bind a index buffer to a non recording command buffer"); - return; - } - vkCmdBindIndexBuffer(_cmd_buffer, buffer.get(), buffer.getOffset(), VK_INDEX_TYPE_UINT16); - - buffer.recordedInCmdBuffer(); - vector_push_back_if_not_found(_cmd_resources, &buffer); - } - - void CmdBuffer::copyBuffer(Buffer& dst, Buffer& src) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!isRecording()) - { - core::error::report(e_kind::warning, "Vulkan : trying to do a buffer to buffer copy in a non recording command buffer"); - return; - } - - 
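// [Editor's illustrative note, not part of the original sources] A copy
// recorded by this function is normally driven through the single-time
// command-buffer path already shown in Buffer::copyFromBuffer():
//
//   CmdBuffer& cmd = Render_Core::get().getSingleTimeCmdBuffer();
//   cmd.beginRecord();
//   cmd.copyBuffer(dst, src);   // vkCmdCopyBuffer between the two barriers
//   cmd.endRecord();
//   cmd.submitIdle();           // submit and wait on the internal fence
//
// The preTransferBarrier()/postTransferBarrier() pair recorded around the
// copy is what makes the transferred data visible to later shader reads
// without extra synchronisation from the caller.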
preTransferBarrier(); - - VkBufferCopy copyRegion{}; - copyRegion.size = src.getSize(); - vkCmdCopyBuffer(_cmd_buffer, src.get(), dst.get(), 1, ©Region); - - postTransferBarrier(); - - dst.recordedInCmdBuffer(); - src.recordedInCmdBuffer(); - vector_push_back_if_not_found(_cmd_resources, &dst); - vector_push_back_if_not_found(_cmd_resources, &src); - } - - void CmdBuffer::copyBufferToImage(Buffer& buffer, Image& image) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!isRecording()) - { - core::error::report(e_kind::warning, "Vulkan : trying to do a buffer to image copy in a non recording command buffer"); - return; - } - - preTransferBarrier(); - - VkBufferImageCopy region{}; - region.bufferOffset = 0; - region.bufferRowLength = 0; - region.bufferImageHeight = 0; - region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - region.imageSubresource.mipLevel = 0; - region.imageSubresource.baseArrayLayer = 0; - region.imageSubresource.layerCount = 1; - region.imageOffset = { 0, 0, 0 }; - region.imageExtent = { image.getWidth(), image.getHeight(), 1 }; - - vkCmdCopyBufferToImage(_cmd_buffer, buffer.get(), image.get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); - - postTransferBarrier(); - - image.recordedInCmdBuffer(); - buffer.recordedInCmdBuffer(); - vector_push_back_if_not_found(_cmd_resources, &image); - vector_push_back_if_not_found(_cmd_resources, &buffer); - } - - void CmdBuffer::copyImagetoBuffer(Image& image, Buffer& buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!isRecording()) - { - core::error::report(e_kind::warning, "Vulkan : trying to do an image to buffer copy in a non recording command buffer"); - return; - } - - preTransferBarrier(); - - VkBufferImageCopy region{}; - region.bufferOffset = 0; - region.bufferRowLength = 0; - region.bufferImageHeight = 0; - region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - region.imageSubresource.mipLevel = 0; - region.imageSubresource.baseArrayLayer = 0; - region.imageSubresource.layerCount = 1; - region.imageOffset = { 0, 0, 0 }; - region.imageExtent = { image.getWidth(), image.getHeight(), 1 }; - - vkCmdCopyImageToBuffer(_cmd_buffer, image.get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer.get(), 1, ®ion); - - postTransferBarrier(); - - image.recordedInCmdBuffer(); - buffer.recordedInCmdBuffer(); - vector_push_back_if_not_found(_cmd_resources, &buffer); - vector_push_back_if_not_found(_cmd_resources, &image); - } - - void CmdBuffer::transitionImageLayout(Image& image, VkImageLayout new_layout) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!isRecording()) - { - core::error::report(e_kind::warning, "Vulkan : trying to do an image layout transition in a non recording command buffer"); - return; - } - - VkImageMemoryBarrier barrier{}; - barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; - barrier.oldLayout = image.getLayout(); - barrier.newLayout = new_layout; - barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; - barrier.image = image.get(); - barrier.subresourceRange.aspectMask = isDepthFormat(image.getFormat()) ? 
VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT; - barrier.subresourceRange.baseMipLevel = 0; - barrier.subresourceRange.levelCount = 1; - barrier.subresourceRange.baseArrayLayer = 0; - barrier.subresourceRange.layerCount = 1; - barrier.srcAccessMask = layoutToAccessMask(image.getLayout(), false); - barrier.dstAccessMask = layoutToAccessMask(new_layout, true); - if(isStencilFormat(image.getFormat())) - barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT; - - VkPipelineStageFlags sourceStage = 0; - if(barrier.oldLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) - sourceStage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - else if(barrier.srcAccessMask != 0) - sourceStage = RCore::accessFlagsToPipelineStage(barrier.srcAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); - else - sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; - - VkPipelineStageFlags destinationStage = 0; - if(barrier.newLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) - destinationStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; - else if(barrier.dstAccessMask != 0) - destinationStage = RCore::accessFlagsToPipelineStage(barrier.dstAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); - else - destinationStage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - - vkCmdPipelineBarrier(_cmd_buffer, sourceStage, destinationStage, 0, 0, nullptr, 0, nullptr, 1, &barrier); - - image.recordedInCmdBuffer(); - vector_push_back_if_not_found(_cmd_resources, &image); - } - - void CmdBuffer::endRecord() - { - MLX_PROFILE_FUNCTION(); - if(!isInit()) - core::error::report(e_kind::fatal_error, "Vulkan : ending record on un uninit command buffer"); - if(_state != state::recording) - return; - if(vkEndCommandBuffer(_cmd_buffer) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to end recording command buffer"); - - _state = state::idle; - } - - void CmdBuffer::submitIdle(bool shouldWaitForExecution) noexcept - { - MLX_PROFILE_FUNCTION(); - if(_type != kind::single_time) - { - core::error::report(e_kind::error, "Vulkan : try to perform an idle submit on a command buffer that is not single-time, this is not allowed"); - return; - } - - _fence.reset(); - - VkSubmitInfo submitInfo{}; - submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; - submitInfo.commandBufferCount = 1; - submitInfo.pCommandBuffers = &_cmd_buffer; - - VkResult res = vkQueueSubmit(Render_Core::get().getQueue().getGraphic(), 1, &submitInfo, _fence.get()); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan error : failed to submit a single time command buffer, %s", RCore::verbaliseResultVk(res)); - _state = state::submitted; - - if(shouldWaitForExecution) - waitForExecution(); - } - - void CmdBuffer::submit(Semaphore* semaphores) noexcept - { - MLX_PROFILE_FUNCTION(); - std::array signalSemaphores; - std::array waitSemaphores; - - if(semaphores != nullptr) - { - signalSemaphores[0] = semaphores->getRenderImageSemaphore(); - waitSemaphores[0] = semaphores->getImageSemaphore(); - } - else - { - signalSemaphores[0] = VK_NULL_HANDLE; - waitSemaphores[0] = VK_NULL_HANDLE; - } - VkPipelineStageFlags waitStages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT }; - - _fence.reset(); - - VkSubmitInfo submitInfo{}; - submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; - submitInfo.waitSemaphoreCount = (semaphores == nullptr ? 
0 : waitSemaphores.size()); - submitInfo.pWaitSemaphores = waitSemaphores.data(); - submitInfo.pWaitDstStageMask = waitStages; - submitInfo.commandBufferCount = 1; - submitInfo.pCommandBuffers = &_cmd_buffer; - submitInfo.signalSemaphoreCount = (semaphores == nullptr ? 0 : signalSemaphores.size()); - submitInfo.pSignalSemaphores = signalSemaphores.data(); - - VkResult res = vkQueueSubmit(Render_Core::get().getQueue().getGraphic(), 1, &submitInfo, _fence.get()); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan error : failed to submit draw command buffer, %s", RCore::verbaliseResultVk(res)); - _state = state::submitted; - } - - void CmdBuffer::updateSubmitState() noexcept - { - MLX_PROFILE_FUNCTION(); - if(!_fence.isReady()) - return; - - for(CmdResource* res : _cmd_resources) - res->removedFromCmdBuffer(); - _cmd_resources.clear(); - _state = state::ready; - } - - void CmdBuffer::preTransferBarrier() noexcept - { - MLX_PROFILE_FUNCTION(); - VkMemoryBarrier memoryBarrier{}; - memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; - memoryBarrier.pNext = nullptr; - memoryBarrier.srcAccessMask = 0U; - memoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - - vkCmdPipelineBarrier(_cmd_buffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr); - } - - void CmdBuffer::postTransferBarrier() noexcept - { - MLX_PROFILE_FUNCTION(); - VkMemoryBarrier memoryBarrier{}; - memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; - memoryBarrier.pNext = nullptr; - memoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - memoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT; - - vkCmdPipelineBarrier(_cmd_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 1, &memoryBarrier, 0, nullptr, 0, nullptr); - } - - void CmdBuffer::destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - _fence.destroy(); - _cmd_buffer = VK_NULL_HANDLE; - _state = state::uninit; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed command buffer"); - #endif - } -} diff --git a/src/renderer/command/vk_cmd_buffer.h b/src/renderer/command/vk_cmd_buffer.h deleted file mode 100644 index 39ef073..0000000 --- a/src/renderer/command/vk_cmd_buffer.h +++ /dev/null @@ -1,89 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_cmd_buffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: bonsthie +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:25:42 by maldavid #+# #+# */ -/* Updated: 2024/08/08 17:46:00 by bonsthie ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_CMD_BUFFER__ -#define __MLX_VK_CMD_BUFFER__ - -#include -#include -#include -#include -#include - -namespace mlx -{ - class Buffer; - class Image; - - class CmdBuffer - { - public: - enum class state - { - uninit = 0, // buffer not initialized or destroyed - ready, // buffer ready to be used after having been submitted - idle, // buffer has recorded informations but has not been submitted - recording, // buffer is currently recording - submitted, // buffer has been submitted - }; - - enum class kind - { - single_time = 0, - long_time - }; - - public: - void init(kind type, class CmdManager* manager); - void init(kind type, class CmdPool* pool); - void destroy() noexcept; - - 
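// [Editor's illustrative note, not part of the original sources] The state
// enum above encodes a small lifecycle, roughly
//
//   uninit --init()--> idle --beginRecord()--> recording --endRecord()--> idle
//   idle --submit()/submitIdle()--> submitted --fence signalled--> ready
//
// so a typical long-time (per-frame) use of the class looks like the sketch
// below (`manager`, `vbo`, `ibo` and `frame_semaphores` are assumed to exist):
//
//   CmdBuffer cmd;
//   cmd.init(CmdBuffer::kind::long_time, &manager);
//   cmd.beginRecord();
//   cmd.bindVertexBuffer(vbo);
//   cmd.bindIndexBuffer(ibo);
//   cmd.endRecord();
//   cmd.submit(&frame_semaphores); // waits/signals the per-frame semaphores
//   cmd.waitForExecution();        // blocks on the fence, then marks it ready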
void beginRecord(VkCommandBufferUsageFlags usage = 0); - void submit(class Semaphore* semaphores) noexcept; - void submitIdle(bool shouldWaitForExecution = true) noexcept; // TODO : handle `shouldWaitForExecution` as false by default (needs to modify CmdResources lifetimes to do so) - void updateSubmitState() noexcept; - inline void waitForExecution() noexcept { _fence.wait(); updateSubmitState(); _state = state::ready; } - inline void reset() noexcept { vkResetCommandBuffer(_cmd_buffer, 0); } - void endRecord(); - - void bindVertexBuffer(Buffer& buffer) noexcept; - void bindIndexBuffer(Buffer& buffer) noexcept; - void copyBuffer(Buffer& dst, Buffer& src) noexcept; - void copyBufferToImage(Buffer& buffer, Image& image) noexcept; - void copyImagetoBuffer(Image& image, Buffer& buffer) noexcept; - void transitionImageLayout(Image& image, VkImageLayout new_layout) noexcept; - - inline bool isInit() const noexcept { return _state != state::uninit; } - inline bool isReadyToBeUsed() const noexcept { return _state == state::ready; } - inline bool isRecording() const noexcept { return _state == state::recording; } - inline bool hasBeenSubmitted() const noexcept { return _state == state::submitted; } - inline state getCurrentState() const noexcept { return _state; } - - inline VkCommandBuffer& operator()() noexcept { return _cmd_buffer; } - inline VkCommandBuffer& get() noexcept { return _cmd_buffer; } - inline Fence& getFence() noexcept { return _fence; } - - private: - void preTransferBarrier() noexcept; - void postTransferBarrier() noexcept; - - private: - std::vector _cmd_resources; - Fence _fence; - VkCommandBuffer _cmd_buffer = VK_NULL_HANDLE; - class CmdPool* _pool = nullptr; - state _state = state::uninit; - kind _type; - }; -} - -#endif // __MLX_VK_CMD_BUFFER__ diff --git a/src/renderer/command/vk_cmd_pool.cpp b/src/renderer/command/vk_cmd_pool.cpp deleted file mode 100644 index 441d45f..0000000 --- a/src/renderer/command/vk_cmd_pool.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_cmd_pool.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:24:33 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:23:20 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_cmd_pool.h" -#include - -namespace mlx -{ - void CmdPool::init() - { - VkCommandPoolCreateInfo poolInfo{}; - poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; - poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; - poolInfo.queueFamilyIndex = Render_Core::get().getQueue().getFamilies().graphics_family.value(); - - VkResult res = vkCreateCommandPool(Render_Core::get().getDevice().get(), &poolInfo, nullptr, &_cmd_pool); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create command pool, %s", RCore::verbaliseResultVk(res)); - } - - void CmdPool::destroy() noexcept - { - vkDestroyCommandPool(Render_Core::get().getDevice().get(), _cmd_pool, nullptr); - _cmd_pool = VK_NULL_HANDLE; - } -} diff --git a/src/renderer/command/vk_cmd_pool.h b/src/renderer/command/vk_cmd_pool.h deleted file mode 100644 index 8d97157..0000000 --- a/src/renderer/command/vk_cmd_pool.h +++ /dev/null @@ -1,35 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_cmd_pool.h :+: :+: :+: */ -/* 
+:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:24:12 by maldavid #+# #+# */ -/* Updated: 2024/01/03 15:27:26 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_CMD_POOL__ -#define __MLX_VK_CMD_POOL__ - -#include -#include - -namespace mlx -{ - class CmdPool - { - public: - void init(); - void destroy() noexcept; - - inline VkCommandPool& operator()() noexcept { return _cmd_pool; } - inline VkCommandPool& get() noexcept { return _cmd_pool; } - - private: - VkCommandPool _cmd_pool = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_CMD_POOL__ diff --git a/src/renderer/core/cmd_resource.h b/src/renderer/core/cmd_resource.h deleted file mode 100644 index 0010333..0000000 --- a/src/renderer/core/cmd_resource.h +++ /dev/null @@ -1,44 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* cmd_resource.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/16 20:44:29 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:28:08 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_COMMAND_RESOURCE__ -#define __MLX_COMMAND_RESOURCE__ - -#include -#include - -namespace mlx -{ - class CmdResource - { - friend class SingleTimeCmdManager; - public: - enum class state - { - in_cmd_buffer = 0, - out_cmd_buffer, - }; - - public: - CmdResource() : _uuid() {} - inline void recordedInCmdBuffer() noexcept { _state = state::in_cmd_buffer; } - inline void removedFromCmdBuffer() noexcept { _state = state::out_cmd_buffer; } - inline UUID getUUID() const noexcept { return _uuid; } - virtual ~CmdResource() = default; - - private: - UUID _uuid; - state _state = state::out_cmd_buffer; - }; -} - -#endif diff --git a/src/renderer/core/drawable_resource.h b/src/renderer/core/drawable_resource.h deleted file mode 100644 index 8df2b98..0000000 --- a/src/renderer/core/drawable_resource.h +++ /dev/null @@ -1,32 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* drawable_resource.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/10 21:00:37 by maldavid #+# #+# */ -/* Updated: 2024/01/11 01:21:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_DRAWABLE_RESOURCE__ -#define __MLX_DRAWABLE_RESOURCE__ - -#include -#include -#include - -namespace mlx -{ - class DrawableResource - { - public: - DrawableResource() = default; - virtual void render(std::array& sets, class Renderer& renderer) = 0; - virtual void resetUpdate() {} - virtual ~DrawableResource() = default; - }; -} - -#endif diff --git a/src/renderer/core/memory.cpp b/src/renderer/core/memory.cpp deleted file mode 100644 index cd70658..0000000 --- a/src/renderer/core/memory.cpp +++ /dev/null @@ -1,203 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* memory.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: kbz_8 +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/10/20 22:02:37 by kbz_8 #+# #+# */ -/* Updated: 2024/09/14 00:04:16 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - 
-#include -#include -#include - -#define VMA_STATIC_VULKAN_FUNCTIONS 0 -#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0 -#define VMA_VULKAN_VERSION 1002000 -#define VMA_ASSERT(expr) ((void)0) -#define VMA_IMPLEMENTATION - -#ifdef MLX_COMPILER_CLANG - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Weverything" - #include - #pragma clang diagnostic pop -#elif defined(MLX_COMPILER_GCC) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" - #pragma GCC diagnostic ignored "-Wmissing-field-initializers" - #pragma GCC diagnostic ignored "-Wunused-parameter" - #pragma GCC diagnostic ignored "-Wunused-variable" - #pragma GCC diagnostic ignored "-Wunused-function" - #pragma GCC diagnostic ignored "-Wparentheses" - #pragma GCC diagnostic ignored "-Wparentheses" - #include - #pragma GCC diagnostic pop -#else - #include -#endif - -#include -#include - -namespace mlx -{ - void GPUallocator::init() noexcept - { - VmaVulkanFunctions vma_vulkan_func{}; - vma_vulkan_func.vkAllocateMemory = vkAllocateMemory; - vma_vulkan_func.vkBindBufferMemory = vkBindBufferMemory; - vma_vulkan_func.vkBindImageMemory = vkBindImageMemory; - vma_vulkan_func.vkCreateBuffer = vkCreateBuffer; - vma_vulkan_func.vkCreateImage = vkCreateImage; - vma_vulkan_func.vkDestroyBuffer = vkDestroyBuffer; - vma_vulkan_func.vkDestroyImage = vkDestroyImage; - vma_vulkan_func.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges; - vma_vulkan_func.vkFreeMemory = vkFreeMemory; - vma_vulkan_func.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements; - vma_vulkan_func.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements; - vma_vulkan_func.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties; - vma_vulkan_func.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties; - vma_vulkan_func.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges; - vma_vulkan_func.vkMapMemory = vkMapMemory; - vma_vulkan_func.vkUnmapMemory = vkUnmapMemory; - vma_vulkan_func.vkCmdCopyBuffer = vkCmdCopyBuffer; - vma_vulkan_func.vkGetBufferMemoryRequirements2KHR = vkGetBufferMemoryRequirements2; - vma_vulkan_func.vkGetImageMemoryRequirements2KHR = vkGetImageMemoryRequirements2; - vma_vulkan_func.vkBindBufferMemory2KHR = vkBindBufferMemory2; - vma_vulkan_func.vkBindImageMemory2KHR = vkBindImageMemory2; - vma_vulkan_func.vkGetPhysicalDeviceMemoryProperties2KHR = vkGetPhysicalDeviceMemoryProperties2; - - VmaAllocatorCreateInfo allocatorCreateInfo{}; - allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; - allocatorCreateInfo.physicalDevice = Render_Core::get().getDevice().getPhysicalDevice(); - allocatorCreateInfo.device = Render_Core::get().getDevice().get(); - allocatorCreateInfo.instance = Render_Core::get().getInstance().get(); - allocatorCreateInfo.pVulkanFunctions = &vma_vulkan_func; - - VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &_allocator); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Graphics allocator : failed to create graphics memory allocator, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Graphics allocator : created new allocator"); - #endif - } - - VmaAllocation GPUallocator::createBuffer(const VkBufferCreateInfo* binfo, const VmaAllocationCreateInfo* vinfo, VkBuffer& buffer, const char* name) noexcept - { - MLX_PROFILE_FUNCTION(); - VmaAllocation allocation; - VkResult res = vmaCreateBuffer(_allocator, binfo, vinfo, &buffer, &allocation, nullptr); - if(res != VK_SUCCESS) - 
core::error::report(e_kind::fatal_error, "Graphics allocator : failed to allocate a buffer, %s", RCore::verbaliseResultVk(res)); - if(name != nullptr) - { - Render_Core::get().getLayers().setDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_BUFFER, (std::uint64_t)buffer, name); - vmaSetAllocationName(_allocator, allocation, name); - } - #ifdef DEBUG - core::error::report(e_kind::message, "Graphics Allocator : created new buffer '%s'", name); - #endif - _active_buffers_allocations++; - return allocation; - } - - void GPUallocator::destroyBuffer(VmaAllocation allocation, VkBuffer buffer) noexcept - { - MLX_PROFILE_FUNCTION(); - vkDeviceWaitIdle(Render_Core::get().getDevice().get()); - vmaDestroyBuffer(_allocator, buffer, allocation); - #ifdef DEBUG - core::error::report(e_kind::message, "Graphics Allocator : destroyed buffer"); - #endif - _active_buffers_allocations--; - } - - VmaAllocation GPUallocator::createImage(const VkImageCreateInfo* iminfo, const VmaAllocationCreateInfo* vinfo, VkImage& image, const char* name) noexcept - { - MLX_PROFILE_FUNCTION(); - VmaAllocation allocation; - VkResult res = vmaCreateImage(_allocator, iminfo, vinfo, &image, &allocation, nullptr); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Graphics allocator : failed to allocate an image, %s", RCore::verbaliseResultVk(res)); - if(name != nullptr) - { - Render_Core::get().getLayers().setDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_IMAGE, (std::uint64_t)image, name); - vmaSetAllocationName(_allocator, allocation, name); - } - #ifdef DEBUG - core::error::report(e_kind::message, "Graphics Allocator : created new image '%s'", name); - #endif - _active_images_allocations++; - return allocation; - } - - void GPUallocator::destroyImage(VmaAllocation allocation, VkImage image) noexcept - { - MLX_PROFILE_FUNCTION(); - vkDeviceWaitIdle(Render_Core::get().getDevice().get()); - vmaDestroyImage(_allocator, image, allocation); - #ifdef DEBUG - core::error::report(e_kind::message, "Graphics Allocator : destroyed image"); - #endif - _active_images_allocations--; - } - - void GPUallocator::mapMemory(VmaAllocation allocation, void** data) noexcept - { - MLX_PROFILE_FUNCTION(); - VkResult res = vmaMapMemory(_allocator, allocation, data); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Graphics allocator : unable to map GPU memory to CPU memory, %s", RCore::verbaliseResultVk(res)); - } - - void GPUallocator::unmapMemory(VmaAllocation allocation) noexcept - { - MLX_PROFILE_FUNCTION(); - vmaUnmapMemory(_allocator, allocation); - } - - void GPUallocator::dumpMemoryToJson() - { - static std::uint32_t id = 0; - std::string name("memory_dump"); - name.append(std::to_string(id) + ".json"); - std::ofstream file(name); - if(!file.is_open()) - { - core::error::report(e_kind::error, "Graphics allocator : unable to dump memory to a json file"); - return; - } - char* str = nullptr; - vmaBuildStatsString(_allocator, &str, true); - file << str; - vmaFreeStatsString(_allocator, str); - file.close(); - id++; - } - - void GPUallocator::flush(VmaAllocation allocation, VkDeviceSize size, VkDeviceSize offset) noexcept - { - MLX_PROFILE_FUNCTION(); - vmaFlushAllocation(_allocator, allocation, offset, size); - } - - void GPUallocator::destroy() noexcept - { - if(_active_images_allocations != 0) - core::error::report(e_kind::error, "Graphics allocator : some user-dependant allocations were not freed before destroying the display (%d active allocations). 
You may have not destroyed all the MLX resources you've created", _active_images_allocations); - else if(_active_buffers_allocations != 0) - core::error::report(e_kind::error, "Graphics allocator : some MLX-dependant allocations were not freed before destroying the display (%d active allocations). This is an error in the MLX, please report this should not happen", _active_buffers_allocations); - if(_active_images_allocations < 0 || _active_buffers_allocations < 0) - core::error::report(e_kind::warning, "Graphics allocator : the impossible happened, the MLX has freed more allocations than it has made (wtf)"); - vmaDestroyAllocator(_allocator); - _active_buffers_allocations = 0; - _active_images_allocations = 0; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed a graphics allocator"); - #endif - } -} diff --git a/src/renderer/core/memory.h b/src/renderer/core/memory.h deleted file mode 100644 index 46cd13f..0000000 --- a/src/renderer/core/memory.h +++ /dev/null @@ -1,52 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* memory.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/10/20 02:13:03 by maldavid #+# #+# */ -/* Updated: 2024/03/25 16:01:06 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_MEMORY__ -#define __MLX_VK_MEMORY__ - -#include -#include -#include - -namespace mlx -{ - class GPUallocator - { - public: - GPUallocator() = default; - - void init() noexcept; - void destroy() noexcept; - - VmaAllocation createBuffer(const VkBufferCreateInfo* binfo, const VmaAllocationCreateInfo* vinfo, VkBuffer& buffer, const char* name = nullptr) noexcept; - void destroyBuffer(VmaAllocation allocation, VkBuffer buffer) noexcept; - - VmaAllocation createImage(const VkImageCreateInfo* iminfo, const VmaAllocationCreateInfo* vinfo, VkImage& image, const char* name = nullptr) noexcept; - void destroyImage(VmaAllocation allocation, VkImage image) noexcept; - - void mapMemory(VmaAllocation allocation, void** data) noexcept; - void unmapMemory(VmaAllocation allocation) noexcept; - - void dumpMemoryToJson(); - - void flush(VmaAllocation allocation, VkDeviceSize size, VkDeviceSize offset) noexcept; - - ~GPUallocator() = default; - - private: - VmaAllocator _allocator; - std::int32_t _active_buffers_allocations = 0; - std::int32_t _active_images_allocations = 0; - }; -} - -#endif diff --git a/src/renderer/core/render_core.cpp b/src/renderer/core/render_core.cpp deleted file mode 100644 index 449cf56..0000000 --- a/src/renderer/core/render_core.cpp +++ /dev/null @@ -1,153 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* render_core.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/17 23:33:34 by maldavid #+# #+# */ -/* Updated: 2024/10/19 10:50:13 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#define VOLK_IMPLEMENTATION - -#include -#include -#include - -#ifdef DEBUG - #ifdef MLX_COMPILER_MSVC - #pragma NOTE("MLX is being compiled in debug mode, this activates Vulkan's validation layers and debug messages which may impact rendering performances") - #else - #warning "MLX is being compiled in debug mode, this activates Vulkan's validation layers and debug messages 
which may impact rendering performances" - #endif -#endif - -namespace mlx -{ - namespace RCore - { - std::optional findMemoryType(std::uint32_t typeFilter, VkMemoryPropertyFlags properties, bool error) - { - VkPhysicalDeviceMemoryProperties memProperties; - vkGetPhysicalDeviceMemoryProperties(Render_Core::get().getDevice().getPhysicalDevice(), &memProperties); - - for(std::uint32_t i = 0; i < memProperties.memoryTypeCount; i++) - { - if((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) - return i; - } - if(error) - core::error::report(e_kind::fatal_error, "Vulkan : failed to find suitable memory type"); - return std::nullopt; - } - - const char* verbaliseResultVk(VkResult result) - { - switch(result) - { - case VK_SUCCESS: return "Success"; - case VK_NOT_READY: return "A fence or query has not yet completed"; - case VK_TIMEOUT: return "A wait operation has not completed in the specified time"; - case VK_EVENT_SET: return "An event is signaled"; - case VK_EVENT_RESET: return "An event is unsignaled"; - case VK_INCOMPLETE: return "A return array was too small for the result"; - case VK_ERROR_OUT_OF_HOST_MEMORY: return "A host memory allocation has failed"; - case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "A device memory allocation has failed"; - case VK_ERROR_INITIALIZATION_FAILED: return "Initialization of an object could not be completed for implementation-specific reasons"; - case VK_ERROR_DEVICE_LOST: return "The logical or physical device has been lost"; - case VK_ERROR_MEMORY_MAP_FAILED: return "Mapping of a memory object has failed"; - case VK_ERROR_LAYER_NOT_PRESENT: return "A requested layer is not present or could not be loaded"; - case VK_ERROR_EXTENSION_NOT_PRESENT: return "A requested extension is not supported"; - case VK_ERROR_FEATURE_NOT_PRESENT: return "A requested feature is not supported"; - case VK_ERROR_INCOMPATIBLE_DRIVER: return "The requested version of Vulkan is not supported by the driver or is otherwise incompatible"; - case VK_ERROR_TOO_MANY_OBJECTS: return "Too many objects of the type have already been created"; - case VK_ERROR_FORMAT_NOT_SUPPORTED: return "A requested format is not supported on this device"; - case VK_ERROR_SURFACE_LOST_KHR: return "A surface is no longer available"; - case VK_SUBOPTIMAL_KHR: return "A swapchain no longer matches the surface properties exactly, but can still be used"; - case VK_ERROR_OUT_OF_DATE_KHR: return "A surface has changed in such a way that it is no longer compatible with the swapchain"; - case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: return "The display used by a swapchain does not use the same presentable image layout"; - case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: return "The requested window is already connected to a VkSurfaceKHR, or to some other non-Vulkan API"; - case VK_ERROR_VALIDATION_FAILED_EXT: return "A validation layer found an error"; - - default: return "Unknown Vulkan error"; - } - return nullptr; - } - - VkPipelineStageFlags accessFlagsToPipelineStage(VkAccessFlags accessFlags, VkPipelineStageFlags stageFlags) - { - VkPipelineStageFlags stages = 0; - - while(accessFlags != 0) - { - VkAccessFlagBits AccessFlag = static_cast(accessFlags & (~(accessFlags - 1))); - if(AccessFlag == 0 || (AccessFlag & (AccessFlag - 1)) != 0) - core::error::report(e_kind::fatal_error, "Vulkan : an error has been caught during access flag to pipeline stage operation"); - accessFlags &= ~AccessFlag; - - switch(AccessFlag) - { - case VK_ACCESS_INDIRECT_COMMAND_READ_BIT: stages |= 
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; break; - case VK_ACCESS_INDEX_READ_BIT: stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break; - case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break; - case VK_ACCESS_UNIFORM_READ_BIT: stages |= stageFlags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break; - case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; break; - case VK_ACCESS_SHADER_READ_BIT: stages |= stageFlags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break; - case VK_ACCESS_SHADER_WRITE_BIT: stages |= stageFlags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break; - case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break; - case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break; - case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break; - case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break; - case VK_ACCESS_TRANSFER_READ_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break; - case VK_ACCESS_TRANSFER_WRITE_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break; - case VK_ACCESS_HOST_READ_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break; - case VK_ACCESS_HOST_WRITE_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break; - case VK_ACCESS_MEMORY_READ_BIT: break; - case VK_ACCESS_MEMORY_WRITE_BIT: break; - - default: core::error::report(e_kind::error, "Vulkan : unknown access flag"); break; - } - } - return stages; - } - } - - void Render_Core::init() - { - if(_is_init) - return; - if(volkInitialize() != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan loader : cannot load %s, are you sure Vulkan is installed on your system ?", VULKAN_LIB_NAME); - - _instance.init(); - volkLoadInstance(_instance.get()); - _layers.init(); - _device.init(); - volkLoadDevice(_device.get()); - _queues.init(); - _allocator.init(); - _cmd_manager.init(); - _is_init = true; - } - - void Render_Core::destroy() - { - if(!_is_init) - return; - - vkDeviceWaitIdle(_device()); - - _pool_manager.destroyAllPools(); - _cmd_manager.destroy(); - _allocator.destroy(); - _device.destroy(); - _layers.destroy(); - _instance.destroy(); - - volkFinalize(); - - _is_init = false; - } -} diff --git a/src/renderer/core/render_core.h b/src/renderer/core/render_core.h deleted file mode 100644 index cf4aae5..0000000 --- a/src/renderer/core/render_core.h +++ /dev/null @@ -1,87 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* render_core.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:16:32 by maldavid #+# #+# */ -/* Updated: 2024/01/20 08:17:58 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_RENDER_CORE__ -#define __MLX_RENDER_CORE__ - -#include -#include -#include - -#include -#include -#include -#include "vk_queues.h" -#include "vk_device.h" -#include "vk_instance.h" -#include "vk_validation_layers.h" -#include "memory.h" - -#include -#include - -namespace mlx -{ - namespace RCore - { - std::optional findMemoryType(std::uint32_t typeFilter, VkMemoryPropertyFlags properties, bool error = true); - const char* verbaliseResultVk(VkResult result); - 
VkPipelineStageFlags accessFlagsToPipelineStage(VkAccessFlags accessFlags, VkPipelineStageFlags stageFlags); - } - - #ifdef DEBUG - constexpr const bool enableValidationLayers = true; - #else - constexpr const bool enableValidationLayers = false; - #endif - - const std::vector validationLayers = { "VK_LAYER_KHRONOS_validation" }; - - constexpr const int MAX_FRAMES_IN_FLIGHT = 3; - constexpr const int MAX_SETS_PER_POOL = 512; - constexpr const int NUMBER_OF_UNIFORM_BUFFERS = 1; // change this if for wathever reason more than one uniform buffer is needed - - class Render_Core : public Singleton - { - friend class Singleton; - - public: - void init(); - void destroy(); - - inline bool isInit() const noexcept { return _is_init; } - inline Instance& getInstance() noexcept { return _instance; } - inline Device& getDevice() noexcept { return _device; } - inline Queues& getQueue() noexcept { return _queues; } - inline GPUallocator& getAllocator() noexcept { return _allocator; } - inline ValidationLayers& getLayers() noexcept { return _layers; } - inline CmdBuffer& getSingleTimeCmdBuffer() noexcept { return _cmd_manager.getCmdBuffer(); } - inline SingleTimeCmdManager& getSingleTimeCmdManager() noexcept { return _cmd_manager; } - inline DescriptorPool& getDescriptorPool() { return _pool_manager.getAvailablePool(); } - - private: - Render_Core() = default; - ~Render_Core() = default; - - private: - ValidationLayers _layers; - SingleTimeCmdManager _cmd_manager; - Queues _queues; - DescriptorPoolManager _pool_manager; - Device _device; - Instance _instance; - GPUallocator _allocator; - bool _is_init = false; - }; -} - -#endif // __MLX_RENDER_CORE__ diff --git a/src/renderer/core/vk_device.cpp b/src/renderer/core/vk_device.cpp deleted file mode 100644 index f862772..0000000 --- a/src/renderer/core/vk_device.cpp +++ /dev/null @@ -1,167 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_device.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:14:29 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:23:45 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "render_core.h" -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - const std::vector deviceExtensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME}; - - void Device::init() - { - pickPhysicalDevice(); - - Queues::QueueFamilyIndices indices = Render_Core::get().getQueue().getFamilies(); - - std::vector queueCreateInfos; - std::set uniqueQueueFamilies = { indices.graphics_family.value(), indices.present_family.value() }; - - float queuePriority = 1.0f; - for(std::uint32_t queueFamily : uniqueQueueFamilies) - { - VkDeviceQueueCreateInfo queueCreateInfo{}; - queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; - queueCreateInfo.queueFamilyIndex = queueFamily; - queueCreateInfo.queueCount = 1; - queueCreateInfo.pQueuePriorities = &queuePriority; - queueCreateInfos.push_back(queueCreateInfo); - } - - VkPhysicalDeviceFeatures deviceFeatures{}; - - VkDeviceCreateInfo createInfo{}; - createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; - - createInfo.queueCreateInfoCount = static_cast(queueCreateInfos.size()); - createInfo.pQueueCreateInfos = queueCreateInfos.data(); - - createInfo.pEnabledFeatures = &deviceFeatures; - - createInfo.enabledExtensionCount = static_cast(deviceExtensions.size()); - 
createInfo.ppEnabledExtensionNames = deviceExtensions.data(); - createInfo.enabledLayerCount = 0; - - VkResult res; - if((res = vkCreateDevice(_physical_device, &createInfo, nullptr, &_device)) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create logical device, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new logical device"); - #endif - } - - void Device::pickPhysicalDevice() - { - std::uint32_t deviceCount = 0; - vkEnumeratePhysicalDevices(Render_Core::get().getInstance().get(), &deviceCount, nullptr); - - if(deviceCount == 0) - core::error::report(e_kind::fatal_error, "Vulkan : failed to find GPUs with Vulkan support"); - - std::vector<VkPhysicalDevice> devices(deviceCount); - vkEnumeratePhysicalDevices(Render_Core::get().getInstance().get(), &deviceCount, devices.data()); - - SDL_Window* window = SDL_CreateWindow("", 0, 0, 1, 1, SDL_WINDOW_VULKAN | SDL_WINDOW_HIDDEN); - if(!window) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a window to pick physical device"); - - VkSurfaceKHR surface = VK_NULL_HANDLE; - if(SDL_Vulkan_CreateSurface(window, Render_Core::get().getInstance().get(), &surface) != SDL_TRUE) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a surface to pick physical device"); - - std::multimap<int, VkPhysicalDevice> devices_score; - - for(const auto& device : devices) - { - int score = deviceScore(device, surface); - devices_score.insert(std::make_pair(score, device)); - } - - if(devices_score.rbegin()->first > 0) - _physical_device = devices_score.rbegin()->second; - else - core::error::report(e_kind::fatal_error, "Vulkan : failed to find a suitable GPU"); - - #ifdef DEBUG - VkPhysicalDeviceProperties props; - vkGetPhysicalDeviceProperties(_physical_device, &props); - core::error::report(e_kind::message, "Vulkan : picked a physical device, %s", props.deviceName); - #endif - Render_Core::get().getQueue().findQueueFamilies(_physical_device, surface); // update queue indices to current physical device - vkDestroySurfaceKHR(Render_Core::get().getInstance().get(), surface, nullptr); - SDL_DestroyWindow(window); - } - - int Device::deviceScore(VkPhysicalDevice device, VkSurfaceKHR surface) - { - Queues::QueueFamilyIndices indices = Render_Core::get().getQueue().findQueueFamilies(device, surface); - bool extensionsSupported = checkDeviceExtensionSupport(device); - - std::uint32_t formatCount = 0; - if(extensionsSupported) - vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount, nullptr); - - VkPhysicalDeviceProperties props; - vkGetPhysicalDeviceProperties(device, &props); - if(!indices.isComplete() || !extensionsSupported || formatCount == 0) - return -1; - - VkPhysicalDeviceFeatures features; - vkGetPhysicalDeviceFeatures(device, &features); - - int score = 0; - #ifndef FORCE_INTEGRATED_GPU - if(props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) - score += 1000; - #else - if(props.deviceType != VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU) - return -1; - #endif - - if(!features.geometryShader) - return -1; - - score += props.limits.maxImageDimension2D; - score += props.limits.maxBoundDescriptorSets; - return score; - } - - bool Device::checkDeviceExtensionSupport(VkPhysicalDevice device) - { - std::uint32_t extensionCount; - vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount, nullptr); - - std::vector<VkExtensionProperties> availableExtensions(extensionCount); - vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount, availableExtensions.data()); - -
std::set requiredExtensions(deviceExtensions.begin(), deviceExtensions.end()); - - for(const auto& extension : availableExtensions) - requiredExtensions.erase(extension.extensionName); - - return requiredExtensions.empty(); - } - - void Device::destroy() noexcept - { - vkDestroyDevice(_device, nullptr); - _device = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed a logical device"); - #endif - } -} diff --git a/src/renderer/core/vk_device.h b/src/renderer/core/vk_device.h deleted file mode 100644 index ad7b5ca..0000000 --- a/src/renderer/core/vk_device.h +++ /dev/null @@ -1,43 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_device.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:13:42 by maldavid #+# #+# */ -/* Updated: 2024/03/14 16:59:54 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_DEVICE__ -#define __MLX_VK_DEVICE__ - -#include -#include - -namespace mlx -{ - class Device - { - public: - void init(); - void destroy() noexcept; - - inline VkDevice& operator()() noexcept { return _device; } - inline VkDevice& get() noexcept { return _device; } - - inline VkPhysicalDevice& getPhysicalDevice() noexcept { return _physical_device; } - - private: - void pickPhysicalDevice(); - bool checkDeviceExtensionSupport(VkPhysicalDevice device); - int deviceScore(VkPhysicalDevice device, VkSurfaceKHR surface); - - private: - VkPhysicalDevice _physical_device = VK_NULL_HANDLE; - VkDevice _device = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_DEVICE__ diff --git a/src/renderer/core/vk_fence.cpp b/src/renderer/core/vk_fence.cpp deleted file mode 100644 index f63df12..0000000 --- a/src/renderer/core/vk_fence.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_fence.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:53:06 by maldavid #+# #+# */ -/* Updated: 2024/02/25 08:02:45 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include - -namespace mlx -{ - void Fence::init() - { - VkFenceCreateInfo fenceInfo{}; - fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; - fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT; - - VkResult res; - if((res = vkCreateFence(Render_Core::get().getDevice().get(), &fenceInfo, nullptr, &_fence)) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a synchronization object (fence), %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new fence"); - #endif - } - - void Fence::wait() noexcept - { - vkWaitForFences(Render_Core::get().getDevice().get(), 1, &_fence, VK_TRUE, UINT64_MAX); - } - - void Fence::reset() noexcept - { - vkResetFences(Render_Core::get().getDevice().get(), 1, &_fence); - } - - bool Fence::isReady() const noexcept - { - return vkGetFenceStatus(Render_Core::get().getDevice().get(), _fence) == VK_SUCCESS; - } - - void Fence::destroy() noexcept - { - if(_fence != VK_NULL_HANDLE) - vkDestroyFence(Render_Core::get().getDevice().get(), _fence, nullptr); - _fence = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed 
fence"); - #endif - } -} diff --git a/src/renderer/core/vk_fence.h b/src/renderer/core/vk_fence.h deleted file mode 100644 index d8bd364..0000000 --- a/src/renderer/core/vk_fence.h +++ /dev/null @@ -1,43 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_fence.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/02 17:52:09 by maldavid #+# #+# */ -/* Updated: 2024/01/03 15:26:21 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_FENCE__ -#define __MLX_VK_FENCE__ - -#include -#include - -namespace mlx -{ - class Fence - { - public: - Fence() = default; - - void init(); - - inline VkFence& get() noexcept { return _fence; } - void wait() noexcept; - void reset() noexcept; - bool isReady() const noexcept; - inline void waitAndReset() noexcept { wait(); reset(); } - - void destroy() noexcept; - - ~Fence() = default; - - private: - VkFence _fence = VK_NULL_HANDLE; - }; -} - -#endif diff --git a/src/renderer/core/vk_instance.cpp b/src/renderer/core/vk_instance.cpp deleted file mode 100644 index 040f9d8..0000000 --- a/src/renderer/core/vk_instance.cpp +++ /dev/null @@ -1,100 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_instance.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:04:21 by maldavid #+# #+# */ -/* Updated: 2024/02/24 21:10:32 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_instance.h" -#include "render_core.h" -#include -#include - -namespace mlx -{ - void Instance::init() - { - VkApplicationInfo appInfo{}; - appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; - appInfo.pEngineName = "MacroLibX"; - appInfo.engineVersion = VK_MAKE_VERSION(1, 3, 1); - appInfo.apiVersion = VK_API_VERSION_1_2; - - auto extensions = getRequiredExtensions(); - - VkInstanceCreateInfo createInfo{}; - createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; - createInfo.pApplicationInfo = &appInfo; - createInfo.enabledExtensionCount = static_cast(extensions.size()); - createInfo.ppEnabledExtensionNames = extensions.data(); - createInfo.enabledLayerCount = 0; // will be replaced if validation layers are enabled - createInfo.pNext = nullptr; - - VkDebugUtilsMessengerCreateInfoEXT debugCreateInfo; - if constexpr(enableValidationLayers) - { - if(Render_Core::get().getLayers().checkValidationLayerSupport()) - { - createInfo.enabledLayerCount = static_cast(validationLayers.size()); - createInfo.ppEnabledLayerNames = validationLayers.data(); - Render_Core::get().getLayers().populateDebugMessengerCreateInfo(debugCreateInfo); - createInfo.pNext = static_cast(&debugCreateInfo); - } - } - - VkResult res; - if((res = vkCreateInstance(&createInfo, nullptr, &_instance)) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create Vulkan instance, %s", RCore::verbaliseResultVk(res)); - volkLoadInstance(_instance); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new instance"); - #endif - } - - std::vector Instance::getRequiredExtensions() - { - std::vector extensions; - extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME); - - #ifdef VK_USE_PLATFORM_XCB_KHR - extensions.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); - #endif - - 
#ifdef VK_USE_PLATFORM_XLIB_KHR - extensions.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME); - #endif - - #ifdef VK_USE_PLATFORM_WAYLAND_KHR - extensions.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME); - #endif - - #ifdef VK_USE_PLATFORM_WIN32_KHR - extensions.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); - #endif - - #ifdef VK_USE_PLATFORM_METAL_EXT - extensions.push_back(VK_EXT_METAL_SURFACE_EXTENSION_NAME); - #endif - - if constexpr(enableValidationLayers) - { - extensions.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); - extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); - } - return extensions; - } - - void Instance::destroy() noexcept - { - vkDestroyInstance(_instance, nullptr); - _instance = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed an instance"); - #endif - } -} diff --git a/src/renderer/core/vk_instance.h b/src/renderer/core/vk_instance.h deleted file mode 100644 index e827665..0000000 --- a/src/renderer/core/vk_instance.h +++ /dev/null @@ -1,37 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_instance.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:03:04 by maldavid #+# #+# */ -/* Updated: 2024/01/03 15:26:26 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_INSTANCE__ -#define __MLX_VK_INSTANCE__ - -#include -#include -#include - -namespace mlx -{ - class Instance - { - public: - void init(); - void destroy() noexcept; - - inline VkInstance& operator()() noexcept { return _instance; } - inline VkInstance& get() noexcept { return _instance; } - - private: - std::vector getRequiredExtensions(); - VkInstance _instance = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_INSTANCE__ diff --git a/src/renderer/core/vk_queues.cpp b/src/renderer/core/vk_queues.cpp deleted file mode 100644 index b4f4ba3..0000000 --- a/src/renderer/core/vk_queues.cpp +++ /dev/null @@ -1,71 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_queues.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:02:42 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:01:10 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "render_core.h" -#include -#include - -namespace mlx -{ - Queues::QueueFamilyIndices Queues::findQueueFamilies(VkPhysicalDevice device, VkSurfaceKHR surface) - { - std::uint32_t queueFamilyCount = 0; - vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, nullptr); - - std::vector queueFamilies(queueFamilyCount); - vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, queueFamilies.data()); - - _families = Queues::QueueFamilyIndices{}; - int i = 0; - for(const auto& queueFamily : queueFamilies) - { - if(queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT) - _families->graphics_family = i; - - VkBool32 presentSupport = false; - vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport); - - if(presentSupport) - _families->present_family = i; - - if(_families->isComplete()) - return *_families; - i++; - } - - return *_families; - } - - void Queues::init() - { - if(!_families.has_value()) - { - SDL_Window* window = SDL_CreateWindow("", 0, 0, 1, 1, SDL_WINDOW_VULKAN | 
SDL_WINDOW_HIDDEN); - if(!window) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a window to init queues"); - - VkSurfaceKHR surface = VK_NULL_HANDLE; - if(SDL_Vulkan_CreateSurface(window, Render_Core::get().getInstance().get(), &surface) != SDL_TRUE) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a surface to init queues"); - - findQueueFamilies(Render_Core::get().getDevice().getPhysicalDevice(), surface); - - vkDestroySurfaceKHR(Render_Core::get().getInstance().get(), surface, nullptr); - SDL_DestroyWindow(window); - } - vkGetDeviceQueue(Render_Core::get().getDevice().get(), _families->graphics_family.value(), 0, &_graphics_queue); - vkGetDeviceQueue(Render_Core::get().getDevice().get(), _families->present_family.value(), 0, &_present_queue); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : got graphics and present queues"); - #endif - } -} diff --git a/src/renderer/core/vk_queues.h b/src/renderer/core/vk_queues.h deleted file mode 100644 index 2a4a1ba..0000000 --- a/src/renderer/core/vk_queues.h +++ /dev/null @@ -1,56 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_queues.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:01:49 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:00:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_QUEUES__ -#define __MLX_VK_QUEUES__ - -#include -#include -#include -#include -#include - -namespace mlx -{ - class Queues - { - public: - struct QueueFamilyIndices - { - std::optional graphics_family; - std::optional present_family; - - inline bool isComplete() { return graphics_family.has_value() && present_family.has_value(); } - }; - - QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device, VkSurfaceKHR surface); - - void init(); - - inline VkQueue& getGraphic() noexcept { return _graphics_queue; } - inline VkQueue& getPresent() noexcept { return _present_queue; } - inline QueueFamilyIndices getFamilies() noexcept - { - if(_families.has_value()) - return *_families; - core::error::report(e_kind::fatal_error, "Vulkan : cannot get queue families, not init"); - return {}; // just to avoid warnings - } - - private: - VkQueue _graphics_queue; - VkQueue _present_queue; - std::optional _families; - }; -} - -#endif // __MLX_VK_QUEUES__ diff --git a/src/renderer/core/vk_semaphore.cpp b/src/renderer/core/vk_semaphore.cpp deleted file mode 100644 index fb8d2ab..0000000 --- a/src/renderer/core/vk_semaphore.cpp +++ /dev/null @@ -1,43 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_semaphore.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:01:08 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:02:36 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_semaphore.h" -#include "render_core.h" -#include - -namespace mlx -{ - void Semaphore::init() - { - VkSemaphoreCreateInfo semaphoreInfo{}; - semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; - - VkResult res; - if( (res = vkCreateSemaphore(Render_Core::get().getDevice().get(), &semaphoreInfo, nullptr, &_image_available_semaphore)) != VK_SUCCESS || - (res = 
vkCreateSemaphore(Render_Core::get().getDevice().get(), &semaphoreInfo, nullptr, &_render_finished_semaphore)) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a synchronization object (semaphore), %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new semaphores"); - #endif - } - - void Semaphore::destroy() noexcept - { - vkDestroySemaphore(Render_Core::get().getDevice().get(), _render_finished_semaphore, nullptr); - _render_finished_semaphore = VK_NULL_HANDLE; - vkDestroySemaphore(Render_Core::get().getDevice().get(), _image_available_semaphore, nullptr); - _image_available_semaphore = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed semaphores"); - #endif - } -} diff --git a/src/renderer/core/vk_semaphore.h b/src/renderer/core/vk_semaphore.h deleted file mode 100644 index f17ff9e..0000000 --- a/src/renderer/core/vk_semaphore.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_semaphore.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 18:59:38 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:01:57 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_SEMAPHORE__ -#define __MLX_VK_SEMAPHORE__ - -#include -#include - -namespace mlx -{ - class Semaphore - { - public: - void init(); - void destroy() noexcept; - - inline VkSemaphore& getImageSemaphore() noexcept { return _image_available_semaphore; } - inline VkSemaphore& getRenderImageSemaphore() noexcept { return _render_finished_semaphore; } - - private: - VkSemaphore _image_available_semaphore = VK_NULL_HANDLE; - VkSemaphore _render_finished_semaphore = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_SEMAPHORE__ diff --git a/src/renderer/core/vk_surface.cpp b/src/renderer/core/vk_surface.cpp deleted file mode 100644 index e2e2ac4..0000000 --- a/src/renderer/core/vk_surface.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_surface.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 18:58:49 by maldavid #+# #+# */ -/* Updated: 2024/01/10 21:55:21 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "render_core.h" -#include -#include -#include -#include -#include - -namespace mlx -{ - void Surface::create(Renderer& renderer) - { - if(SDL_Vulkan_CreateSurface(renderer.getWindow()->getNativeWindow(), Render_Core::get().getInstance().get(), &_surface) != SDL_TRUE) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a surface : %s", SDL_GetError()); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new surface"); - #endif - } - - VkSurfaceFormatKHR Surface::chooseSwapSurfaceFormat(const std::vector& availableFormats) - { - auto it = std::find_if(availableFormats.begin(), availableFormats.end(), [](VkSurfaceFormatKHR format) - { - return format.format == VK_FORMAT_R8G8B8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR; - }); - - return (it == availableFormats.end() ? 
availableFormats[0] : *it); - } - - void Surface::destroy() noexcept - { - vkDestroySurfaceKHR(Render_Core::get().getInstance().get(), _surface, nullptr); - _surface = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed a surface"); - #endif - } -} diff --git a/src/renderer/core/vk_surface.h b/src/renderer/core/vk_surface.h deleted file mode 100644 index c700f8f..0000000 --- a/src/renderer/core/vk_surface.h +++ /dev/null @@ -1,38 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_surface.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 18:57:55 by maldavid #+# #+# */ -/* Updated: 2024/01/03 15:26:43 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_SURFACE__ -#define __MLX_VK_SURFACE__ - -#include -#include -#include - -namespace mlx -{ - class Surface - { - public: - void create(class Renderer& renderer); - void destroy() noexcept; - - VkSurfaceFormatKHR chooseSwapSurfaceFormat(const std::vector& availableFormats); - - inline VkSurfaceKHR& operator()() noexcept { return _surface; } - inline VkSurfaceKHR& get() noexcept { return _surface; } - - private: - VkSurfaceKHR _surface = VK_NULL_HANDLE; - }; -} - -#endif // __MLX_VK_SURFACE__ diff --git a/src/renderer/core/vk_validation_layers.cpp b/src/renderer/core/vk_validation_layers.cpp deleted file mode 100644 index a63b7fb..0000000 --- a/src/renderer/core/vk_validation_layers.cpp +++ /dev/null @@ -1,130 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_validation_layers.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/19 14:05:25 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:03:24 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "render_core.h" -#include "vulkan/vulkan_core.h" - -#include -#include -#include - -namespace mlx -{ - void ValidationLayers::init() - { - if constexpr(!enableValidationLayers) - return; - - std::uint32_t extensionCount; - vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr); - std::vector extensions(extensionCount); - vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.data()); - if(!std::any_of(extensions.begin(), extensions.end(), [=](VkExtensionProperties ext) { return std::strcmp(ext.extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0; })) - { - core::error::report(e_kind::warning , "Vulkan : %s not present, debug utils are disabled", VK_EXT_DEBUG_UTILS_EXTENSION_NAME); - return; - } - - VkDebugUtilsMessengerCreateInfoEXT createInfo{}; - populateDebugMessengerCreateInfo(createInfo); - VkResult res = createDebugUtilsMessengerEXT(&createInfo, nullptr); - if(res != VK_SUCCESS) - core::error::report(e_kind::warning, "Vulkan : failed to set up debug messenger, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - else - core::error::report(e_kind::message, "Vulkan : enabled validation layers"); - #endif - - _vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT)vkGetInstanceProcAddr(Render_Core::get().getInstance().get(), "vkSetDebugUtilsObjectNameEXT"); - if(!_vkSetDebugUtilsObjectNameEXT) - core::error::report(e_kind::warning, "Vulkan : failed to set up debug object names, 
%s", RCore::verbaliseResultVk(VK_ERROR_EXTENSION_NOT_PRESENT)); - #ifdef DEBUG - else - core::error::report(e_kind::message, "Vulkan : enabled debug object names"); - #endif - } - - bool ValidationLayers::checkValidationLayerSupport() - { - std::uint32_t layerCount; - vkEnumerateInstanceLayerProperties(&layerCount, nullptr); - - std::vector availableLayers(layerCount); - vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data()); - - return std::all_of(validationLayers.begin(), validationLayers.end(), [&](const char* layerName) - { - if(!std::any_of(availableLayers.begin(), availableLayers.end(), [=](VkLayerProperties props) { return std::strcmp(layerName, props.layerName) == 0; })) - { - core::error::report(e_kind::error, "Vulkan : a validation layer was requested but was not found ('%s')", layerName); - return false; - } - return true; - }); - } - - VkResult ValidationLayers::setDebugUtilsObjectNameEXT(VkObjectType object_type, std::uint64_t object_handle, const char* object_name) - { - if(!_vkSetDebugUtilsObjectNameEXT) - return VK_ERROR_EXTENSION_NOT_PRESENT; - - VkDebugUtilsObjectNameInfoEXT name_info{}; - name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; - name_info.objectType = object_type; - name_info.objectHandle = object_handle; - name_info.pObjectName = object_name; - return _vkSetDebugUtilsObjectNameEXT(Render_Core::get().getDevice().get(), &name_info); - } - - void ValidationLayers::populateDebugMessengerCreateInfo(VkDebugUtilsMessengerCreateInfoEXT& createInfo) - { - createInfo = {}; - createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; - createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; - createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; - createInfo.pfnUserCallback = ValidationLayers::debugCallback; - } - - void ValidationLayers::destroy() - { - if constexpr(enableValidationLayers) - { - destroyDebugUtilsMessengerEXT(nullptr); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed validation layers"); - #endif - } - } - - VkResult ValidationLayers::createDebugUtilsMessengerEXT(const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator) - { - auto func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(Render_Core::get().getInstance().get(), "vkCreateDebugUtilsMessengerEXT"); - return func != nullptr ? 
func(Render_Core::get().getInstance().get(), pCreateInfo, pAllocator, &_debug_messenger) : VK_ERROR_EXTENSION_NOT_PRESENT; - } - - VKAPI_ATTR VkBool32 VKAPI_CALL ValidationLayers::debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, [[maybe_unused]] VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, [[maybe_unused]] void* pUserData) - { - if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) - core::error::report(e_kind::error, pCallbackData->pMessage); - else if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) - core::error::report(e_kind::warning, pCallbackData->pMessage); - return VK_FALSE; - } - - void ValidationLayers::destroyDebugUtilsMessengerEXT(const VkAllocationCallbacks* pAllocator) - { - auto func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(Render_Core::get().getInstance().get(), "vkDestroyDebugUtilsMessengerEXT"); - if(func != nullptr) - func(Render_Core::get().getInstance().get(), _debug_messenger, pAllocator); - } - -} diff --git a/src/renderer/core/vk_validation_layers.h b/src/renderer/core/vk_validation_layers.h deleted file mode 100644 index 0758669..0000000 --- a/src/renderer/core/vk_validation_layers.h +++ /dev/null @@ -1,47 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_validation_layers.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/19 14:04:25 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:02:55 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_VALIDATION_LAYERS__ -#define __VK_VALIDATION_LAYERS__ - -#include -#include - -namespace mlx -{ - class ValidationLayers - { - public: - ValidationLayers() = default; - - void init(); - void destroy(); - - bool checkValidationLayerSupport(); - void populateDebugMessengerCreateInfo(VkDebugUtilsMessengerCreateInfoEXT& createInfo); - - VkResult setDebugUtilsObjectNameEXT(VkObjectType object_type, std::uint64_t object_handle, const char* object_name); - - ~ValidationLayers() = default; - - private: - VkResult createDebugUtilsMessengerEXT(const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator); - static VKAPI_ATTR VkBool32 VKAPI_CALL debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData); - void destroyDebugUtilsMessengerEXT(const VkAllocationCallbacks* pAllocator); - - private: - VkDebugUtilsMessengerEXT _debug_messenger; - PFN_vkSetDebugUtilsObjectNameEXT _vkSetDebugUtilsObjectNameEXT = nullptr; - }; -} - -#endif diff --git a/src/renderer/descriptors/descriptor_pool_manager.cpp b/src/renderer/descriptors/descriptor_pool_manager.cpp deleted file mode 100644 index 4c727b8..0000000 --- a/src/renderer/descriptors/descriptor_pool_manager.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* descriptor_pool_manager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/20 06:51:47 by maldavid #+# #+# */ -/* Updated: 2024/01/20 08:18:27 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include 
-#include - -namespace mlx -{ - DescriptorPool& DescriptorPoolManager::getAvailablePool() - { - for(auto& pool : _pools) - { - if(pool.getNumberOfSetsAllocated() < MAX_SETS_PER_POOL) - return pool; - } - VkDescriptorPoolSize pool_sizes[] = { - { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, (MAX_FRAMES_IN_FLIGHT * NUMBER_OF_UNIFORM_BUFFERS) }, - { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, MAX_SETS_PER_POOL - (MAX_FRAMES_IN_FLIGHT * NUMBER_OF_UNIFORM_BUFFERS) } - }; - _pools.emplace_front().init((sizeof(pool_sizes) / sizeof(VkDescriptorPoolSize)), pool_sizes); - return _pools.front(); - } - - void DescriptorPoolManager::destroyAllPools() - { - for(auto& pool : _pools) - pool.destroy(); - _pools.clear(); - } -} diff --git a/src/renderer/descriptors/descriptor_pool_manager.h b/src/renderer/descriptors/descriptor_pool_manager.h deleted file mode 100644 index 228b0f3..0000000 --- a/src/renderer/descriptors/descriptor_pool_manager.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* descriptor_pool_manager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/20 06:26:26 by maldavid #+# #+# */ -/* Updated: 2024/01/20 08:23:04 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_DESCRIPTOR_POOL_MANAGER__ -#define __MLX_DESCRIPTOR_POOL_MANAGER__ - -#include -#include - -namespace mlx -{ - class DescriptorPoolManager - { - public: - DescriptorPoolManager() = default; - - DescriptorPool& getAvailablePool(); // assumes the pool is for only one set allocation, may cause some issues if this is for more than one - void destroyAllPools(); - - ~DescriptorPoolManager() = default; - - private: - std::list _pools; - }; -} - -#endif diff --git a/src/renderer/descriptors/vk_descriptor_pool.cpp b/src/renderer/descriptors/vk_descriptor_pool.cpp deleted file mode 100644 index 04f4865..0000000 --- a/src/renderer/descriptors/vk_descriptor_pool.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_descriptor_pool.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:34:23 by maldavid #+# #+# */ -/* Updated: 2024/01/20 07:40:40 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_descriptor_pool.h" -#include -#include - -namespace mlx -{ - void DescriptorPool::init(std::size_t n, VkDescriptorPoolSize* size) - { - VkDescriptorPoolCreateInfo poolInfo{}; - poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; - poolInfo.poolSizeCount = n; - poolInfo.pPoolSizes = size; - poolInfo.maxSets = MAX_SETS_PER_POOL; - poolInfo.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; - - VkResult res = vkCreateDescriptorPool(Render_Core::get().getDevice().get(), &poolInfo, nullptr, &_pool); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create descriptor pool, %s", RCore::verbaliseResultVk(res)); - _allocated_sets++; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new descriptor pool"); - #endif - } - - void DescriptorPool::freeDescriptor(const DescriptorSet& set) - { - if(!isInit()) - return; - const auto& sets = set.getAllFramesDescriptorSets(); - 
vkFreeDescriptorSets(Render_Core::get().getDevice().get(), _pool, sets.size(), sets.data()); - _allocated_sets--; // if this goes in underflow I quit - } - - void DescriptorPool::destroy() noexcept - { - if(_pool != VK_NULL_HANDLE) - vkDestroyDescriptorPool(Render_Core::get().getDevice().get(), _pool, nullptr); - _pool = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed a descriptor pool"); - #endif - } -} diff --git a/src/renderer/descriptors/vk_descriptor_pool.h b/src/renderer/descriptors/vk_descriptor_pool.h deleted file mode 100644 index 67acdb3..0000000 --- a/src/renderer/descriptors/vk_descriptor_pool.h +++ /dev/null @@ -1,45 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_descriptor_pool.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:32:43 by maldavid #+# #+# */ -/* Updated: 2024/01/20 07:38:32 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_DESCRIPTOR_POOL__ -#define __VK_DESCRIPTOR_POOL__ - -#include -#include -#include - -namespace mlx -{ - class DescriptorPool - { - public: - DescriptorPool() = default; - - void init(std::size_t n, VkDescriptorPoolSize* size); - void freeDescriptor(const class DescriptorSet& set); - void destroy() noexcept; - - inline VkDescriptorPool& operator()() noexcept { return _pool; } - inline VkDescriptorPool& get() noexcept { return _pool; } - inline std::size_t getNumberOfSetsAllocated() const noexcept { return _allocated_sets; } - - inline bool isInit() const noexcept { return _pool != VK_NULL_HANDLE; } - - ~DescriptorPool() = default; - - private: - VkDescriptorPool _pool = VK_NULL_HANDLE; - std::size_t _allocated_sets = 0; - }; -} - -#endif diff --git a/src/renderer/descriptors/vk_descriptor_set.cpp b/src/renderer/descriptors/vk_descriptor_set.cpp deleted file mode 100644 index 819ba25..0000000 --- a/src/renderer/descriptors/vk_descriptor_set.cpp +++ /dev/null @@ -1,129 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_descriptor_set.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:40:44 by maldavid #+# #+# */ -/* Updated: 2024/01/20 08:18:07 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_descriptor_set.h" -#include "renderer/core/render_core.h" -#include "vk_descriptor_pool.h" -#include "vk_descriptor_set_layout.h" -#include -#include -#include -#include - -namespace mlx -{ - void DescriptorSet::init(Renderer* renderer, DescriptorPool* pool, DescriptorSetLayout* layout) - { - MLX_PROFILE_FUNCTION(); - _renderer = renderer; - _layout = layout; - _pool = pool; - - auto device = Render_Core::get().getDevice().get(); - - std::array layouts; - layouts.fill(layout->get()); - - VkDescriptorSetAllocateInfo allocInfo{}; - allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; - allocInfo.descriptorPool = _pool->get(); - allocInfo.descriptorSetCount = static_cast(MAX_FRAMES_IN_FLIGHT); - allocInfo.pSetLayouts = layouts.data(); - - VkResult res = vkAllocateDescriptorSets(device, &allocInfo, _desc_set.data()); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to allocate descriptor set, %s", 
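	/* Illustrative aside (editor's sketch, not part of the original sources): freeing individual sets
	 * with vkFreeDescriptorSets, as freeDescriptor() does above, is only valid when the pool was
	 * created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT — which is why
	 * DescriptorPool::init() passes that flag. A minimal sketch of the dependency; `device` is an
	 * assumed, already-created VkDevice. */
	VkDescriptorPoolSize size{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 16 };
	VkDescriptorPoolCreateInfo info{};
	info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
	info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // allows vkFreeDescriptorSets on sets from this pool
	info.maxSets = 16;
	info.poolSizeCount = 1;
	info.pPoolSizes = &size;
	VkDescriptorPool pool = VK_NULL_HANDLE;
	vkCreateDescriptorPool(device, &info, nullptr, &pool);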
RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new descriptor set"); - #endif - } - - void DescriptorSet::writeDescriptor(int binding, UBO* ubo) const noexcept - { - MLX_PROFILE_FUNCTION(); - auto device = Render_Core::get().getDevice().get(); - - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - { - VkDescriptorBufferInfo bufferInfo{}; - bufferInfo.buffer = ubo->get(i); - bufferInfo.offset = ubo->getOffset(i); - bufferInfo.range = ubo->getSize(i); - - VkWriteDescriptorSet descriptorWrite{}; - descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - descriptorWrite.dstSet = _desc_set[i]; - descriptorWrite.dstBinding = binding; - descriptorWrite.dstArrayElement = 0; - descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; - descriptorWrite.descriptorCount = 1; - descriptorWrite.pBufferInfo = &bufferInfo; - - vkUpdateDescriptorSets(device, 1, &descriptorWrite, 0, nullptr); - } - } - - void DescriptorSet::writeDescriptor(int binding, const Image& image) const noexcept - { - MLX_PROFILE_FUNCTION(); - auto device = Render_Core::get().getDevice().get(); - - VkDescriptorImageInfo imageInfo{}; - imageInfo.imageLayout = image.getLayout(); - imageInfo.imageView = image.getImageView(); - imageInfo.sampler = image.getSampler(); - - VkWriteDescriptorSet descriptorWrite{}; - descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - descriptorWrite.dstSet = _desc_set[_renderer->getActiveImageIndex()]; - descriptorWrite.dstBinding = binding; - descriptorWrite.dstArrayElement = 0; - descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; - descriptorWrite.descriptorCount = 1; - descriptorWrite.pImageInfo = &imageInfo; - - vkUpdateDescriptorSets(device, 1, &descriptorWrite, 0, nullptr); - } - - DescriptorSet DescriptorSet::duplicate() - { - MLX_PROFILE_FUNCTION(); - DescriptorSet set; - set.init(_renderer, &Render_Core::get().getDescriptorPool(), _layout); - return set; - } - - VkDescriptorSet& DescriptorSet::operator()() noexcept - { - return _desc_set[_renderer->getActiveImageIndex()]; - } - - VkDescriptorSet& DescriptorSet::get() noexcept - { - return _desc_set[_renderer->getActiveImageIndex()]; - } - - void DescriptorSet::destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - if(_pool != nullptr && Render_Core::get().isInit()) // checks if the render core is still init (it should always be init but just in case) - _pool->freeDescriptor(*this); - for(auto& set : _desc_set) - { - if(set != VK_NULL_HANDLE) - set = VK_NULL_HANDLE; - } - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed descriptor set"); - #endif - } -} diff --git a/src/renderer/descriptors/vk_descriptor_set.h b/src/renderer/descriptors/vk_descriptor_set.h deleted file mode 100644 index 4eb7372..0000000 --- a/src/renderer/descriptors/vk_descriptor_set.h +++ /dev/null @@ -1,54 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_descriptor_set.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:39:36 by maldavid #+# #+# */ -/* Updated: 2024/01/20 07:17:39 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_DESCRIPTOR_SET__ -#define __VK_DESCRIPTOR_SET__ - -#include -#include -#include -#include - -namespace mlx -{ - class DescriptorSet - { - public: - DescriptorSet() = default; - - void init(class 
Renderer* renderer, class DescriptorPool* pool, class DescriptorSetLayout* layout); - - void writeDescriptor(int binding, class UBO* ubo) const noexcept; - void writeDescriptor(int binding, const class Image& image) const noexcept; - - inline bool isInit() const noexcept { return _pool != nullptr && _renderer != nullptr; } - - DescriptorSet duplicate(); - - VkDescriptorSet& operator()() noexcept; - VkDescriptorSet& get() noexcept; - - inline const std::array& getAllFramesDescriptorSets() const { return _desc_set; } - - void destroy() noexcept; - - ~DescriptorSet() = default; - - private: - std::array _desc_set; - class DescriptorPool* _pool = nullptr; - class DescriptorSetLayout* _layout = nullptr; - class Renderer* _renderer = nullptr; - }; -} - -#endif diff --git a/src/renderer/descriptors/vk_descriptor_set_layout.cpp b/src/renderer/descriptors/vk_descriptor_set_layout.cpp deleted file mode 100644 index 12b98d8..0000000 --- a/src/renderer/descriptors/vk_descriptor_set_layout.cpp +++ /dev/null @@ -1,47 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_descriptor_set_layout.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:37:28 by maldavid #+# #+# */ -/* Updated: 2024/01/03 13:14:58 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_descriptor_set_layout.h" -#include - -namespace mlx -{ - void DescriptorSetLayout::init(std::vector> binds, VkShaderStageFlagBits stage) - { - std::vector bindings(binds.size()); - for(std::size_t i = 0; i < binds.size(); i++) - { - bindings[i].binding = binds[i].first; - bindings[i].descriptorCount = 1; - bindings[i].descriptorType = binds[i].second; - bindings[i].pImmutableSamplers = nullptr; - bindings[i].stageFlags = stage; - } - - _bindings = std::move(binds); - - VkDescriptorSetLayoutCreateInfo layoutInfo{}; - layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; - layoutInfo.bindingCount = _bindings.size(); - layoutInfo.pBindings = bindings.data(); - - VkResult res = vkCreateDescriptorSetLayout(Render_Core::get().getDevice().get(), &layoutInfo, nullptr, &_layout); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create descriptor set layout, %s", RCore::verbaliseResultVk(res)); - } - - void DescriptorSetLayout::destroy() noexcept - { - vkDestroyDescriptorSetLayout(Render_Core::get().getDevice().get(), _layout, nullptr); - _layout = VK_NULL_HANDLE; - } -} diff --git a/src/renderer/descriptors/vk_descriptor_set_layout.h b/src/renderer/descriptors/vk_descriptor_set_layout.h deleted file mode 100644 index b3a5c97..0000000 --- a/src/renderer/descriptors/vk_descriptor_set_layout.h +++ /dev/null @@ -1,42 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_descriptor_set_layout.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/23 18:36:22 by maldavid #+# #+# */ -/* Updated: 2024/01/20 06:25:54 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __VK_DESCRIPTOR_SET_LAYOUT__ -#define __VK_DESCRIPTOR_SET_LAYOUT__ - -#include -#include -#include - -namespace mlx -{ - class DescriptorSetLayout - { - public: - DescriptorSetLayout() = default; - - void init(std::vector> binds, 
VkShaderStageFlagBits stage); - void destroy() noexcept; - - inline VkDescriptorSetLayout& operator()() noexcept { return _layout; } - inline VkDescriptorSetLayout& get() noexcept { return _layout; } - inline const std::vector>& getBindings() const noexcept { return _bindings; } - - ~DescriptorSetLayout() = default; - - private: - VkDescriptorSetLayout _layout = VK_NULL_HANDLE; - std::vector> _bindings; - }; -} - -#endif diff --git a/src/renderer/images/texture.cpp b/src/renderer/images/texture.cpp deleted file mode 100644 index 2ccb2ae..0000000 --- a/src/renderer/images/texture.cpp +++ /dev/null @@ -1,197 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* texture.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/31 18:03:35 by maldavid #+# #+# */ -/* Updated: 2024/09/14 00:04:29 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include -#include -#include - -#define STB_IMAGE_IMPLEMENTATION -#ifdef MLX_COMPILER_GCC - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wstringop-overflow" - #include - #pragma GCC diagnostic pop -#else - #include -#endif - -#ifdef IMAGE_OPTIMIZED - #define TILING VK_IMAGE_TILING_OPTIMAL -#else - #define TILING VK_IMAGE_TILING_LINEAR -#endif - -namespace mlx -{ - void Texture::create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory) - { - MLX_PROFILE_FUNCTION(); - Image::create(width, height, format, TILING, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, name, dedicated_memory); - Image::createImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); - Image::createSampler(); - transitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); - - std::vector vertexData = { - {{0, 0}, {1.f, 1.f, 1.f, 1.f}, {0.0f, 0.0f}}, - {{width, 0}, {1.f, 1.f, 1.f, 1.f}, {1.0f, 0.0f}}, - {{width, height}, {1.f, 1.f, 1.f, 1.f}, {1.0f, 1.0f}}, - {{0, height}, {1.f, 1.f, 1.f, 1.f}, {0.0f, 1.0f}} - }; - - std::vector indexData = { 0, 1, 2, 2, 3, 0 }; - - #ifdef DEBUG - _vbo.create(sizeof(Vertex) * vertexData.size(), vertexData.data(), name); - _ibo.create(sizeof(std::uint16_t) * indexData.size(), indexData.data(), name); - _name = name; - #else - _vbo.create(sizeof(Vertex) * vertexData.size(), vertexData.data(), nullptr); - _ibo.create(sizeof(std::uint16_t) * indexData.size(), indexData.data(), nullptr); - #endif - - Buffer staging_buffer; - std::size_t size = width * height * formatSize(format); - if(pixels != nullptr) - { - #ifdef DEBUG - staging_buffer.create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, name, pixels); - #else - staging_buffer.create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, nullptr, pixels); - #endif - } - else - { - std::vector default_pixels(width * height, 0x00000000); - #ifdef DEBUG - staging_buffer.create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, name, default_pixels.data()); - #else - staging_buffer.create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, nullptr, default_pixels.data()); - #endif - } - Image::copyFromBuffer(staging_buffer); - staging_buffer.destroy(); - } - - void Texture::setPixel(int x, int y, std::uint32_t color) noexcept - { - MLX_PROFILE_FUNCTION(); - if(x < 0 
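	/* Illustrative aside (editor's sketch, not part of the original sources): DescriptorSetLayout::init()
	 * takes (binding, descriptor type) pairs plus a single stage flag, so every binding in one layout is
	 * visible to the same shader stage. A hedged usage sketch matching how the renderer appears to use
	 * it — one vertex-stage layout for the projection UBO and one fragment-stage layout for the sampled
	 * texture (variable names are assumptions for the example): */
	DescriptorSetLayout vert_layout;
	vert_layout.init({ {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER} }, VK_SHADER_STAGE_VERTEX_BIT);
	DescriptorSetLayout frag_layout;
	frag_layout.init({ {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER} }, VK_SHADER_STAGE_FRAGMENT_BIT);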
|| y < 0 || static_cast(x) > getWidth() || static_cast(y) > getHeight()) - return; - if(_map == nullptr) - openCPUmap(); - _cpu_map[(y * getWidth()) + x] = color; - _has_been_modified = true; - } - - int Texture::getPixel(int x, int y) noexcept - { - MLX_PROFILE_FUNCTION(); - if(x < 0 || y < 0 || static_cast(x) > getWidth() || static_cast(y) > getHeight()) - return 0; - if(_map == nullptr) - openCPUmap(); - std::uint32_t color = _cpu_map[(y * getWidth()) + x]; - std::uint8_t* bytes = reinterpret_cast(&color); - std::uint8_t tmp = bytes[0]; - bytes[0] = bytes[2]; - bytes[2] = tmp; - return *reinterpret_cast(bytes); - } - - void Texture::openCPUmap() - { - MLX_PROFILE_FUNCTION(); - if(_map != nullptr) - return; - - #ifdef DEBUG - core::error::report(e_kind::message, "Texture : enabling CPU mapping"); - #endif - std::size_t size = getWidth() * getHeight() * formatSize(getFormat()); - _buf_map.emplace(); - #ifdef DEBUG - _buf_map->create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, _name.c_str()); - #else - _buf_map->create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, nullptr); - #endif - Image::copyToBuffer(*_buf_map); - _buf_map->mapMem(&_map); - _cpu_map = std::vector(getWidth() * getHeight(), 0); - std::memcpy(_cpu_map.data(), _map, size); - #ifdef DEBUG - core::error::report(e_kind::message, "Texture : mapped CPU memory using staging buffer"); - #endif - } - - void Texture::render(std::array& sets, Renderer& renderer, int x, int y) - { - MLX_PROFILE_FUNCTION(); - if(_has_been_modified) - { - std::memcpy(_map, _cpu_map.data(), _cpu_map.size() * formatSize(getFormat())); - Image::copyFromBuffer(*_buf_map); - _has_been_modified = false; - } - if(!_set.isInit()) - _set = renderer.getFragDescriptorSet().duplicate(); - if(getLayout() != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) - transitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); - if(!_has_set_been_updated) - updateSet(0); - auto cmd = renderer.getActiveCmdBuffer(); - _vbo.bind(renderer); - _ibo.bind(renderer); - glm::vec2 translate(x, y); - vkCmdPushConstants(cmd.get(), renderer.getPipeline().getPipelineLayout(), VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(translate), &translate); - sets[1] = _set.get(); - vkCmdBindDescriptorSets(renderer.getActiveCmdBuffer().get(), VK_PIPELINE_BIND_POINT_GRAPHICS, renderer.getPipeline().getPipelineLayout(), 0, sets.size(), sets.data(), 0, nullptr); - vkCmdDrawIndexed(cmd.get(), static_cast(_ibo.getSize() / sizeof(std::uint16_t)), 1, 0, 0, 0); - } - - void Texture::destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - Image::destroy(); - _set.destroy(); - if(_buf_map.has_value()) - _buf_map->destroy(); - _vbo.destroy(); - _ibo.destroy(); - } - - Texture stbTextureLoad(std::filesystem::path file, int* w, int* h) - { - MLX_PROFILE_FUNCTION(); - Texture texture; - int channels; - std::uint8_t* data = nullptr; - std::string filename = file.string(); - - if(!std::filesystem::exists(std::move(file))) - core::error::report(e_kind::fatal_error, "Image : file not found '%s'", filename.c_str()); - if(stbi_is_hdr(filename.c_str())) - core::error::report(e_kind::fatal_error, "Texture : unsupported image format '%s'", filename.c_str()); - int dummy_w; - int dummy_h; - data = stbi_load(filename.c_str(), (w == nullptr ? &dummy_w : w), (h == nullptr ? &dummy_h : h), &channels, 4); - #ifdef DEBUG - texture.create(data, (w == nullptr ? dummy_w : *w), (h == nullptr ? 
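	/* Illustrative aside (editor's reading, not something the diff states): the byte swap in getPixel()
	 * exchanges bytes 0 and 2 of the stored word; on a little-endian host this turns the texture's
	 * R8G8B8A8 memory order into a packed 0xAARRGGBB-style integer, which appears to be the colour
	 * convention the MLX API hands back to callers. A standalone sketch of that conversion
	 * (hypothetical helper name): */
	std::uint32_t rgbaWordToArgb(std::uint32_t rgba_word)
	{
		std::uint8_t* bytes = reinterpret_cast<std::uint8_t*>(&rgba_word);
		std::uint8_t tmp = bytes[0]; // swap the R and B bytes in place
		bytes[0] = bytes[2];
		bytes[2] = tmp;
		return rgba_word;
	}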
dummy_h : *h), VK_FORMAT_R8G8B8A8_UNORM, filename.c_str()); - #else - texture.create(data, (w == nullptr ? dummy_w : *w), (h == nullptr ? dummy_h : *h), VK_FORMAT_R8G8B8A8_UNORM, nullptr); - #endif - stbi_image_free(data); - return texture; - } -} diff --git a/src/renderer/images/texture.h b/src/renderer/images/texture.h deleted file mode 100644 index 77282cf..0000000 --- a/src/renderer/images/texture.h +++ /dev/null @@ -1,69 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* texture.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/08 02:24:58 by maldavid #+# #+# */ -/* Updated: 2024/03/14 19:06:07 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE__ -#define __MLX_TEXTURE__ - -#include -#include -#include -#include -#include -#include -#include -#ifdef DEBUG - #include -#endif - -namespace mlx -{ - class Texture : public Image - { - public: - Texture() = default; - - void create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory = false); - void render(std::array& sets, class Renderer& renderer, int x, int y); - void destroy() noexcept override; - - void setPixel(int x, int y, std::uint32_t color) noexcept; - int getPixel(int x, int y) noexcept; - - inline void setDescriptor(DescriptorSet&& set) noexcept { _set = set; } - inline VkDescriptorSet getSet() noexcept { return _set.isInit() ? _set.get() : VK_NULL_HANDLE; } - inline void updateSet(int binding) noexcept { _set.writeDescriptor(binding, *this); _has_set_been_updated = true; } - inline bool hasBeenUpdated() const noexcept { return _has_set_been_updated; } - inline constexpr void resetUpdate() noexcept { _has_set_been_updated = false; } - - ~Texture() = default; - - private: - void openCPUmap(); - - private: - C_VBO _vbo; - C_IBO _ibo; - #ifdef DEBUG - std::string _name; - #endif - DescriptorSet _set; - std::vector _cpu_map; - std::optional _buf_map = std::nullopt; - void* _map = nullptr; - bool _has_been_modified = false; - bool _has_set_been_updated = false; - }; - - Texture stbTextureLoad(std::filesystem::path file, int* w, int* h); -} - -#endif diff --git a/src/renderer/images/texture_atlas.cpp b/src/renderer/images/texture_atlas.cpp deleted file mode 100644 index fc06315..0000000 --- a/src/renderer/images/texture_atlas.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* texture_atlas.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/07 16:40:09 by maldavid #+# #+# */ -/* Updated: 2024/01/18 10:18:08 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include - -#ifdef IMAGE_OPTIMIZED - #define TILING VK_IMAGE_TILING_OPTIMAL -#else - #define TILING VK_IMAGE_TILING_LINEAR -#endif - -namespace mlx -{ - void TextureAtlas::create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory) - { - Image::create(width, height, format, TILING, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, name, dedicated_memory); - Image::createImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); - 
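	/* Illustrative aside (editor's sketch, not part of the original sources): stbTextureLoad() passes 4
	 * as the last argument of stbi_load, asking stb_image to expand every input to 4 bytes per pixel
	 * (RGBA) regardless of the file's own channel count; that is what makes the returned data directly
	 * usable with VK_FORMAT_R8G8B8A8_UNORM and a 4-byte formatSize(). A minimal standalone sketch of
	 * that call; the file path is an assumption for the example: */
	int w = 0, h = 0, channels = 0;
	std::uint8_t* rgba = stbi_load("example.png", &w, &h, &channels, 4 /* force RGBA output */);
	if(rgba != nullptr)
	{
		// w * h * 4 bytes are now valid, whatever channel count the file really had.
		stbi_image_free(rgba);
	}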
Image::createSampler(); - transitionLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); - - if(pixels == nullptr) - { - core::error::report(e_kind::warning, "Renderer : creating an empty texture atlas. They cannot be updated after creation, this might be a mistake or a bug, please report"); - return; - } - Buffer staging_buffer; - std::size_t size = width * height * formatSize(format); - staging_buffer.create(Buffer::kind::dynamic, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, name, pixels); - Image::copyFromBuffer(staging_buffer); - staging_buffer.destroy(); - } - - void TextureAtlas::render(Renderer& renderer, int x, int y, std::uint32_t ibo_size) const - { - auto cmd = renderer.getActiveCmdBuffer().get(); - - glm::vec2 translate(x, y); - vkCmdPushConstants(cmd, renderer.getPipeline().getPipelineLayout(), VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(translate), &translate); - vkCmdDrawIndexed(cmd, ibo_size / sizeof(std::uint16_t), 1, 0, 0, 0); - } - - void TextureAtlas::destroy() noexcept - { - Image::destroy(); - _set.destroy(); - } -} diff --git a/src/renderer/images/texture_atlas.h b/src/renderer/images/texture_atlas.h deleted file mode 100644 index 4a33b1a..0000000 --- a/src/renderer/images/texture_atlas.h +++ /dev/null @@ -1,46 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* texture_atlas.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/07 16:36:33 by maldavid #+# #+# */ -/* Updated: 2024/03/14 19:57:55 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE_ATLAS__ -#define __MLX_TEXTURE_ATLAS__ - -#include -#include -#include - -namespace mlx -{ - class TextureAtlas : public Image - { - public: - TextureAtlas() = default; - - void create(std::uint8_t* pixels, std::uint32_t width, std::uint32_t height, VkFormat format, const char* name, bool dedicated_memory = false); - void render(class Renderer& renderer, int x, int y, std::uint32_t ibo_size) const; - void destroy() noexcept override; - - inline void setDescriptor(DescriptorSet&& set) noexcept { _set = set; } - inline VkDescriptorSet getVkSet() noexcept { return _set.isInit() ? 
_set.get() : VK_NULL_HANDLE; } - inline DescriptorSet getSet() noexcept { return _set; } - inline void updateSet(int binding) noexcept { _set.writeDescriptor(binding, *this); _has_been_updated = true; } - inline bool hasBeenUpdated() const noexcept { return _has_been_updated; } - inline constexpr void resetUpdate() noexcept { _has_been_updated = false; } - - ~TextureAtlas() = default; - - private: - DescriptorSet _set; - bool _has_been_updated = false; - }; -} - -#endif diff --git a/src/renderer/images/texture_descriptor.h b/src/renderer/images/texture_descriptor.h deleted file mode 100644 index f3a2663..0000000 --- a/src/renderer/images/texture_descriptor.h +++ /dev/null @@ -1,59 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* texture_descriptor.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 01:00:13 by maldavid #+# #+# */ -/* Updated: 2024/01/11 01:21:52 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE_DESCRIPTOR__ -#define __MLX_TEXTURE_DESCRIPTOR__ - -#include -#include -#include - -namespace mlx -{ - struct TextureRenderDescriptor : public DrawableResource - { - Texture* texture; - int x; - int y; - - TextureRenderDescriptor(Texture* _texture, int _x, int _y) : texture(_texture), x(_x), y(_y) {} - inline bool operator==(const TextureRenderDescriptor& rhs) const { return texture == rhs.texture && x == rhs.x && y == rhs.y; } - inline void render(std::array& sets, class Renderer& renderer) override - { - if(!texture->isInit()) - return; - texture->render(sets, renderer, x, y); - } - inline void resetUpdate() override - { - if(!texture->isInit()) - return; - texture->resetUpdate(); - } - }; -} - -namespace std -{ - template <> - struct hash - { - std::size_t operator()(const mlx::TextureRenderDescriptor& d) const noexcept - { - std::size_t hash = 0; - mlx::hashCombine(hash, d.texture, d.x, d.y); - return hash; - } - }; -} - -#endif diff --git a/src/renderer/images/texture_manager.h b/src/renderer/images/texture_manager.h deleted file mode 100644 index a0b37d5..0000000 --- a/src/renderer/images/texture_manager.h +++ /dev/null @@ -1,67 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* texture_manager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:56:15 by maldavid #+# #+# */ -/* Updated: 2024/03/25 13:53:59 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXTURE_MANAGER__ -#define __MLX_TEXTURE_MANAGER__ - -#include -#include -#include -#include - -namespace mlx -{ - class TextureManager - { - public: - TextureManager() = default; - - inline void clear() { _texture_descriptors.clear(); } - - inline std::pair registerTexture(Texture* texture, int x, int y) - { - MLX_PROFILE_FUNCTION(); - auto res = _texture_descriptors.emplace(texture, x, y); - return std::make_pair(static_cast(&const_cast(*res.first)), res.second); - } - - inline bool isTextureKnown(Texture* texture) noexcept - { - MLX_PROFILE_FUNCTION(); - for(const auto& desc : _texture_descriptors) - { - if(desc.texture == texture) - return true; - } - return false; - } - - inline void eraseTextures(Texture* texture) - { - MLX_PROFILE_FUNCTION(); - for(auto it = 
_texture_descriptors.begin(); it != _texture_descriptors.end();) - { - if(it->texture == texture) - it = _texture_descriptors.erase(it); - else - ++it; - } - } - - ~TextureManager() = default; - - private: - std::unordered_set _texture_descriptors; - }; -} - -#endif diff --git a/src/renderer/images/vk_image.cpp b/src/renderer/images/vk_image.cpp deleted file mode 100644 index ed2549b..0000000 --- a/src/renderer/images/vk_image.cpp +++ /dev/null @@ -1,394 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_image.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/25 11:59:07 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:28:25 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_image.h" -#include -#include -#include -#include - -namespace mlx -{ - bool isStencilFormat(VkFormat format) - { - switch(format) - { - case VK_FORMAT_D32_SFLOAT_S8_UINT: - case VK_FORMAT_D24_UNORM_S8_UINT: - return true; - - default: return false; - } - } - - bool isDepthFormat(VkFormat format) - { - switch(format) - { - case VK_FORMAT_D16_UNORM: - case VK_FORMAT_D32_SFLOAT: - case VK_FORMAT_D32_SFLOAT_S8_UINT: - case VK_FORMAT_D24_UNORM_S8_UINT: - case VK_FORMAT_D16_UNORM_S8_UINT: - return true; - - default: return false; - } - } - - VkFormat bitsToFormat(std::uint32_t bits) - { - switch(bits) - { - case 8: return VK_FORMAT_R8_UNORM; - case 16: return VK_FORMAT_R8G8_UNORM; - case 24: return VK_FORMAT_R8G8B8_UNORM; - case 32: return VK_FORMAT_R8G8B8A8_UNORM; - case 48: return VK_FORMAT_R16G16B16_SFLOAT; - case 64: return VK_FORMAT_R16G16B16A16_SFLOAT; - case 96: return VK_FORMAT_R32G32B32_SFLOAT; - case 128: return VK_FORMAT_R32G32B32A32_SFLOAT; - - default: - core::error::report(e_kind::fatal_error, "Vulkan : unsupported image bit-depth"); - return VK_FORMAT_R8G8B8A8_UNORM; - } - } - - VkPipelineStageFlags layoutToAccessMask(VkImageLayout layout, bool isDestination) - { - VkPipelineStageFlags accessMask = 0; - - switch(layout) - { - case VK_IMAGE_LAYOUT_UNDEFINED: - if(isDestination) - core::error::report(e_kind::error, "Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_UNDEFINED"); - break; - case VK_IMAGE_LAYOUT_GENERAL: accessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; break; - case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: accessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; break; - case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: accessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; break; - case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: - accessMask = VK_ACCESS_SHADER_READ_BIT; // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; - break; - case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: accessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; break; - case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: accessMask = VK_ACCESS_TRANSFER_READ_BIT; break; - case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: accessMask = VK_ACCESS_TRANSFER_WRITE_BIT; break; - case VK_IMAGE_LAYOUT_PREINITIALIZED: - if(!isDestination) - accessMask = VK_ACCESS_HOST_WRITE_BIT; - else - core::error::report(e_kind::error, "Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_PREINITIALIZED"); - break; - case 
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: accessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break; - case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: accessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break; - case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: accessMask = VK_ACCESS_MEMORY_READ_BIT; break; - - default: core::error::report(e_kind::error, "Vulkan : unexpected image layout"); break; - } - - return accessMask; - } - - void Image::create(std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, const char* name, bool dedicated_memory) - { - _width = width; - _height = height; - _format = format; - _tiling = tiling; - - VkImageCreateInfo imageInfo{}; - imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; - imageInfo.imageType = VK_IMAGE_TYPE_2D; - imageInfo.extent.width = width; - imageInfo.extent.height = height; - imageInfo.extent.depth = 1; - imageInfo.mipLevels = 1; - imageInfo.arrayLayers = 1; - imageInfo.format = format; - imageInfo.tiling = tiling; - imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; - imageInfo.usage = usage; - imageInfo.samples = VK_SAMPLE_COUNT_1_BIT; - imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; - - VmaAllocationCreateInfo alloc_info{}; - alloc_info.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; - if(dedicated_memory) - { - alloc_info.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; - alloc_info.priority = 1.0f; - } - - _allocation = Render_Core::get().getAllocator().createImage(&imageInfo, &alloc_info, _image, name); - #ifdef DEBUG - _name = name; - #endif - } - - void Image::createImageView(VkImageViewType type, VkImageAspectFlags aspectFlags) noexcept - { - VkImageViewCreateInfo viewInfo{}; - viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; - viewInfo.image = _image; - viewInfo.viewType = type; - viewInfo.format = _format; - viewInfo.subresourceRange.aspectMask = aspectFlags; - viewInfo.subresourceRange.baseMipLevel = 0; - viewInfo.subresourceRange.levelCount = 1; - viewInfo.subresourceRange.baseArrayLayer = 0; - viewInfo.subresourceRange.layerCount = 1; - - VkResult res = vkCreateImageView(Render_Core::get().getDevice().get(), &viewInfo, nullptr, &_image_view); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create an image view, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - else - Render_Core::get().getLayers().setDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_IMAGE_VIEW, (std::uint64_t)_image_view, _name.c_str()); - #endif - } - - void Image::createSampler() noexcept - { - VkSamplerCreateInfo info{}; - info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; - info.magFilter = VK_FILTER_NEAREST; - info.minFilter = VK_FILTER_NEAREST; - info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; - info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; - info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT; - info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT; - info.minLod = -1000; - info.maxLod = 1000; - info.anisotropyEnable = VK_FALSE; - info.maxAnisotropy = 1.0f; - - VkResult res = vkCreateSampler(Render_Core::get().getDevice().get(), &info, nullptr, &_sampler); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create an image sampler, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - else - Render_Core::get().getLayers().setDebugUtilsObjectNameEXT(VK_OBJECT_TYPE_SAMPLER, (std::uint64_t)_sampler, _name.c_str()); - #endif - } - - void Image::copyFromBuffer(Buffer& buffer) - { - 
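	/* Illustrative aside (editor's sketch, not part of the original sources): layoutToAccessMask() maps
	 * an image layout to the access flags a barrier needs on one side of a transition, with the bool
	 * selecting the destination side. A hedged sketch of how such a helper is typically consumed when
	 * recording a layout transition; the function name and the conservative ALL_COMMANDS stage masks
	 * are assumptions for the example, not the renderer's actual command-buffer code. */
	void transitionSketch(VkCommandBuffer cmd, VkImage image, VkImageLayout old_layout, VkImageLayout new_layout)
	{
		VkImageMemoryBarrier barrier{};
		barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
		barrier.oldLayout = old_layout;
		barrier.newLayout = new_layout;
		barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
		barrier.image = image;
		barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
		barrier.srcAccessMask = layoutToAccessMask(old_layout, false); // access on the source side
		barrier.dstAccessMask = layoutToAccessMask(new_layout, true);  // access on the destination side
		vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
				0, 0, nullptr, 0, nullptr, 1, &barrier);
	}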
CmdBuffer& cmd = Render_Core::get().getSingleTimeCmdBuffer(); - cmd.beginRecord(); - - VkImageLayout layout_save = _layout; - transitionLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &cmd); - - cmd.copyBufferToImage(buffer, *this); - - transitionLayout(layout_save, &cmd); - - cmd.endRecord(); - cmd.submitIdle(); - } - - void Image::copyToBuffer(Buffer& buffer) - { - CmdBuffer& cmd = Render_Core::get().getSingleTimeCmdBuffer(); - cmd.beginRecord(); - - VkImageLayout layout_save = _layout; - transitionLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &cmd); - - cmd.copyImagetoBuffer(*this, buffer); - - transitionLayout(layout_save, &cmd); - - cmd.endRecord(); - cmd.submitIdle(); - } - - void Image::transitionLayout(VkImageLayout new_layout, CmdBuffer* cmd) - { - if(new_layout == _layout) - return; - - bool singleTime = (cmd == nullptr); - if(singleTime) - { - cmd = &Render_Core::get().getSingleTimeCmdBuffer(); - cmd->beginRecord(); - } - - cmd->transitionImageLayout(*this, new_layout); - - if(singleTime) - { - cmd->endRecord(); - cmd->submitIdle(); - } - _layout = new_layout; - } - - void Image::destroySampler() noexcept - { - if(_sampler != VK_NULL_HANDLE) - vkDestroySampler(Render_Core::get().getDevice().get(), _sampler, nullptr); - _sampler = VK_NULL_HANDLE; - } - - void Image::destroyImageView() noexcept - { - if(_image_view != VK_NULL_HANDLE) - vkDestroyImageView(Render_Core::get().getDevice().get(), _image_view, nullptr); - _image_view = VK_NULL_HANDLE; - } - - void Image::destroy() noexcept - { - destroySampler(); - destroyImageView(); - - if(_image != VK_NULL_HANDLE) - Render_Core::get().getAllocator().destroyImage(_allocation, _image); - _image = VK_NULL_HANDLE; - } - - std::uint32_t formatSize(VkFormat format) - { - switch(format) - { - case VK_FORMAT_UNDEFINED: return 0; - case VK_FORMAT_R4G4_UNORM_PACK8: return 1; - case VK_FORMAT_R4G4B4A4_UNORM_PACK16: return 2; - case VK_FORMAT_B4G4R4A4_UNORM_PACK16: return 2; - case VK_FORMAT_R5G6B5_UNORM_PACK16: return 2; - case VK_FORMAT_B5G6R5_UNORM_PACK16: return 2; - case VK_FORMAT_R5G5B5A1_UNORM_PACK16: return 2; - case VK_FORMAT_B5G5R5A1_UNORM_PACK16: return 2; - case VK_FORMAT_A1R5G5B5_UNORM_PACK16: return 2; - case VK_FORMAT_R8_UNORM: return 1; - case VK_FORMAT_R8_SNORM: return 1; - case VK_FORMAT_R8_USCALED: return 1; - case VK_FORMAT_R8_SSCALED: return 1; - case VK_FORMAT_R8_UINT: return 1; - case VK_FORMAT_R8_SINT: return 1; - case VK_FORMAT_R8_SRGB: return 1; - case VK_FORMAT_R8G8_UNORM: return 2; - case VK_FORMAT_R8G8_SNORM: return 2; - case VK_FORMAT_R8G8_USCALED: return 2; - case VK_FORMAT_R8G8_SSCALED: return 2; - case VK_FORMAT_R8G8_UINT: return 2; - case VK_FORMAT_R8G8_SINT: return 2; - case VK_FORMAT_R8G8_SRGB: return 2; - case VK_FORMAT_R8G8B8_UNORM: return 3; - case VK_FORMAT_R8G8B8_SNORM: return 3; - case VK_FORMAT_R8G8B8_USCALED: return 3; - case VK_FORMAT_R8G8B8_SSCALED: return 3; - case VK_FORMAT_R8G8B8_UINT: return 3; - case VK_FORMAT_R8G8B8_SINT: return 3; - case VK_FORMAT_R8G8B8_SRGB: return 3; - case VK_FORMAT_B8G8R8_UNORM: return 3; - case VK_FORMAT_B8G8R8_SNORM: return 3; - case VK_FORMAT_B8G8R8_USCALED: return 3; - case VK_FORMAT_B8G8R8_SSCALED: return 3; - case VK_FORMAT_B8G8R8_UINT: return 3; - case VK_FORMAT_B8G8R8_SINT: return 3; - case VK_FORMAT_B8G8R8_SRGB: return 3; - case VK_FORMAT_R8G8B8A8_UNORM: return 4; - case VK_FORMAT_R8G8B8A8_SNORM: return 4; - case VK_FORMAT_R8G8B8A8_USCALED: return 4; - case VK_FORMAT_R8G8B8A8_SSCALED: return 4; - case VK_FORMAT_R8G8B8A8_UINT: return 4; - case VK_FORMAT_R8G8B8A8_SINT: 
return 4; - case VK_FORMAT_R8G8B8A8_SRGB: return 4; - case VK_FORMAT_B8G8R8A8_UNORM: return 4; - case VK_FORMAT_B8G8R8A8_SNORM: return 4; - case VK_FORMAT_B8G8R8A8_USCALED: return 4; - case VK_FORMAT_B8G8R8A8_SSCALED: return 4; - case VK_FORMAT_B8G8R8A8_UINT: return 4; - case VK_FORMAT_B8G8R8A8_SINT: return 4; - case VK_FORMAT_B8G8R8A8_SRGB: return 4; - case VK_FORMAT_A8B8G8R8_UNORM_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SNORM_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_USCALED_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SSCALED_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_UINT_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SINT_PACK32: return 4; - case VK_FORMAT_A8B8G8R8_SRGB_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_SNORM_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_USCALED_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_SSCALED_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_UINT_PACK32: return 4; - case VK_FORMAT_A2R10G10B10_SINT_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_SNORM_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_USCALED_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_SSCALED_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_UINT_PACK32: return 4; - case VK_FORMAT_A2B10G10R10_SINT_PACK32: return 4; - case VK_FORMAT_R16_UNORM: return 2; - case VK_FORMAT_R16_SNORM: return 2; - case VK_FORMAT_R16_USCALED: return 2; - case VK_FORMAT_R16_SSCALED: return 2; - case VK_FORMAT_R16_UINT: return 2; - case VK_FORMAT_R16_SINT: return 2; - case VK_FORMAT_R16_SFLOAT: return 2; - case VK_FORMAT_R16G16_UNORM: return 4; - case VK_FORMAT_R16G16_SNORM: return 4; - case VK_FORMAT_R16G16_USCALED: return 4; - case VK_FORMAT_R16G16_SSCALED: return 4; - case VK_FORMAT_R16G16_UINT: return 4; - case VK_FORMAT_R16G16_SINT: return 4; - case VK_FORMAT_R16G16_SFLOAT: return 4; - case VK_FORMAT_R16G16B16_UNORM: return 6; - case VK_FORMAT_R16G16B16_SNORM: return 6; - case VK_FORMAT_R16G16B16_USCALED: return 6; - case VK_FORMAT_R16G16B16_SSCALED: return 6; - case VK_FORMAT_R16G16B16_UINT: return 6; - case VK_FORMAT_R16G16B16_SINT: return 6; - case VK_FORMAT_R16G16B16_SFLOAT: return 6; - case VK_FORMAT_R16G16B16A16_UNORM: return 8; - case VK_FORMAT_R16G16B16A16_SNORM: return 8; - case VK_FORMAT_R16G16B16A16_USCALED: return 8; - case VK_FORMAT_R16G16B16A16_SSCALED: return 8; - case VK_FORMAT_R16G16B16A16_UINT: return 8; - case VK_FORMAT_R16G16B16A16_SINT: return 8; - case VK_FORMAT_R16G16B16A16_SFLOAT: return 8; - case VK_FORMAT_R32_UINT: return 4; - case VK_FORMAT_R32_SINT: return 4; - case VK_FORMAT_R32_SFLOAT: return 4; - case VK_FORMAT_R32G32_UINT: return 8; - case VK_FORMAT_R32G32_SINT: return 8; - case VK_FORMAT_R32G32_SFLOAT: return 8; - case VK_FORMAT_R32G32B32_UINT: return 12; - case VK_FORMAT_R32G32B32_SINT: return 12; - case VK_FORMAT_R32G32B32_SFLOAT: return 12; - case VK_FORMAT_R32G32B32A32_UINT: return 16; - case VK_FORMAT_R32G32B32A32_SINT: return 16; - case VK_FORMAT_R32G32B32A32_SFLOAT: return 16; - case VK_FORMAT_R64_UINT: return 8; - case VK_FORMAT_R64_SINT: return 8; - case VK_FORMAT_R64_SFLOAT: return 8; - case VK_FORMAT_R64G64_UINT: return 16; - case VK_FORMAT_R64G64_SINT: return 16; - case VK_FORMAT_R64G64_SFLOAT: return 16; - case VK_FORMAT_R64G64B64_UINT: return 24; - case VK_FORMAT_R64G64B64_SINT: return 24; - case VK_FORMAT_R64G64B64_SFLOAT: return 24; - case VK_FORMAT_R64G64B64A64_UINT: return 32; - case VK_FORMAT_R64G64B64A64_SINT: return 32; - case 
VK_FORMAT_R64G64B64A64_SFLOAT: return 32; - case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return 4; - case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4; - - default: return 0; - } - } -} diff --git a/src/renderer/images/vk_image.h b/src/renderer/images/vk_image.h deleted file mode 100644 index 017ec41..0000000 --- a/src/renderer/images/vk_image.h +++ /dev/null @@ -1,91 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_image.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/01/25 11:54:21 by maldavid #+# #+# */ -/* Updated: 2024/01/19 06:10:15 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_IMAGE__ -#define __MLX_VK_IMAGE__ - -#include -#include -#include -#include -#include -#include - -#ifdef DEBUG - #include -#endif - -namespace mlx -{ - std::uint32_t formatSize(VkFormat format); - bool isStencilFormat(VkFormat format); - bool isDepthFormat(VkFormat format); - VkFormat bitsToFormat(std::uint32_t bits); - VkPipelineStageFlags layoutToAccessMask(VkImageLayout layout, bool isDestination); - - class Image : public CmdResource - { - friend class SwapChain; - - public: - Image() = default; - - inline void create(VkImage image, VkFormat format, std::uint32_t width, std::uint32_t height, VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED) noexcept - { - _image = image; - _format = format; - _width = width; - _height = height; - _layout = layout; - } - void create(std::uint32_t width, std::uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, const char* name, bool decated_memory = false); - void createImageView(VkImageViewType type, VkImageAspectFlags aspectFlags) noexcept; - void createSampler() noexcept; - void copyFromBuffer(class Buffer& buffer); - void copyToBuffer(class Buffer& buffer); - void transitionLayout(VkImageLayout new_layout, CmdBuffer* cmd = nullptr); - virtual void destroy() noexcept; - - inline VkImage get() noexcept { return _image; } - inline VkImage operator()() noexcept { return _image; } - inline VkImageView getImageView() const noexcept { return _image_view; } - inline VkFormat getFormat() const noexcept { return _format; } - inline VkImageTiling getTiling() const noexcept { return _tiling; } - inline VkImageLayout getLayout() const noexcept { return _layout; } - inline VkSampler getSampler() const noexcept { return _sampler; } - inline std::uint32_t getWidth() const noexcept { return _width; } - inline std::uint32_t getHeight() const noexcept { return _height; } - inline bool isInit() const noexcept { return _image != VK_NULL_HANDLE; } - - virtual ~Image() = default; - - private: - void destroySampler() noexcept; - void destroyImageView() noexcept; - - private: - VmaAllocation _allocation; - VkImage _image = VK_NULL_HANDLE; - VkImageView _image_view = VK_NULL_HANDLE; - VkSampler _sampler = VK_NULL_HANDLE; - #ifdef DEBUG - std::string _name; - #endif - VkFormat _format; - VkImageTiling _tiling; - VkImageLayout _layout = VK_IMAGE_LAYOUT_UNDEFINED; - std::uint32_t _width = 0; - std::uint32_t _height = 0; - }; -} - -#endif diff --git a/src/renderer/pipeline/pipeline.cpp b/src/renderer/pipeline/pipeline.cpp deleted file mode 100644 index 311d6b2..0000000 --- a/src/renderer/pipeline/pipeline.cpp +++ /dev/null @@ -1,328 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* 
pipeline.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 21:27:38 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:05:21 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "pipeline.h" -#include -#include -#include - -namespace mlx -{ - /** - #version 450 core - - layout(location = 0) in vec2 aPos; - layout(location = 1) in vec4 aColor; - layout(location = 2) in vec2 aUV; - - layout(set = 0, binding = 0) uniform uProjection { - mat4 mat; - } uProj; - - layout(push_constant) uniform uModelPushConstant { - vec2 vec; - } uTranslate; - - out gl_PerVertex { - vec4 gl_Position; - }; - - layout(location = 0) out struct { - vec4 Color; - vec2 UV; - } Out; - - void main() - { - Out.Color = aColor; - Out.UV = aUV; - vec2 pos = aPos + uTranslate.vec; - gl_Position = uProj.mat * vec4(pos.x, pos.y, 0.0, 1.0); - } - */ - const std::vector vertex_shader = { // precompiled vertex shader - 0x07230203,0x00010000,0x0008000b,0x0000003b,0x00000000,0x00020011,0x00000001,0x0006000b, - 0x00000001,0x4c534c47,0x6474732e,0x3035342e,0x00000000,0x0003000e,0x00000000,0x00000001, - 0x000a000f,0x00000000,0x00000004,0x6e69616d,0x00000000,0x0000000b,0x0000000f,0x00000015, - 0x0000001b,0x00000026,0x00030003,0x00000002,0x000001c2,0x00040005,0x00000004,0x6e69616d, - 0x00000000,0x00030005,0x00000009,0x00000000,0x00050006,0x00000009,0x00000000,0x6f6c6f43, - 0x00000072,0x00040006,0x00000009,0x00000001,0x00005655,0x00030005,0x0000000b,0x0074754f, - 0x00040005,0x0000000f,0x6c6f4361,0x0000726f,0x00030005,0x00000015,0x00565561,0x00030005, - 0x0000001a,0x00736f70,0x00040005,0x0000001b,0x736f5061,0x00000000,0x00070005,0x0000001d, - 0x646f4d75,0x75506c65,0x6f436873,0x6174736e,0x0000746e,0x00040006,0x0000001d,0x00000000, - 0x00636576,0x00050005,0x0000001f,0x61725475,0x616c736e,0x00006574,0x00060005,0x00000024, - 0x505f6c67,0x65567265,0x78657472,0x00000000,0x00060006,0x00000024,0x00000000,0x505f6c67, - 0x7469736f,0x006e6f69,0x00030005,0x00000026,0x00000000,0x00050005,0x00000028,0x6f725075, - 0x7463656a,0x006e6f69,0x00040006,0x00000028,0x00000000,0x0074616d,0x00040005,0x0000002a, - 0x6f725075,0x0000006a,0x00040047,0x0000000b,0x0000001e,0x00000000,0x00040047,0x0000000f, - 0x0000001e,0x00000001,0x00040047,0x00000015,0x0000001e,0x00000002,0x00040047,0x0000001b, - 0x0000001e,0x00000000,0x00050048,0x0000001d,0x00000000,0x00000023,0x00000000,0x00030047, - 0x0000001d,0x00000002,0x00050048,0x00000024,0x00000000,0x0000000b,0x00000000,0x00030047, - 0x00000024,0x00000002,0x00040048,0x00000028,0x00000000,0x00000005,0x00050048,0x00000028, - 0x00000000,0x00000023,0x00000000,0x00050048,0x00000028,0x00000000,0x00000007,0x00000010, - 0x00030047,0x00000028,0x00000002,0x00040047,0x0000002a,0x00000022,0x00000000,0x00040047, - 0x0000002a,0x00000021,0x00000000,0x00020013,0x00000002,0x00030021,0x00000003,0x00000002, - 0x00030016,0x00000006,0x00000020,0x00040017,0x00000007,0x00000006,0x00000004,0x00040017, - 0x00000008,0x00000006,0x00000002,0x0004001e,0x00000009,0x00000007,0x00000008,0x00040020, - 0x0000000a,0x00000003,0x00000009,0x0004003b,0x0000000a,0x0000000b,0x00000003,0x00040015, - 0x0000000c,0x00000020,0x00000001,0x0004002b,0x0000000c,0x0000000d,0x00000000,0x00040020, - 0x0000000e,0x00000001,0x00000007,0x0004003b,0x0000000e,0x0000000f,0x00000001,0x00040020, - 0x00000011,0x00000003,0x00000007,0x0004002b,0x0000000c,0x00000013,0x00000001,0x00040020, - 
0x00000014,0x00000001,0x00000008,0x0004003b,0x00000014,0x00000015,0x00000001,0x00040020, - 0x00000017,0x00000003,0x00000008,0x00040020,0x00000019,0x00000007,0x00000008,0x0004003b, - 0x00000014,0x0000001b,0x00000001,0x0003001e,0x0000001d,0x00000008,0x00040020,0x0000001e, - 0x00000009,0x0000001d,0x0004003b,0x0000001e,0x0000001f,0x00000009,0x00040020,0x00000020, - 0x00000009,0x00000008,0x0003001e,0x00000024,0x00000007,0x00040020,0x00000025,0x00000003, - 0x00000024,0x0004003b,0x00000025,0x00000026,0x00000003,0x00040018,0x00000027,0x00000007, - 0x00000004,0x0003001e,0x00000028,0x00000027,0x00040020,0x00000029,0x00000002,0x00000028, - 0x0004003b,0x00000029,0x0000002a,0x00000002,0x00040020,0x0000002b,0x00000002,0x00000027, - 0x00040015,0x0000002e,0x00000020,0x00000000,0x0004002b,0x0000002e,0x0000002f,0x00000000, - 0x00040020,0x00000030,0x00000007,0x00000006,0x0004002b,0x0000002e,0x00000033,0x00000001, - 0x0004002b,0x00000006,0x00000036,0x00000000,0x0004002b,0x00000006,0x00000037,0x3f800000, - 0x00050036,0x00000002,0x00000004,0x00000000,0x00000003,0x000200f8,0x00000005,0x0004003b, - 0x00000019,0x0000001a,0x00000007,0x0004003d,0x00000007,0x00000010,0x0000000f,0x00050041, - 0x00000011,0x00000012,0x0000000b,0x0000000d,0x0003003e,0x00000012,0x00000010,0x0004003d, - 0x00000008,0x00000016,0x00000015,0x00050041,0x00000017,0x00000018,0x0000000b,0x00000013, - 0x0003003e,0x00000018,0x00000016,0x0004003d,0x00000008,0x0000001c,0x0000001b,0x00050041, - 0x00000020,0x00000021,0x0000001f,0x0000000d,0x0004003d,0x00000008,0x00000022,0x00000021, - 0x00050081,0x00000008,0x00000023,0x0000001c,0x00000022,0x0003003e,0x0000001a,0x00000023, - 0x00050041,0x0000002b,0x0000002c,0x0000002a,0x0000000d,0x0004003d,0x00000027,0x0000002d, - 0x0000002c,0x00050041,0x00000030,0x00000031,0x0000001a,0x0000002f,0x0004003d,0x00000006, - 0x00000032,0x00000031,0x00050041,0x00000030,0x00000034,0x0000001a,0x00000033,0x0004003d, - 0x00000006,0x00000035,0x00000034,0x00070050,0x00000007,0x00000038,0x00000032,0x00000035, - 0x00000036,0x00000037,0x00050091,0x00000007,0x00000039,0x0000002d,0x00000038,0x00050041, - 0x00000011,0x0000003a,0x00000026,0x0000000d,0x0003003e,0x0000003a,0x00000039,0x000100fd, - 0x00010038 - }; - - /** - #version 450 core - - layout(location = 0) out vec4 fColor; - - layout(set = 1, binding = 0) uniform sampler2D sTexture; - - layout(location = 0) in struct { - vec4 Color; - vec2 UV; - } In; - - void main() - { - vec4 process_color = In.Color * texture(sTexture, In.UV.st); - if(process_color.w == 0) - discard; - fColor = process_color; - } - */ - const std::vector fragment_shader = { // pre compiled fragment shader - 0x07230203,0x00010000,0x0008000b,0x0000002c,0x00000000,0x00020011,0x00000001,0x0006000b, - 0x00000001,0x4c534c47,0x6474732e,0x3035342e,0x00000000,0x0003000e,0x00000000,0x00000001, - 0x0007000f,0x00000004,0x00000004,0x6e69616d,0x00000000,0x0000000d,0x0000002a,0x00030010, - 0x00000004,0x00000007,0x00030003,0x00000002,0x000001c2,0x00040005,0x00000004,0x6e69616d, - 0x00000000,0x00060005,0x00000009,0x636f7270,0x5f737365,0x6f6c6f63,0x00000072,0x00030005, - 0x0000000b,0x00000000,0x00050006,0x0000000b,0x00000000,0x6f6c6f43,0x00000072,0x00040006, - 0x0000000b,0x00000001,0x00005655,0x00030005,0x0000000d,0x00006e49,0x00050005,0x00000016, - 0x78655473,0x65727574,0x00000000,0x00040005,0x0000002a,0x6c6f4366,0x0000726f,0x00040047, - 0x0000000d,0x0000001e,0x00000000,0x00040047,0x00000016,0x00000022,0x00000001,0x00040047, - 0x00000016,0x00000021,0x00000000,0x00040047,0x0000002a,0x0000001e,0x00000000,0x00020013, - 
0x00000002,0x00030021,0x00000003,0x00000002,0x00030016,0x00000006,0x00000020,0x00040017, - 0x00000007,0x00000006,0x00000004,0x00040020,0x00000008,0x00000007,0x00000007,0x00040017, - 0x0000000a,0x00000006,0x00000002,0x0004001e,0x0000000b,0x00000007,0x0000000a,0x00040020, - 0x0000000c,0x00000001,0x0000000b,0x0004003b,0x0000000c,0x0000000d,0x00000001,0x00040015, - 0x0000000e,0x00000020,0x00000001,0x0004002b,0x0000000e,0x0000000f,0x00000000,0x00040020, - 0x00000010,0x00000001,0x00000007,0x00090019,0x00000013,0x00000006,0x00000001,0x00000000, - 0x00000000,0x00000000,0x00000001,0x00000000,0x0003001b,0x00000014,0x00000013,0x00040020, - 0x00000015,0x00000000,0x00000014,0x0004003b,0x00000015,0x00000016,0x00000000,0x0004002b, - 0x0000000e,0x00000018,0x00000001,0x00040020,0x00000019,0x00000001,0x0000000a,0x00040015, - 0x0000001e,0x00000020,0x00000000,0x0004002b,0x0000001e,0x0000001f,0x00000003,0x00040020, - 0x00000020,0x00000007,0x00000006,0x0004002b,0x00000006,0x00000023,0x00000000,0x00020014, - 0x00000024,0x00040020,0x00000029,0x00000003,0x00000007,0x0004003b,0x00000029,0x0000002a, - 0x00000003,0x00050036,0x00000002,0x00000004,0x00000000,0x00000003,0x000200f8,0x00000005, - 0x0004003b,0x00000008,0x00000009,0x00000007,0x00050041,0x00000010,0x00000011,0x0000000d, - 0x0000000f,0x0004003d,0x00000007,0x00000012,0x00000011,0x0004003d,0x00000014,0x00000017, - 0x00000016,0x00050041,0x00000019,0x0000001a,0x0000000d,0x00000018,0x0004003d,0x0000000a, - 0x0000001b,0x0000001a,0x00050057,0x00000007,0x0000001c,0x00000017,0x0000001b,0x00050085, - 0x00000007,0x0000001d,0x00000012,0x0000001c,0x0003003e,0x00000009,0x0000001d,0x00050041, - 0x00000020,0x00000021,0x00000009,0x0000001f,0x0004003d,0x00000006,0x00000022,0x00000021, - 0x000500b4,0x00000024,0x00000025,0x00000022,0x00000023,0x000300f7,0x00000027,0x00000000, - 0x000400fa,0x00000025,0x00000026,0x00000027,0x000200f8,0x00000026,0x000100fc,0x000200f8, - 0x00000027,0x0004003d,0x00000007,0x0000002b,0x00000009,0x0003003e,0x0000002a,0x0000002b, - 0x000100fd,0x00010038 - }; - - void GraphicPipeline::init(Renderer& renderer) - { - VkShaderModuleCreateInfo createInfo{}; - createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; - createInfo.codeSize = vertex_shader.size() * sizeof(std::uint32_t); - createInfo.pCode = vertex_shader.data(); - VkShaderModule vshader; - if(vkCreateShaderModule(Render_Core::get().getDevice().get(), &createInfo, nullptr, &vshader) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a vertex shader module"); - - VkPushConstantRange push_constant; - push_constant.offset = 0; - push_constant.size = sizeof(glm::vec2); - push_constant.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; - - createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; - createInfo.codeSize = fragment_shader.size() * sizeof(std::uint32_t); - createInfo.pCode = fragment_shader.data(); - VkShaderModule fshader; - if(vkCreateShaderModule(Render_Core::get().getDevice().get(), &createInfo, nullptr, &fshader) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a fragment shader module"); - - VkPipelineShaderStageCreateInfo vertShaderStageInfo{}; - vertShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; - vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT; - vertShaderStageInfo.module = vshader; - vertShaderStageInfo.pName = "main"; - - VkPipelineShaderStageCreateInfo fragShaderStageInfo{}; - fragShaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; - 
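	/* Illustrative aside (editor's note, not something the diff states): the two uint32_t arrays above
	 * are SPIR-V binaries for the GLSL shown in the comments preceding them. One common way to
	 * regenerate such embedded modules (an assumption about the workflow, not a documented step of this
	 * project) is the reference compiler, e.g.
	 *     glslangValidator -V shader.vert --vn vertex_shader -o vertex_shader.h
	 * which emits the module as a C array. Also worth noting for anyone editing these arrays:
	 * VkShaderModuleCreateInfo::codeSize is a byte count, which is why init() below multiplies the
	 * element count by sizeof(std::uint32_t). */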
fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT; - fragShaderStageInfo.module = fshader; - fragShaderStageInfo.pName = "main"; - - std::array stages = {vertShaderStageInfo, fragShaderStageInfo}; - - auto bindingDescription = Vertex::getBindingDescription(); - auto attributeDescriptions = Vertex::getAttributeDescriptions(); - - VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo{}; - vertexInputStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; - vertexInputStateCreateInfo.vertexBindingDescriptionCount = 1; - vertexInputStateCreateInfo.pVertexBindingDescriptions = &bindingDescription; - vertexInputStateCreateInfo.vertexAttributeDescriptionCount = static_cast(attributeDescriptions.size()); - vertexInputStateCreateInfo.pVertexAttributeDescriptions = attributeDescriptions.data(); - - VkPipelineInputAssemblyStateCreateInfo inputAssembly{}; - inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; - inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; - inputAssembly.primitiveRestartEnable = VK_FALSE; - - VkDynamicState states[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR }; - - constexpr std::size_t statesCount = sizeof(states) / sizeof(VkDynamicState); - VkPipelineDynamicStateCreateInfo dynamicStates{}; - dynamicStates.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; - dynamicStates.dynamicStateCount = statesCount; - dynamicStates.pDynamicStates = states; - - VkViewport viewport{}; - viewport.x = 0.0f; - viewport.y = 0.0f; - viewport.width = (float)renderer.getFrameBuffer(0).getWidth(); - viewport.height = (float)renderer.getFrameBuffer(0).getHeight(); - viewport.minDepth = 0.0f; - viewport.maxDepth = 1.0f; - - VkRect2D scissor{}; - scissor.offset = { 0, 0 }; - scissor.extent = { renderer.getFrameBuffer(0).getWidth(), renderer.getFrameBuffer(0).getHeight()}; - - VkPipelineViewportStateCreateInfo viewportState{}; - viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; - viewportState.viewportCount = 1; - viewportState.pViewports = &viewport; - viewportState.scissorCount = 1; - viewportState.pScissors = &scissor; - - VkPipelineRasterizationStateCreateInfo rasterizer{}; - rasterizer.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; - rasterizer.depthClampEnable = VK_FALSE; - rasterizer.rasterizerDiscardEnable = VK_FALSE; - rasterizer.polygonMode = VK_POLYGON_MODE_FILL; - rasterizer.lineWidth = 1.0f; - rasterizer.cullMode = VK_CULL_MODE_NONE; - rasterizer.frontFace = VK_FRONT_FACE_CLOCKWISE; - rasterizer.depthBiasEnable = VK_FALSE; - - VkPipelineMultisampleStateCreateInfo multisampling{}; - multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; - multisampling.sampleShadingEnable = VK_FALSE; - multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; - - VkPipelineColorBlendAttachmentState colorBlendAttachment{}; - colorBlendAttachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; - colorBlendAttachment.blendEnable = VK_TRUE; - colorBlendAttachment.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; - colorBlendAttachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; - colorBlendAttachment.colorBlendOp = VK_BLEND_OP_ADD; - colorBlendAttachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; - colorBlendAttachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; - colorBlendAttachment.alphaBlendOp = VK_BLEND_OP_ADD; - - 
VkPipelineColorBlendStateCreateInfo colorBlending{}; - colorBlending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; - colorBlending.logicOpEnable = VK_FALSE; - colorBlending.logicOp = VK_LOGIC_OP_COPY; - colorBlending.attachmentCount = 1; - colorBlending.pAttachments = &colorBlendAttachment; - colorBlending.blendConstants[0] = 1.0f; - colorBlending.blendConstants[1] = 1.0f; - colorBlending.blendConstants[2] = 1.0f; - colorBlending.blendConstants[3] = 1.0f; - - VkDescriptorSetLayout layouts[] = { - renderer.getVertDescriptorSetLayout().get(), - renderer.getFragDescriptorSetLayout().get() - }; - - VkPipelineLayoutCreateInfo pipelineLayoutInfo{}; - pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; - pipelineLayoutInfo.setLayoutCount = 2; - pipelineLayoutInfo.pSetLayouts = layouts; - pipelineLayoutInfo.pushConstantRangeCount = 1; - pipelineLayoutInfo.pPushConstantRanges = &push_constant; - - if(vkCreatePipelineLayout(Render_Core::get().getDevice().get(), &pipelineLayoutInfo, nullptr, &_pipeline_layout) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a graphics pipeline layout"); - - VkGraphicsPipelineCreateInfo pipelineInfo{}; - pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; - pipelineInfo.stageCount = stages.size(); - pipelineInfo.pStages = stages.data(); - pipelineInfo.pVertexInputState = &vertexInputStateCreateInfo; - pipelineInfo.pInputAssemblyState = &inputAssembly; - pipelineInfo.pViewportState = &viewportState; - pipelineInfo.pRasterizationState = &rasterizer; - pipelineInfo.pMultisampleState = &multisampling; - pipelineInfo.pColorBlendState = &colorBlending; - pipelineInfo.pDynamicState = &dynamicStates; - pipelineInfo.layout = _pipeline_layout; - pipelineInfo.renderPass = renderer.getRenderPass().get(); - pipelineInfo.subpass = 0; - pipelineInfo.basePipelineHandle = VK_NULL_HANDLE; - - VkResult res = vkCreateGraphicsPipelines(Render_Core::get().getDevice().get(), VK_NULL_HANDLE, 1, &pipelineInfo, nullptr, &_graphics_pipeline); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a graphics pipeline, %s", RCore::verbaliseResultVk(res)); -#ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new graphic pipeline"); -#endif - - vkDestroyShaderModule(Render_Core::get().getDevice().get(), fshader, nullptr); - vkDestroyShaderModule(Render_Core::get().getDevice().get(), vshader, nullptr); - } - - void GraphicPipeline::destroy() noexcept - { - vkDestroyPipeline(Render_Core::get().getDevice().get(), _graphics_pipeline, nullptr); - vkDestroyPipelineLayout(Render_Core::get().getDevice().get(), _pipeline_layout, nullptr); - _graphics_pipeline = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed a graphics pipeline"); - #endif - } -} diff --git a/src/renderer/pipeline/pipeline.h b/src/renderer/pipeline/pipeline.h deleted file mode 100644 index 9700dc7..0000000 --- a/src/renderer/pipeline/pipeline.h +++ /dev/null @@ -1,39 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* pipeline.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 21:23:52 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:04:28 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __PIPELINE__ -#define __PIPELINE__ - -#include 
-#include -#include - -namespace mlx -{ - class GraphicPipeline - { - public: - void init(class Renderer& renderer); - void destroy() noexcept; - - inline void bindPipeline(CmdBuffer& command_buffer) noexcept { vkCmdBindPipeline(command_buffer.get(), VK_PIPELINE_BIND_POINT_GRAPHICS, _graphics_pipeline); } - - inline const VkPipeline& getPipeline() const noexcept { return _graphics_pipeline; } - inline const VkPipelineLayout& getPipelineLayout() const noexcept { return _pipeline_layout; } - - private: - VkPipeline _graphics_pipeline = VK_NULL_HANDLE; - VkPipelineLayout _pipeline_layout = VK_NULL_HANDLE; - }; -} - -#endif diff --git a/src/renderer/pixel_put.cpp b/src/renderer/pixel_put.cpp deleted file mode 100644 index ef2b911..0000000 --- a/src/renderer/pixel_put.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* pixel_put.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/31 15:14:50 by maldavid #+# #+# */ -/* Updated: 2024/01/11 00:06:01 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include - -namespace mlx -{ - void PixelPutPipeline::init(std::uint32_t width, std::uint32_t height, Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - _texture.create(nullptr, width, height, VK_FORMAT_R8G8B8A8_UNORM, "__mlx_pixel_put_pipeline_texture", true); - _texture.setDescriptor(renderer.getFragDescriptorSet().duplicate()); - - _buffer.create(Buffer::kind::dynamic, sizeof(std::uint32_t) * (width * height), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, "__mlx_pixel_put_pipeline_texture"); - _buffer.mapMem(&_buffer_map); - _cpu_map = std::vector(height * width + 1, 0); - _width = width; - _height = height; - } - - void PixelPutPipeline::setPixel(int x, int y, std::uint32_t color) noexcept - { - MLX_PROFILE_FUNCTION(); - if(x < 0 || y < 0 || x > static_cast(_width) || y > static_cast(_height)) - return; - _cpu_map[(y * _width) + x] = color; - _has_been_modified = true; - } - - void PixelPutPipeline::clear() - { - MLX_PROFILE_FUNCTION(); - _cpu_map.assign(_width * _height, 0); - _has_been_modified = true; - } - - void PixelPutPipeline::render(std::array& sets, Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(_has_been_modified) - { - std::memcpy(_buffer_map, _cpu_map.data(), sizeof(std::uint32_t) * _cpu_map.size()); - _texture.copyFromBuffer(_buffer); - _has_been_modified = false; - } - _texture.updateSet(0); - _texture.render(sets, renderer, 0, 0); - } - - void PixelPutPipeline::destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - _buffer.destroy(); - _texture.destroy(); - } - - PixelPutPipeline::~PixelPutPipeline() {} -} diff --git a/src/renderer/pixel_put.h b/src/renderer/pixel_put.h deleted file mode 100644 index 1de58cd..0000000 --- a/src/renderer/pixel_put.h +++ /dev/null @@ -1,49 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* pixel_put.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/03/31 13:18:50 by maldavid #+# #+# */ -/* Updated: 2024/01/11 00:06:05 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_PIXEL_PUT__ -#define __MLX_PIXEL_PUT__ - -#include -#include -#include - -namespace mlx -{ - class PixelPutPipeline - { 
- public: - PixelPutPipeline() = default; - - void init(std::uint32_t width, std::uint32_t height, class Renderer& renderer) noexcept; - - void setPixel(int x, int y, std::uint32_t color) noexcept; - void render(std::array& sets, class Renderer& renderer) noexcept; - - void clear(); - void destroy() noexcept; - - ~PixelPutPipeline(); - - private: - Texture _texture; - Buffer _buffer; - // using vector as CPU map and not directly writting to mapped buffer to improve performances - std::vector _cpu_map; - void* _buffer_map = nullptr; - std::uint32_t _width = 0; - std::uint32_t _height = 0; - bool _has_been_modified = true; - }; -} - -#endif diff --git a/src/renderer/renderer.cpp b/src/renderer/renderer.cpp deleted file mode 100644 index c1c2cfe..0000000 --- a/src/renderer/renderer.cpp +++ /dev/null @@ -1,190 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* renderer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 17:25:16 by maldavid #+# #+# */ -/* Updated: 2024/03/14 16:34:43 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include - -namespace mlx -{ - void Renderer::init(Texture* render_target) - { - MLX_PROFILE_FUNCTION(); - if(render_target == nullptr) - { - _surface.create(*this); - _swapchain.init(this); - _pass.init(_swapchain.getImagesFormat(), VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); - for(std::size_t i = 0; i < _swapchain.getImagesNumber(); i++) - _framebuffers.emplace_back().init(_pass, _swapchain.getImage(i)); - } - else - { - _render_target = render_target; - _render_target->transitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); - _pass.init(_render_target->getFormat(), _render_target->getLayout()); - _framebuffers.emplace_back().init(_pass, *static_cast(_render_target)); - } - _cmd.init(); - - for(std::size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _semaphores[i].init(); - - _uniform_buffer.reset(new UBO); - #ifdef DEBUG - _uniform_buffer->create(this, sizeof(glm::mat4), "__mlx_matrices_uniform_buffer_"); - #else - _uniform_buffer->create(this, sizeof(glm::mat4), nullptr); - #endif - - _vert_layout.init({ - {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER} - }, VK_SHADER_STAGE_VERTEX_BIT); - _frag_layout.init({ - {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER} - }, VK_SHADER_STAGE_FRAGMENT_BIT); - - _vert_set.init(this, &Render_Core::get().getDescriptorPool(), &_vert_layout); - _frag_set.init(this, &Render_Core::get().getDescriptorPool(), &_frag_layout); - - _vert_set.writeDescriptor(0, _uniform_buffer.get()); - - _pipeline.init(*this); - - _framebuffer_resized = false; - } - - bool Renderer::beginFrame() - { - MLX_PROFILE_FUNCTION(); - auto device = Render_Core::get().getDevice().get(); - - if(_render_target == nullptr) - { - _cmd.getCmdBuffer(_current_frame_index).waitForExecution(); - VkResult result = vkAcquireNextImageKHR(device, _swapchain(), UINT64_MAX, _semaphores[_current_frame_index].getImageSemaphore(), VK_NULL_HANDLE, &_image_index); - - if(result == VK_ERROR_OUT_OF_DATE_KHR) - { - recreateRenderData(); - return false; - } - else if(result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) - core::error::report(e_kind::fatal_error, "Vulkan error : failed to acquire swapchain image"); - } - else - { - _image_index = 0; - if(_render_target->getLayout() != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) - 
_render_target->transitionLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); - } - - _cmd.getCmdBuffer(_current_frame_index).reset(); - _cmd.getCmdBuffer(_current_frame_index).beginRecord(); - auto& fb = _framebuffers[_image_index]; - _pass.begin(getActiveCmdBuffer(), fb); - - _pipeline.bindPipeline(_cmd.getCmdBuffer(_current_frame_index)); - - VkViewport viewport{}; - viewport.x = 0.0f; - viewport.y = 0.0f; - viewport.width = static_cast(fb.getWidth()); - viewport.height = static_cast(fb.getHeight()); - viewport.minDepth = 0.0f; - viewport.maxDepth = 1.0f; - vkCmdSetViewport(_cmd.getCmdBuffer(_current_frame_index).get(), 0, 1, &viewport); - - VkRect2D scissor{}; - scissor.offset = { 0, 0 }; - scissor.extent = { fb.getWidth(), fb.getHeight()}; - vkCmdSetScissor(_cmd.getCmdBuffer(_current_frame_index).get(), 0, 1, &scissor); - - return true; - } - - void Renderer::endFrame() - { - MLX_PROFILE_FUNCTION(); - _pass.end(getActiveCmdBuffer()); - _cmd.getCmdBuffer(_current_frame_index).endRecord(); - - if(_render_target == nullptr) - { - _cmd.getCmdBuffer(_current_frame_index).submit(&_semaphores[_current_frame_index]); - - VkSwapchainKHR swapchain = _swapchain(); - VkSemaphore signalSemaphores[] = { _semaphores[_current_frame_index].getRenderImageSemaphore() }; - - VkPresentInfoKHR presentInfo{}; - presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; - presentInfo.waitSemaphoreCount = 1; - presentInfo.pWaitSemaphores = signalSemaphores; - presentInfo.swapchainCount = 1; - presentInfo.pSwapchains = &swapchain; - presentInfo.pImageIndices = &_image_index; - - VkResult result = vkQueuePresentKHR(Render_Core::get().getQueue().getPresent(), &presentInfo); - - if(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR || _framebuffer_resized) - { - _framebuffer_resized = false; - recreateRenderData(); - } - else if(result != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan error : failed to present swap chain image"); - _current_frame_index = (_current_frame_index + 1) % MAX_FRAMES_IN_FLIGHT; - } - else - { - _cmd.getCmdBuffer(_current_frame_index).submitIdle(true); - _current_frame_index = 0; - } - } - - void Renderer::recreateRenderData() - { - _swapchain.recreate(); - _pass.destroy(); - _pass.init(_swapchain.getImagesFormat(), VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); - for(auto& fb : _framebuffers) - fb.destroy(); - _framebuffers.clear(); - for(std::size_t i = 0; i < _swapchain.getImagesNumber(); i++) - _framebuffers.emplace_back().init(_pass, _swapchain.getImage(i)); - } - - void Renderer::destroy() - { - MLX_PROFILE_FUNCTION(); - vkDeviceWaitIdle(Render_Core::get().getDevice().get()); - - _pipeline.destroy(); - _uniform_buffer->destroy(); - _vert_layout.destroy(); - _frag_layout.destroy(); - _frag_set.destroy(); - _vert_set.destroy(); - _cmd.destroy(); - _pass.destroy(); - if(_render_target == nullptr) - { - _swapchain.destroy(); - _surface.destroy(); - } - for(auto& fb : _framebuffers) - fb.destroy(); - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _semaphores[i].destroy(); - } -} diff --git a/src/renderer/renderer.h b/src/renderer/renderer.h deleted file mode 100644 index a0fc309..0000000 --- a/src/renderer/renderer.h +++ /dev/null @@ -1,145 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* renderer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/12/18 17:14:45 by maldavid #+# #+# */ -/* Updated: 2024/03/14 16:34:20 by maldavid ### ########.fr 
*/ -/* */ -/* ************************************************************************** */ - -#ifndef __RENDERER__ -#define __RENDERER__ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -namespace mlx -{ - struct Vertex - { - glm::vec2 pos; - glm::vec4 color; - glm::vec2 uv; - - Vertex(glm::vec2 _pos, glm::vec4 _color, glm::vec2 _uv) : pos(std::move(_pos)), color(std::move(_color)), uv(std::move(_uv)) {} - - static VkVertexInputBindingDescription getBindingDescription() - { - VkVertexInputBindingDescription bindingDescription{}; - bindingDescription.binding = 0; - bindingDescription.stride = sizeof(Vertex); - bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; - - return bindingDescription; - } - - static std::array getAttributeDescriptions() - { - std::array attributeDescriptions; - - attributeDescriptions[0].binding = 0; - attributeDescriptions[0].location = 0; - attributeDescriptions[0].format = VK_FORMAT_R32G32_SFLOAT; - attributeDescriptions[0].offset = offsetof(Vertex, pos); - - attributeDescriptions[1].binding = 0; - attributeDescriptions[1].location = 1; - attributeDescriptions[1].format = VK_FORMAT_R32G32B32A32_SFLOAT; - attributeDescriptions[1].offset = offsetof(Vertex, color); - - attributeDescriptions[2].binding = 0; - attributeDescriptions[2].location = 2; - attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT; - attributeDescriptions[2].offset = offsetof(Vertex, uv); - - return attributeDescriptions; - } - }; - - class Renderer - { - public: - Renderer() = default; - - void init(class Texture* render_target); - - bool beginFrame(); - void endFrame(); - - void destroy(); - - inline class MLX_Window* getWindow() { return _window; } - inline void setWindow(class MLX_Window* window) { _window = window; } - - inline Surface& getSurface() noexcept { return _surface; } - inline CmdPool& getCmdPool() noexcept { return _cmd.getCmdPool(); } - inline UBO* getUniformBuffer() noexcept { return _uniform_buffer.get(); } - inline SwapChain& getSwapChain() noexcept { return _swapchain; } - inline Semaphore& getSemaphore(int i) noexcept { return _semaphores[i]; } - inline RenderPass& getRenderPass() noexcept { return _pass; } - inline GraphicPipeline& getPipeline() noexcept { return _pipeline; } - inline CmdBuffer& getCmdBuffer(int i) noexcept { return _cmd.getCmdBuffer(i); } - inline CmdBuffer& getActiveCmdBuffer() noexcept { return _cmd.getCmdBuffer(_current_frame_index); } - inline FrameBuffer& getFrameBuffer(int i) noexcept { return _framebuffers[i]; } - inline DescriptorSet& getVertDescriptorSet() noexcept { return _vert_set; } - inline DescriptorSet& getFragDescriptorSet() noexcept { return _frag_set; } - inline DescriptorSetLayout& getVertDescriptorSetLayout() noexcept { return _vert_layout; } - inline DescriptorSetLayout& getFragDescriptorSetLayout() noexcept { return _frag_layout; } - inline std::uint32_t getActiveImageIndex() noexcept { return _current_frame_index; } - inline std::uint32_t getImageIndex() noexcept { return _image_index; } - - constexpr inline void requireFrameBufferResize() noexcept { _framebuffer_resized = true; } - - ~Renderer() = default; - - private: - void recreateRenderData(); - - private: - GraphicPipeline _pipeline; - CmdManager _cmd; - RenderPass _pass; - Surface _surface; - SwapChain _swapchain; - std::array _semaphores; - std::vector _framebuffers; - - DescriptorSetLayout _vert_layout; - DescriptorSetLayout 
_frag_layout; - - DescriptorSet _vert_set; - DescriptorSet _frag_set; - - std::unique_ptr _uniform_buffer; - - class MLX_Window* _window = nullptr; - class Texture* _render_target = nullptr; - - std::uint32_t _current_frame_index = 0; - std::uint32_t _image_index = 0; - bool _framebuffer_resized = false; - }; -} - -#endif diff --git a/src/renderer/renderpass/vk_framebuffer.cpp b/src/renderer/renderpass/vk_framebuffer.cpp deleted file mode 100644 index 20ac661..0000000 --- a/src/renderer/renderpass/vk_framebuffer.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_framebuffer.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:18:06 by maldavid #+# #+# */ -/* Updated: 2024/01/10 21:52:51 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include - -namespace mlx -{ - void FrameBuffer::init(RenderPass& renderpass, Image& image) - { - VkImageView attachments[] = { image.getImageView() }; - - _width = image.getWidth(); - _height = image.getHeight(); - - VkFramebufferCreateInfo framebufferInfo{}; - framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; - framebufferInfo.renderPass = renderpass.get(); - framebufferInfo.attachmentCount = 1; - framebufferInfo.pAttachments = attachments; - framebufferInfo.width = _width; - framebufferInfo.height = _height; - framebufferInfo.layers = 1; - - VkResult res = vkCreateFramebuffer(Render_Core::get().getDevice().get(), &framebufferInfo, nullptr, &_framebuffer); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create a framebuffer, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new framebuffer"); - #endif - } - - void FrameBuffer::destroy() noexcept - { - vkDestroyFramebuffer(Render_Core::get().getDevice().get(), _framebuffer, nullptr); - _framebuffer = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed a framebuffer"); - #endif - } -} diff --git a/src/renderer/renderpass/vk_framebuffer.h b/src/renderer/renderpass/vk_framebuffer.h deleted file mode 100644 index 275c3e6..0000000 --- a/src/renderer/renderpass/vk_framebuffer.h +++ /dev/null @@ -1,39 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_framebuffer.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:19:44 by maldavid #+# #+# */ -/* Updated: 2024/01/03 15:28:19 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_FRAMEBUFFER__ -#define __MLX_VK_FRAMEBUFFER__ - -#include -#include - -namespace mlx -{ - class FrameBuffer - { - public: - void init(class RenderPass& renderpass, class Image& image); - void destroy() noexcept; - - inline VkFramebuffer& operator()() noexcept { return _framebuffer; } - inline VkFramebuffer& get() noexcept { return _framebuffer; } - inline std::uint32_t getWidth() const noexcept { return _width; } - inline std::uint32_t getHeight() const noexcept { return _height; } - - private: - VkFramebuffer _framebuffer = VK_NULL_HANDLE; - std::uint32_t _width = 0; - std::uint32_t _height = 0; - }; -} - -#endif // __MLX_VK_FRAMEBUFFER__ diff --git 
a/src/renderer/renderpass/vk_render_pass.cpp b/src/renderer/renderpass/vk_render_pass.cpp deleted file mode 100644 index e5c8470..0000000 --- a/src/renderer/renderpass/vk_render_pass.cpp +++ /dev/null @@ -1,119 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_render_pass.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:21:36 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:06:01 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include "vk_render_pass.h" -#include -#include -#include -#include - -namespace mlx -{ - static const VkClearValue clearColor = {{{ 0.f, 0.f, 0.f, 1.0f }}}; // wtf, this mess to satisfy a warning - - void RenderPass::init(VkFormat attachement_format, VkImageLayout layout) - { - VkAttachmentDescription colorAttachment{}; - colorAttachment.format = attachement_format; - colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT; - colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; - colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; - colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; - colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; - colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; - colorAttachment.finalLayout = layout; - - VkAttachmentReference colorAttachmentRef{}; - colorAttachmentRef.attachment = 0; - colorAttachmentRef.layout = (layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL : layout); - - VkSubpassDescription subpass1{}; - subpass1.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; - subpass1.colorAttachmentCount = 1; - subpass1.pColorAttachments = &colorAttachmentRef; - - VkSubpassDescription subpasses[] = { subpass1 }; - - std::vector subpassesDeps; - subpassesDeps.emplace_back(); - subpassesDeps.back().srcSubpass = VK_SUBPASS_EXTERNAL; - subpassesDeps.back().dstSubpass = 0; - subpassesDeps.back().srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - subpassesDeps.back().dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; - subpassesDeps.back().srcAccessMask = VK_ACCESS_MEMORY_READ_BIT; - subpassesDeps.back().dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - subpassesDeps.back().dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; - - subpassesDeps.emplace_back(); - subpassesDeps.back().srcSubpass = 0; - subpassesDeps.back().dstSubpass = VK_SUBPASS_EXTERNAL; - subpassesDeps.back().srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; - subpassesDeps.back().dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; - subpassesDeps.back().srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - subpassesDeps.back().dstAccessMask = VK_ACCESS_MEMORY_READ_BIT; - subpassesDeps.back().dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; - - VkRenderPassCreateInfo renderPassInfo{}; - renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; - renderPassInfo.attachmentCount = 1; - renderPassInfo.pAttachments = &colorAttachment; - renderPassInfo.subpassCount = sizeof(subpasses) / sizeof(VkSubpassDescription); - renderPassInfo.pSubpasses = subpasses; - renderPassInfo.dependencyCount = static_cast(subpassesDeps.size()); - renderPassInfo.pDependencies = subpassesDeps.data(); - - VkResult res = vkCreateRenderPass(Render_Core::get().getDevice().get(), &renderPassInfo, nullptr, 
&_render_pass); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create render pass, %s", RCore::verbaliseResultVk(res)); - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new render pass"); - #endif - } - - void RenderPass::begin(class CmdBuffer& cmd, class FrameBuffer& fb) - { - MLX_PROFILE_FUNCTION(); - if(_is_running) - return; - - VkRenderPassBeginInfo renderPassInfo{}; - renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; - renderPassInfo.renderPass = _render_pass; - renderPassInfo.framebuffer = fb.get(); - renderPassInfo.renderArea.offset = { 0, 0 }; - renderPassInfo.renderArea.extent = { fb.getWidth(), fb.getHeight() }; - renderPassInfo.clearValueCount = 1; - renderPassInfo.pClearValues = &clearColor; - - vkCmdBeginRenderPass(cmd.get(), &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE); - - _is_running = true; - } - - void RenderPass::end(class CmdBuffer& cmd) - { - MLX_PROFILE_FUNCTION(); - if(!_is_running) - return; - vkCmdEndRenderPass(cmd.get()); - _is_running = false; - } - - void RenderPass::destroy() noexcept - { - vkDestroyRenderPass(Render_Core::get().getDevice().get(), _render_pass, nullptr); - _render_pass = VK_NULL_HANDLE; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : destroyed a renderpass"); - #endif - } -} diff --git a/src/renderer/renderpass/vk_render_pass.h b/src/renderer/renderpass/vk_render_pass.h deleted file mode 100644 index a0a7c23..0000000 --- a/src/renderer/renderpass/vk_render_pass.h +++ /dev/null @@ -1,39 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_render_pass.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:22:00 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:05:40 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_RENDER_PASS__ -#define __MLX_VK_RENDER_PASS__ - -#include -#include - -namespace mlx -{ - class RenderPass - { - public: - void init(VkFormat attachement_format, VkImageLayout layout); - void destroy() noexcept; - - void begin(class CmdBuffer& cmd, class FrameBuffer& fb); - void end(class CmdBuffer& cmd); - - inline VkRenderPass& operator()() noexcept { return _render_pass; } - inline VkRenderPass& get() noexcept { return _render_pass; } - - private: - VkRenderPass _render_pass = VK_NULL_HANDLE; - bool _is_running = false; - }; -} - -#endif // __MLX_VK_RENDER_PASS__ diff --git a/src/renderer/swapchain/vk_swapchain.cpp b/src/renderer/swapchain/vk_swapchain.cpp deleted file mode 100644 index 0ce2967..0000000 --- a/src/renderer/swapchain/vk_swapchain.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_swapchain.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:22:28 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:08:19 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include -#include - -namespace mlx -{ - void SwapChain::init(Renderer* renderer) - { - VkDevice device = Render_Core::get().getDevice().get(); - - _renderer = renderer; - _swapchain_support = querySwapChainSupport(Render_Core::get().getDevice().getPhysicalDevice()); - - VkSurfaceFormatKHR 
surfaceFormat = renderer->getSurface().chooseSwapSurfaceFormat(_swapchain_support.formats); - VkPresentModeKHR presentMode = chooseSwapPresentMode(_swapchain_support.present_modes); - _extent = chooseSwapExtent(_swapchain_support.capabilities); - - std::uint32_t imageCount = _swapchain_support.capabilities.minImageCount + 1; - if(_swapchain_support.capabilities.maxImageCount > 0 && imageCount > _swapchain_support.capabilities.maxImageCount) - imageCount = _swapchain_support.capabilities.maxImageCount; - - Queues::QueueFamilyIndices indices = Render_Core::get().getQueue().findQueueFamilies(Render_Core::get().getDevice().getPhysicalDevice(), renderer->getSurface().get()); - std::uint32_t queueFamilyIndices[] = { indices.graphics_family.value(), indices.present_family.value() }; - - VkSwapchainCreateInfoKHR createInfo{}; - createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; - createInfo.surface = renderer->getSurface().get(); - createInfo.minImageCount = imageCount; - createInfo.imageFormat = surfaceFormat.format; - createInfo.imageColorSpace = surfaceFormat.colorSpace; - createInfo.imageExtent = _extent; - createInfo.imageArrayLayers = 1; - createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; - createInfo.preTransform = _swapchain_support.capabilities.currentTransform; - createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; - createInfo.presentMode = presentMode; - createInfo.clipped = VK_TRUE; - createInfo.oldSwapchain = VK_NULL_HANDLE; - if(indices.graphics_family != indices.present_family) - { - createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT; - createInfo.queueFamilyIndexCount = 2; - createInfo.pQueueFamilyIndices = queueFamilyIndices; - } - else - createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; - - VkResult res = vkCreateSwapchainKHR(device, &createInfo, nullptr, &_swapchain); - if(res != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : failed to create the swapchain, %s", RCore::verbaliseResultVk(res)); - - std::vector tmp; - vkGetSwapchainImagesKHR(device, _swapchain, &imageCount, nullptr); - _images.resize(imageCount); - tmp.resize(imageCount); - vkGetSwapchainImagesKHR(device, _swapchain, &imageCount, tmp.data()); - - for(std::size_t i = 0; i < imageCount; i++) - { - _images[i].create(tmp[i], surfaceFormat.format, _extent.width, _extent.height); - _images[i].transitionLayout(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); - _images[i].createImageView(VK_IMAGE_VIEW_TYPE_2D, VK_IMAGE_ASPECT_COLOR_BIT); - } - - _swapchain_image_format = surfaceFormat.format; - #ifdef DEBUG - core::error::report(e_kind::message, "Vulkan : created new swapchain"); - #endif - } - - SwapChain::SwapChainSupportDetails SwapChain::querySwapChainSupport(VkPhysicalDevice device) - { - SwapChain::SwapChainSupportDetails details; - VkSurfaceKHR surface = _renderer->getSurface().get(); - - if(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface, &details.capabilities) != VK_SUCCESS) - core::error::report(e_kind::fatal_error, "Vulkan : unable to retrieve surface capabilities"); - - std::uint32_t formatCount = 0; - vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount, nullptr); - - if(formatCount != 0) - { - details.formats.resize(formatCount); - vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount, details.formats.data()); - } - - std::uint32_t presentModeCount; - vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &presentModeCount, nullptr); - - if(presentModeCount != 0) - { - details.present_modes.resize(presentModeCount); - 
vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &presentModeCount, details.present_modes.data()); - } - - return details; - } - - VkPresentModeKHR SwapChain::chooseSwapPresentMode([[maybe_unused]] const std::vector& availablePresentModes) - { - // in the future, you may choose to activate vsync or not - return VK_PRESENT_MODE_IMMEDIATE_KHR; - } - - VkExtent2D SwapChain::chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) - { - if(capabilities.currentExtent.width != std::numeric_limits::max()) - return capabilities.currentExtent; - - int width, height; - SDL_Vulkan_GetDrawableSize(_renderer->getWindow()->getNativeWindow(), &width, &height); - - VkExtent2D actualExtent = { static_cast(width), static_cast(height) }; - - actualExtent.width = std::clamp(actualExtent.width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width); - actualExtent.height = std::clamp(actualExtent.height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height); - - return actualExtent; - } - - void SwapChain::recreate() - { - destroy(); - init(_renderer); - } - - void SwapChain::destroy() noexcept - { - if(_swapchain == VK_NULL_HANDLE) - return; - vkDeviceWaitIdle(Render_Core::get().getDevice().get()); - vkDestroySwapchainKHR(Render_Core::get().getDevice().get(), _swapchain, nullptr); - _swapchain = VK_NULL_HANDLE; - for(Image& img : _images) - img.destroyImageView(); - } -} diff --git a/src/renderer/swapchain/vk_swapchain.h b/src/renderer/swapchain/vk_swapchain.h deleted file mode 100644 index 81ff0f4..0000000 --- a/src/renderer/swapchain/vk_swapchain.h +++ /dev/null @@ -1,68 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* vk_swapchain.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/06 18:23:27 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:06:41 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_VK_SWAPCHAIN__ -#define __MLX_VK_SWAPCHAIN__ - -#include -#include -#include -#include - -namespace mlx -{ - class SwapChain - { - friend class GraphicPipeline; - friend class RenderPass; - friend class Renderer; - - public: - struct SwapChainSupportDetails - { - VkSurfaceCapabilitiesKHR capabilities; - std::vector formats; - std::vector present_modes; - }; - - public: - SwapChain() = default; - - void init(class Renderer* renderer); - void recreate(); - void destroy() noexcept; - - SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device); - VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities); - VkPresentModeKHR chooseSwapPresentMode([[maybe_unused]] const std::vector &availablePresentModes); - - inline VkSwapchainKHR get() noexcept { return _swapchain; } - inline VkSwapchainKHR operator()() noexcept { return _swapchain; } - inline std::size_t getImagesNumber() const noexcept { return _images.size(); } - inline Image& getImage(std::size_t i) noexcept { return _images[i]; } - inline SwapChainSupportDetails getSupport() noexcept { return _swapchain_support; } - inline VkExtent2D getExtent() noexcept { return _extent; } - inline VkFormat getImagesFormat() const noexcept { return _swapchain_image_format; } - - ~SwapChain() = default; - - private: - SwapChainSupportDetails _swapchain_support; - VkSwapchainKHR _swapchain; - std::vector _images; - VkFormat _swapchain_image_format; - VkExtent2D _extent; - class Renderer* 
_renderer = nullptr; - }; -} - -#endif // __MLX_VK_SWAPCHAIN__ diff --git a/src/renderer/texts/font.cpp b/src/renderer/texts/font.cpp deleted file mode 100644 index 37f2d0a..0000000 --- a/src/renderer/texts/font.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* font.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/11 22:06:09 by kbz_8 #+# #+# */ -/* Updated: 2024/01/18 13:16:18 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include - -constexpr const int RANGE = 1024; - -namespace mlx -{ - Font::Font(Renderer& renderer, const std::filesystem::path& path, float scale) : _name(path.string()), _renderer(renderer), _scale(scale) - { - _build_data = path; - } - - Font::Font(class Renderer& renderer, const std::string& name, const std::vector& ttf_data, float scale) : _name(name), _renderer(renderer), _scale(scale) - { - _build_data = ttf_data; - } - - void Font::buildFont() - { - MLX_PROFILE_FUNCTION(); - std::vector file_bytes; - if(std::holds_alternative(_build_data)) - { - std::ifstream file(std::get(_build_data), std::ios::binary); - if(!file.is_open()) - { - core::error::report(e_kind::error, "Font load : cannot open font file, %s", _name.c_str()); - return; - } - std::ifstream::pos_type fileSize = std::filesystem::file_size(std::get(_build_data)); - file.seekg(0, std::ios::beg); - file_bytes.resize(fileSize); - file.read(reinterpret_cast(file_bytes.data()), fileSize); - file.close(); - } - - std::vector tmp_bitmap(RANGE * RANGE); - std::vector vulkan_bitmap(RANGE * RANGE * 4); - stbtt_pack_context pc; - stbtt_PackBegin(&pc, tmp_bitmap.data(), RANGE, RANGE, RANGE, 1, nullptr); - if(std::holds_alternative(_build_data)) - stbtt_PackFontRange(&pc, file_bytes.data(), 0, _scale, 32, 96, _cdata.data()); - else - stbtt_PackFontRange(&pc, std::get>(_build_data).data(), 0, _scale, 32, 96, _cdata.data()); - stbtt_PackEnd(&pc); - for(int i = 0, j = 0; i < RANGE * RANGE; i++, j += 4) - { - vulkan_bitmap[j + 0] = tmp_bitmap[i]; - vulkan_bitmap[j + 1] = tmp_bitmap[i]; - vulkan_bitmap[j + 2] = tmp_bitmap[i]; - vulkan_bitmap[j + 3] = tmp_bitmap[i]; - } - #ifdef DEBUG - _atlas.create(vulkan_bitmap.data(), RANGE, RANGE, VK_FORMAT_R8G8B8A8_UNORM, std::string(_name + "_font_altas").c_str(), true); - #else - _atlas.create(vulkan_bitmap.data(), RANGE, RANGE, VK_FORMAT_R8G8B8A8_UNORM, nullptr, true); - #endif - _atlas.setDescriptor(_renderer.getFragDescriptorSet().duplicate()); - _is_init = true; - } - - void Font::destroy() - { - MLX_PROFILE_FUNCTION(); - _atlas.destroy(); - _is_init = false; - } - - Font::~Font() - { - if(_is_init) - destroy(); - } -} diff --git a/src/renderer/texts/font.h b/src/renderer/texts/font.h deleted file mode 100644 index 0c898a8..0000000 --- a/src/renderer/texts/font.h +++ /dev/null @@ -1,56 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* font.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/11 21:17:04 by kbz_8 #+# #+# */ -/* Updated: 2024/01/18 13:15:55 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_FONT__ -#define __MLX_FONT__ - -#include -#include -#include -#include -#include - -namespace mlx 
-{ - class Font - { - friend class FontLibrary; - public: - Font() = delete; - Font(class Renderer& renderer, const std::filesystem::path& path, float scale); - Font(class Renderer& renderer, const std::string& name, const std::vector& ttf_data, float scale); - - inline const std::string& getName() const { return _name; } - inline float getScale() const noexcept { return _scale; } - inline const std::array& getCharData() const { return _cdata; } - inline const TextureAtlas& getAtlas() const noexcept { return _atlas; } - inline bool operator==(const Font& rhs) const { return rhs._name == _name && rhs._scale == _scale; } - inline bool operator!=(const Font& rhs) const { return rhs._name != _name || rhs._scale != _scale; } - void destroy(); - - ~Font(); - - private: - void buildFont(); - - private: - std::array _cdata; - TextureAtlas _atlas; - std::variant> _build_data; - std::string _name; - class Renderer& _renderer; - float _scale = 0; - bool _is_init = false; - }; -} - -#endif diff --git a/src/renderer/texts/font_library.cpp b/src/renderer/texts/font_library.cpp deleted file mode 100644 index e3a04c0..0000000 --- a/src/renderer/texts/font_library.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* font_library.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 09:28:14 by maldavid #+# #+# */ -/* Updated: 2024/01/18 13:07:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - std::shared_ptr FontLibrary::getFontData(FontID id) - { - MLX_PROFILE_FUNCTION(); - if(!_cache.count(id) || std::find(_invalid_ids.begin(), _invalid_ids.end(), id) != _invalid_ids.end()) - core::error::report(e_kind::fatal_error, "Font Library : wrong font ID '%d'", id); - return _cache[id]; - } - - FontID FontLibrary::addFontToLibrary(std::shared_ptr font) - { - MLX_PROFILE_FUNCTION(); - auto it = std::find_if(_cache.begin(), _cache.end(), [&](const std::pair>& v) - { - return v.second->getScale() == font->getScale() && - v.second->getName() == font->getName() && - std::find(_invalid_ids.begin(), _invalid_ids.end(), v.first) == _invalid_ids.end(); - }); - if(it != _cache.end()) - return it->first; - font->buildFont(); - _cache[_current_id] = font; - _current_id++; - return _current_id - 1; - } - - void FontLibrary::removeFontFromLibrary(FontID id) - { - MLX_PROFILE_FUNCTION(); - if(!_cache.count(id) || std::find(_invalid_ids.begin(), _invalid_ids.end(), id) != _invalid_ids.end()) - { - core::error::report(e_kind::warning, "Font Library : trying to remove a font with an unkown or invalid ID '%d'", id); - return; - } - _cache[id]->destroy(); - _invalid_ids.push_back(id); - } - - void FontLibrary::clearLibrary() - { - MLX_PROFILE_FUNCTION(); - for(auto& [id, font] : _cache) - { - font->destroy(); - _invalid_ids.push_back(id); - } - // do not `_cache.clear();` as it releases the fonts and may not destroy the texture atlas that is in use by command buffers - } -} diff --git a/src/renderer/texts/font_library.h b/src/renderer/texts/font_library.h deleted file mode 100644 index db6364a..0000000 --- a/src/renderer/texts/font_library.h +++ /dev/null @@ -1,54 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* font_library.h :+: :+: :+: */ -/* +:+ +:+ 
+:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/18 09:26:03 by maldavid #+# #+# */ -/* Updated: 2024/10/19 10:47:32 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_FONT_LIBRARY__ -#define __MLX_FONT_LIBRARY__ - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - using FontID = std::uint32_t; - constexpr FontID nullfont = 0; - - class FontLibrary : public Singleton - { - friend class Singleton; - - public: - std::shared_ptr getFontData(FontID id); - FontID addFontToLibrary(std::shared_ptr font); - void removeFontFromLibrary(FontID id); - - void clearLibrary(); - - inline void reset() { _cache.clear(); _invalid_ids.clear(); _current_id = 1; } - - private: - FontLibrary() = default; - ~FontLibrary() = default; - - private: - std::unordered_map> _cache; - std::vector _invalid_ids; - FontID _current_id = 1; - }; -} - -#endif diff --git a/src/renderer/texts/text.cpp b/src/renderer/texts/text.cpp deleted file mode 100644 index a7696dd..0000000 --- a/src/renderer/texts/text.cpp +++ /dev/null @@ -1,77 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:11:56 by maldavid #+# #+# */ -/* Updated: 2024/03/25 16:13:08 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include - -namespace mlx -{ - void Text::init(std::string text, FontID font, std::uint32_t color, std::vector vbo_data, std::vector ibo_data) - { - MLX_PROFILE_FUNCTION(); - if(_is_init) - return; - _text = std::move(text); - _color = color; - _font = font; - #ifdef DEBUG - std::string debug_name = _text; - for(char& c : debug_name) - { - if(c == ' ' || c == '"' || c == '\'') - c = '_'; - } - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _vbo[i].create(sizeof(Vertex) * vbo_data.size(), static_cast(vbo_data.data()), debug_name.c_str()); - _ibo.create(sizeof(std::uint16_t) * ibo_data.size(), ibo_data.data(), debug_name.c_str()); - #else - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _vbo[i].create(sizeof(Vertex) * vbo_data.size(), static_cast(vbo_data.data()), nullptr); - _ibo.create(sizeof(std::uint16_t) * ibo_data.size(), ibo_data.data(), nullptr); - #endif - _is_init = true; - } - - void Text::bind(Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - if(!_is_init) - return; - _vbo[renderer.getActiveImageIndex()].bind(renderer); - _ibo.bind(renderer); - } - - void Text::updateVertexData(int frame, std::vector vbo_data) - { - MLX_PROFILE_FUNCTION(); - if(!_is_init) - return; - _vbo[frame].setData(sizeof(Vertex) * vbo_data.size(), static_cast(vbo_data.data())); - } - - void Text::destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - if(!_is_init) - return; - for(int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) - _vbo[i].destroy(); - _ibo.destroy(); - _is_init = false; - } - - Text::~Text() - { - destroy(); - } -} diff --git a/src/renderer/texts/text.h b/src/renderer/texts/text.h deleted file mode 100644 index fd244fd..0000000 --- a/src/renderer/texts/text.h +++ /dev/null @@ -1,51 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ 
+#+ */ -/* Created: 2024/01/11 00:09:04 by maldavid #+# #+# */ -/* Updated: 2024/03/25 16:16:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT__ -#define __MLX_TEXT__ - -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - class Text - { - public: - Text() = default; - - void init(std::string text, FontID font, std::uint32_t color, std::vector vbo_data, std::vector ibo_data); - void bind(class Renderer& renderer) noexcept; - inline FontID getFontInUse() const noexcept { return _font; } - void updateVertexData(int frame, std::vector vbo_data); - inline std::uint32_t getIBOsize() noexcept { return _ibo.getSize(); } - inline const std::string& getText() const { return _text; } - inline std::uint32_t getColor() const noexcept { return _color; } - void destroy() noexcept; - - ~Text(); - - private: - std::array _vbo; - C_IBO _ibo; - std::string _text; - std::uint32_t _color; - FontID _font = nullfont; - bool _is_init = false; - }; -} - -#endif diff --git a/src/renderer/texts/text_descriptor.cpp b/src/renderer/texts/text_descriptor.cpp deleted file mode 100644 index f5d7f14..0000000 --- a/src/renderer/texts/text_descriptor.cpp +++ /dev/null @@ -1,110 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text_descriptor.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:23:11 by maldavid #+# #+# */ -/* Updated: 2024/03/25 16:13:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include -#include - -#define STB_RECT_PACK_IMPLEMENTATION -#include - -#include - -#define STB_TRUETYPE_IMPLEMENTATION -#define STB_malloc(x, u) ((void)(u), MemManager::malloc(x)) -#define STB_free(x, u) ((void)(u), MemManager::free(x)) -#include - -constexpr const int RANGE = 1024; - -namespace mlx -{ - TextDrawDescriptor::TextDrawDescriptor(std::string text, std::uint32_t _color, int _x, int _y) : color(_color), x(_x), y(_y), _text(std::move(text)) - {} - - void TextDrawDescriptor::init(FontID font) noexcept - { - MLX_PROFILE_FUNCTION(); - std::vector vertexData; - std::vector indexData; - - float stb_x = 0.0f; - float stb_y = 0.0f; - - { - std::shared_ptr font_data = FontLibrary::get().getFontData(font); - - for(char c : _text) - { - if(c < 32) - continue; - - stbtt_aligned_quad q; - stbtt_GetPackedQuad(font_data->getCharData().data(), RANGE, RANGE, c - 32, &stb_x, &stb_y, &q, 1); - - std::size_t index = vertexData.size(); - - glm::vec4 vertex_color = { - static_cast((color & 0x000000FF)) / 255.f, - static_cast((color & 0x0000FF00) >> 8) / 255.f, - static_cast((color & 0x00FF0000) >> 16) / 255.f, - static_cast((color & 0xFF000000) >> 24) / 255.f - }; - - vertexData.emplace_back(glm::vec2{q.x0, q.y0}, vertex_color, glm::vec2{q.s0, q.t0}); - vertexData.emplace_back(glm::vec2{q.x1, q.y0}, vertex_color, glm::vec2{q.s1, q.t0}); - vertexData.emplace_back(glm::vec2{q.x1, q.y1}, vertex_color, glm::vec2{q.s1, q.t1}); - vertexData.emplace_back(glm::vec2{q.x0, q.y1}, vertex_color, glm::vec2{q.s0, q.t1}); - - indexData.emplace_back(index + 0); - indexData.emplace_back(index + 1); - indexData.emplace_back(index + 2); - indexData.emplace_back(index + 2); - indexData.emplace_back(index + 3); - indexData.emplace_back(index + 0); - } - } - std::shared_ptr text_data = 
std::make_shared(); - text_data->init(_text, font, color, std::move(vertexData), std::move(indexData)); - id = TextLibrary::get().addTextToLibrary(text_data); - - #ifdef DEBUG - core::error::report(e_kind::message, "Text put : registered new text to render"); - #endif - } - - void TextDrawDescriptor::render(std::array& sets, Renderer& renderer) - { - MLX_PROFILE_FUNCTION(); - std::shared_ptr draw_data = TextLibrary::get().getTextData(id); - std::shared_ptr font_data = FontLibrary::get().getFontData(draw_data->getFontInUse()); - TextureAtlas& atlas = const_cast(font_data->getAtlas()); - draw_data->bind(renderer); - if(!atlas.getSet().isInit()) - atlas.setDescriptor(renderer.getFragDescriptorSet().duplicate()); - if(!atlas.hasBeenUpdated()) - atlas.updateSet(0); - sets[1] = const_cast(atlas).getVkSet(); - vkCmdBindDescriptorSets(renderer.getActiveCmdBuffer().get(), VK_PIPELINE_BIND_POINT_GRAPHICS, renderer.getPipeline().getPipelineLayout(), 0, sets.size(), sets.data(), 0, nullptr); - atlas.render(renderer, x, y, draw_data->getIBOsize()); - } - - void TextDrawDescriptor::resetUpdate() - { - std::shared_ptr draw_data = TextLibrary::get().getTextData(id); - std::shared_ptr font_data = FontLibrary::get().getFontData(draw_data->getFontInUse()); - TextureAtlas& atlas = const_cast(font_data->getAtlas()); - atlas.resetUpdate(); - } -} diff --git a/src/renderer/texts/text_descriptor.h b/src/renderer/texts/text_descriptor.h deleted file mode 100644 index 27a89cd..0000000 --- a/src/renderer/texts/text_descriptor.h +++ /dev/null @@ -1,66 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text_descriptor.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2024/01/11 00:13:34 by maldavid #+# #+# */ -/* Updated: 2024/02/25 07:58:13 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT_DESCRIPTOR__ -#define __MLX_TEXT_DESCRIPTOR__ - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - class TextDrawDescriptor : public DrawableResource - { - friend class std::hash; - - public: - TextID id; - std::uint32_t color; - int x; - int y; - - public: - TextDrawDescriptor(std::string text, std::uint32_t _color, int _x, int _y); - - void init(FontID font) noexcept; - bool operator==(const TextDrawDescriptor& rhs) const { return _text == rhs._text && x == rhs.x && y == rhs.y && color == rhs.color; } - void render(std::array& sets, Renderer& renderer) override; - void resetUpdate() override; - - TextDrawDescriptor() = default; - - private: - std::string _text; - }; -} - -namespace std -{ - template <> - struct hash - { - std::size_t operator()(const mlx::TextDrawDescriptor& d) const noexcept - { - std::size_t hash = 0; - mlx::hashCombine(hash, d.x, d.y, d.color, d._text); - return hash; - } - }; -} - -#endif diff --git a/src/renderer/texts/text_library.cpp b/src/renderer/texts/text_library.cpp deleted file mode 100644 index 652356c..0000000 --- a/src/renderer/texts/text_library.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text_library.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/10 11:59:57 by maldavid #+# #+# */ -/* Updated: 2024/03/25 16:17:06 by maldavid ### ########.fr */ -/* */ -/* 
************************************************************************** */ - -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - std::shared_ptr TextLibrary::getTextData(TextID id) - { - MLX_PROFILE_FUNCTION(); - if(!_cache.count(id)) - core::error::report(e_kind::fatal_error, "Text Library : wrong text ID '%d'", id); - return _cache[id]; - } - - TextID TextLibrary::addTextToLibrary(std::shared_ptr text) - { - MLX_PROFILE_FUNCTION(); - auto it = std::find_if(_cache.begin(), _cache.end(), [&](const std::pair>& v) - { - return v.second->getText() == text->getText() && v.second->getColor() == text->getColor(); - }); - if(it != _cache.end()) - return it->first; - _cache[_current_id] = text; - _current_id++; - return _current_id - 1; - } - - void TextLibrary::removeTextFromLibrary(TextID id) - { - MLX_PROFILE_FUNCTION(); - if(!_cache.count(id)) - { - core::error::report(e_kind::warning, "Text Library : trying to remove a text with an unkown or invalid ID '%d'", id); - return; - } - _cache[id]->destroy(); - _cache.erase(id); - } - - void TextLibrary::clearLibrary() - { - MLX_PROFILE_FUNCTION(); - for(auto& [id, text] : _cache) - text->destroy(); - _cache.clear(); - } -} diff --git a/src/renderer/texts/text_library.h b/src/renderer/texts/text_library.h deleted file mode 100644 index a7b3bb7..0000000 --- a/src/renderer/texts/text_library.h +++ /dev/null @@ -1,54 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text_library.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/10 11:52:30 by maldavid #+# #+# */ -/* Updated: 2024/10/19 10:49:02 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT_LIBRARY__ -#define __MLX_TEXT_LIBRARY__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - using TextID = std::uint32_t; - constexpr TextID nulltext = 0; - - class TextLibrary : public Singleton - { - friend class Singleton; - - public: - std::shared_ptr getTextData(TextID id); - TextID addTextToLibrary(std::shared_ptr text); - void removeTextFromLibrary(TextID id); - - void clearLibrary(); - - inline void reset() { _cache.clear(); _current_id = 1; } - - private: - TextLibrary() = default; - ~TextLibrary() = default; - - private: - std::unordered_map> _cache; - TextID _current_id = 1; - }; -} - -#endif diff --git a/src/renderer/texts/text_manager.cpp b/src/renderer/texts/text_manager.cpp deleted file mode 100644 index 8c1fbac..0000000 --- a/src/renderer/texts/text_manager.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text_manager.cpp :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/06 16:41:13 by maldavid #+# #+# */ -/* Updated: 2024/02/25 09:29:36 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#include -#include -#include -#include -#include - -#include - -namespace mlx -{ - void TextManager::init(Renderer& renderer) noexcept - { - MLX_PROFILE_FUNCTION(); - loadFont(renderer, "default", 6.f); - } - - void TextManager::loadFont(Renderer& renderer, const std::filesystem::path& filepath, float scale) - { - MLX_PROFILE_FUNCTION(); - std::shared_ptr font; - 
if(filepath.string() == "default") - font = std::make_shared(renderer, "default", dogica_ttf, scale); - else - font = std::make_shared(renderer, filepath, scale); - - _font_in_use = FontLibrary::get().addFontToLibrary(font); - } - - std::pair TextManager::registerText(int x, int y, std::uint32_t color, std::string str) - { - MLX_PROFILE_FUNCTION(); - auto res = _text_descriptors.emplace(std::move(str), color, x, y); - if(res.second) - { - const_cast(*res.first).init(_font_in_use); - return std::make_pair(static_cast(&const_cast(*res.first)), true); - } - - auto text_ptr = TextLibrary::get().getTextData(res.first->id); - if(_font_in_use != text_ptr->getFontInUse()) - { - // TODO : update text vertex buffers rather than destroying it and recreating it - TextLibrary::get().removeTextFromLibrary(res.first->id); - const_cast(*res.first).init(_font_in_use); - } - return std::make_pair(static_cast(&const_cast(*res.first)), false); - } - - void TextManager::destroy() noexcept - { - MLX_PROFILE_FUNCTION(); - _text_descriptors.clear(); - } -} diff --git a/src/renderer/texts/text_manager.h b/src/renderer/texts/text_manager.h deleted file mode 100644 index 140e87d..0000000 --- a/src/renderer/texts/text_manager.h +++ /dev/null @@ -1,48 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* text_manager.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/04/06 16:24:11 by maldavid #+# #+# */ -/* Updated: 2024/03/14 17:08:43 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_TEXT_MANAGER__ -#define __MLX_TEXT_MANAGER__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace mlx -{ - class TextManager - { - public: - TextManager() = default; - - void init(Renderer& renderer) noexcept; - std::pair registerText(int x, int y, std::uint32_t color, std::string str); - inline void clear() { _text_descriptors.clear(); } - void loadFont(Renderer& renderer, const std::filesystem::path& filepath, float scale); - void destroy() noexcept; - - ~TextManager() = default; - - private: - std::unordered_set _text_descriptors; - FontID _font_in_use = nullfont; - }; -} - -#endif diff --git a/src/utils/combine_hash.h b/src/utils/combine_hash.h deleted file mode 100644 index 48a33a3..0000000 --- a/src/utils/combine_hash.h +++ /dev/null @@ -1,32 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* combine_hash.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2023/12/14 16:16:06 by maldavid #+# #+# */ -/* Updated: 2023/12/14 16:47:39 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_HASH__ -#define __MLX_HASH__ - -#include -#include - -namespace mlx -{ - inline void hashCombine([[maybe_unused]] std::size_t& seed) noexcept {} - - template - inline void hashCombine(std::size_t& seed, const T& v, Rest... 
rest) - { - std::hash hasher; - seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); - hashCombine(seed, rest...); - } -} - -#endif diff --git a/src/utils/non_copyable.h b/src/utils/non_copyable.h deleted file mode 100644 index abc402a..0000000 --- a/src/utils/non_copyable.h +++ /dev/null @@ -1,33 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* non_copyable.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:20:13 by maldavid #+# #+# */ -/* Updated: 2024/03/24 14:42:48 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_NON_COPYABLE__ -#define __MLX_NON_COPYABLE__ - -namespace mlx -{ - class NonCopyable - { - protected: - NonCopyable() = default; - virtual ~NonCopyable() = default; - - public: - NonCopyable(const NonCopyable&) = delete; - NonCopyable(NonCopyable&&) noexcept = default; - NonCopyable &operator=(const NonCopyable&) = delete; - NonCopyable &operator=(NonCopyable&&) noexcept = default; - }; - -} - -#endif // __MLX_NON_COPYABLE__ diff --git a/src/utils/singleton.h b/src/utils/singleton.h deleted file mode 100644 index ae8246e..0000000 --- a/src/utils/singleton.h +++ /dev/null @@ -1,32 +0,0 @@ -/* ************************************************************************** */ -/* */ -/* ::: :::::::: */ -/* singleton.h :+: :+: :+: */ -/* +:+ +:+ +:+ */ -/* By: maldavid +#+ +:+ +#+ */ -/* +#+#+#+#+#+ +#+ */ -/* Created: 2022/10/08 19:18:46 by maldavid #+# #+# */ -/* Updated: 2024/03/24 14:42:56 by maldavid ### ########.fr */ -/* */ -/* ************************************************************************** */ - -#ifndef __MLX_SINGLETON__ -#define __MLX_SINGLETON__ - -#include "non_copyable.h" - -namespace mlx -{ - template - class Singleton : public NonCopyable - { - public: - inline static T& get() - { - static T instance; - return instance; - } - }; -} - -#endif // __MLX_SINGLETON__ diff --git a/third_party/glm/common.hpp b/third_party/glm/common.hpp deleted file mode 100755 index 0328dc9..0000000 --- a/third_party/glm/common.hpp +++ /dev/null @@ -1,539 +0,0 @@ -/// @ref core -/// @file glm/common.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.3 Common Functions -/// -/// @defgroup core_func_common Common functions -/// @ingroup core -/// -/// Provides GLSL common functions -/// -/// These all operate component-wise. The description is per component. -/// -/// Include to use these core features. - -#pragma once - -#include "detail/qualifier.hpp" -#include "detail/_fixes.hpp" - -namespace glm -{ - /// @addtogroup core_func_common - /// @{ - - /// Returns x if x >= 0; otherwise, it returns -x. - /// - /// @tparam genType floating-point or signed integer; scalar or vector types. - /// - /// @see GLSL abs man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType abs(genType x); - - /// Returns x if x >= 0; otherwise, it returns -x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL abs man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec abs(vec const& x); - - /// Returns 1.0 if x > 0, 0.0 if x == 0, or -1.0 if x < 0. 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL sign man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec sign(vec const& x); - - /// Returns a value equal to the nearest integer that is less then or equal to x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL floor man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec floor(vec const& x); - - /// Returns a value equal to the nearest integer to x - /// whose absolute value is not larger than the absolute value of x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL trunc man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec trunc(vec const& x); - - /// Returns a value equal to the nearest integer to x. - /// The fraction 0.5 will round in a direction chosen by the - /// implementation, presumably the direction that is fastest. - /// This includes the possibility that round(x) returns the - /// same value as roundEven(x) for all values of x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL round man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec round(vec const& x); - - /// Returns a value equal to the nearest integer to x. - /// A fractional part of 0.5 will round toward the nearest even - /// integer. (Both 3.5 and 4.5 for x will return 4.0.) - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL roundEven man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - /// @see New round to even technique - template - GLM_FUNC_DECL vec roundEven(vec const& x); - - /// Returns a value equal to the nearest integer - /// that is greater than or equal to x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL ceil man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec ceil(vec const& x); - - /// Return x - floor(x). - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL fract man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType fract(genType x); - - /// Return x - floor(x). 
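// A hedged usage sketch (illustrative only, not part of the original sources) of the
// component-wise rounding functions documented in this header; it assumes GLM is
// available as <glm/glm.hpp>:

#include <glm/glm.hpp>
#include <cassert>

inline void rounding_sketch()
{
	const glm::vec2 v(-1.5f, 2.75f);
	assert(glm::floor(v) == glm::vec2(-2.0f, 2.0f));   // nearest integer <= x
	assert(glm::trunc(v) == glm::vec2(-1.0f, 2.0f));   // rounds toward zero
	assert(glm::fract(v) == glm::vec2(0.5f, 0.75f));   // x - floor(x), always in [0, 1)
	// roundEven breaks .5 ties toward the nearest even integer: both 3.5 and 4.5 give 4.
	assert(glm::roundEven(glm::vec2(3.5f, 4.5f)) == glm::vec2(4.0f, 4.0f));
}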
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL fract man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec fract(vec const& x); - - template - GLM_FUNC_DECL genType mod(genType x, genType y); - - template - GLM_FUNC_DECL vec mod(vec const& x, T y); - - /// Modulus. Returns x - y * floor(x / y) - /// for each component in x using the floating point value y. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types, include glm/gtc/integer for integer scalar types support - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL mod man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec mod(vec const& x, vec const& y); - - /// Returns the fractional part of x and sets i to the integer - /// part (as a whole number floating point value). Both the - /// return value and the output parameter will have the same - /// sign as x. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL modf man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType modf(genType x, genType& i); - - /// Returns y if y < x; otherwise, it returns x. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. - /// - /// @see GLSL min man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType min(genType x, genType y); - - /// Returns y if y < x; otherwise, it returns x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL min man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& x, T y); - - /// Returns y if y < x; otherwise, it returns x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL min man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& x, vec const& y); - - /// Returns y if x < y; otherwise, it returns x. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. - /// - /// @see GLSL max man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType max(genType x, genType y); - - /// Returns y if x < y; otherwise, it returns x. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL max man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, T y); - - /// Returns y if x < y; otherwise, it returns x. 
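// A short illustrative sketch (not from the original sources) of the component-wise
// mod/min/max overloads declared above; note that, unlike std::fmod, the GLSL-style
// mod(x, y) = x - y * floor(x / y) stays non-negative for positive y:

#include <glm/glm.hpp>
#include <cassert>

inline void common_math_sketch()
{
	assert(glm::mod(glm::vec2(5.5f, -5.5f), 2.0f) == glm::vec2(1.5f, 0.5f));
	assert(glm::min(glm::vec2(1.0f, 4.0f), 2.0f) == glm::vec2(1.0f, 2.0f));
	assert(glm::max(glm::vec2(1.0f, 4.0f), glm::vec2(3.0f, 2.0f)) == glm::vec2(3.0f, 4.0f));
}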
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL max man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, vec const& y); - - /// Returns min(max(x, minVal), maxVal) for each component in x - /// using the floating-point values minVal and maxVal. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. - /// - /// @see GLSL clamp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x - /// using the floating-point values minVal and maxVal. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL clamp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec clamp(vec const& x, T minVal, T maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x - /// using the floating-point values minVal and maxVal. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL clamp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec clamp(vec const& x, vec const& minVal, vec const& maxVal); - - /// If genTypeU is a floating scalar or vector: - /// Returns x * (1.0 - a) + y * a, i.e., the linear blend of - /// x and y using the floating-point value a. - /// The value for a is not restricted to the range [0, 1]. - /// - /// If genTypeU is a boolean scalar or vector: - /// Selects which vector each returned component comes - /// from. For a component of 'a' that is false, the - /// corresponding component of 'x' is returned. For a - /// component of 'a' that is true, the corresponding - /// component of 'y' is returned. Components of 'x' and 'y' that - /// are not selected are allowed to be invalid floating point - /// values and will have no effect on the results. Thus, this - /// provides different functionality than - /// genType mix(genType x, genType y, genType(a)) - /// where a is a Boolean vector. - /// - /// @see GLSL mix man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - /// - /// @param[in] x Value to interpolate. - /// @param[in] y Value to interpolate. - /// @param[in] a Interpolant. - /// - /// @tparam genTypeT Floating point scalar or vector. - /// @tparam genTypeU Floating point or boolean scalar or vector. It can't be a vector if it is the length of genTypeT. - /// - /// @code - /// #include - /// ... - /// float a; - /// bool b; - /// glm::dvec3 e; - /// glm::dvec3 f; - /// glm::vec4 g; - /// glm::vec4 h; - /// ... - /// glm::vec4 r = glm::mix(g, h, a); // Interpolate with a floating-point scalar two vectors. - /// glm::vec4 s = glm::mix(g, h, b); // Returns g or h; - /// glm::dvec3 t = glm::mix(e, f, a); // Types of the third parameter is not required to match with the first and the second. 
- /// glm::vec4 u = glm::mix(g, h, r); // Interpolations can be perform per component with a vector for the last parameter. - /// @endcode - template - GLM_FUNC_DECL genTypeT mix(genTypeT x, genTypeT y, genTypeU a); - - template - GLM_FUNC_DECL vec mix(vec const& x, vec const& y, vec const& a); - - template - GLM_FUNC_DECL vec mix(vec const& x, vec const& y, U a); - - /// Returns 0.0 if x < edge, otherwise it returns 1.0 for each component of a genType. - /// - /// @see GLSL step man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType step(genType edge, genType x); - - /// Returns 0.0 if x < edge, otherwise it returns 1.0. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL step man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec step(T edge, vec const& x); - - /// Returns 0.0 if x < edge, otherwise it returns 1.0. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL step man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec step(vec const& edge, vec const& x); - - /// Returns 0.0 if x <= edge0 and 1.0 if x >= edge1 and - /// performs smooth Hermite interpolation between 0 and 1 - /// when edge0 < x < edge1. This is useful in cases where - /// you would want a threshold function with a smooth - /// transition. This is equivalent to: - /// genType t; - /// t = clamp ((x - edge0) / (edge1 - edge0), 0, 1); - /// return t * t * (3 - 2 * t); - /// Results are undefined if edge0 >= edge1. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL smoothstep man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType smoothstep(genType edge0, genType edge1, genType x); - - template - GLM_FUNC_DECL vec smoothstep(T edge0, T edge1, vec const& x); - - template - GLM_FUNC_DECL vec smoothstep(vec const& edge0, vec const& edge1, vec const& x); - - /// Returns true if x holds a NaN (not a number) - /// representation in the underlying implementation's set of - /// floating point representations. Returns false otherwise, - /// including for implementations with no NaN - /// representations. - /// - /// /!\ When using compiler fast math, this function may fail. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL isnan man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec isnan(vec const& x); - - /// Returns true if x holds a positive infinity or negative - /// infinity representation in the underlying implementation's - /// set of floating point representations. Returns false - /// otherwise, including for implementations with no infinity - /// representations. 
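// A hedged sketch (not from the original sources) of step, smoothstep and the
// component-wise NaN check documented above, assuming GLM is available as <glm/glm.hpp>:

#include <glm/glm.hpp>
#include <cassert>
#include <limits>

inline void step_and_nan_sketch()
{
	// step: 0.0 below the edge, 1.0 at or above it.
	assert(glm::step(0.5f, glm::vec2(0.25f, 0.75f)) == glm::vec2(0.0f, 1.0f));
	// smoothstep follows the Hermite formula t * t * (3 - 2 * t) with t clamped to [0, 1]:
	// here t = (1 - 0) / (4 - 0) = 0.25, so the result is 0.0625 * 2.5 = 0.15625.
	assert(glm::smoothstep(0.0f, 4.0f, 1.0f) == 0.15625f);
	// isnan works per component; as the documentation warns, fast-math builds may defeat it.
	const float nan = std::numeric_limits<float>::quiet_NaN();
	assert(glm::isnan(glm::vec2(nan, 1.0f)) == glm::bvec2(true, false));
}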
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL isinf man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec isinf(vec const& x); - - /// Returns a signed integer value representing - /// the encoding of a floating-point value. The floating-point - /// value's bit-level representation is preserved. - /// - /// @see GLSL floatBitsToInt man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL int floatBitsToInt(float const& v); - - /// Returns a signed integer value representing - /// the encoding of a floating-point value. The floatingpoint - /// value's bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL floatBitsToInt man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec floatBitsToInt(vec const& v); - - /// Returns a unsigned integer value representing - /// the encoding of a floating-point value. The floatingpoint - /// value's bit-level representation is preserved. - /// - /// @see GLSL floatBitsToUint man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL uint floatBitsToUint(float const& v); - - /// Returns a unsigned integer value representing - /// the encoding of a floating-point value. The floatingpoint - /// value's bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL floatBitsToUint man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec floatBitsToUint(vec const& v); - - /// Returns a floating-point value corresponding to a signed - /// integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. - /// - /// @see GLSL intBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL float intBitsToFloat(int const& v); - - /// Returns a floating-point value corresponding to a signed - /// integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL intBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec intBitsToFloat(vec const& v); - - /// Returns a floating-point value corresponding to a - /// unsigned integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. 
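// An illustrative round-trip sketch (not from the original sources) for the bit-cast
// functions documented above; the IEEE-754 bit pattern of 1.0f is 0x3F800000:

#include <glm/glm.hpp>
#include <cassert>

inline void bitcast_sketch()
{
	const int bits = glm::floatBitsToInt(1.0f);
	assert(bits == 0x3F800000);
	assert(glm::intBitsToFloat(bits) == 1.0f); // bit-level representation is preserved
}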
- /// - /// @see GLSL uintBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - GLM_FUNC_DECL float uintBitsToFloat(uint const& v); - - /// Returns a floating-point value corresponding to a - /// unsigned integer encoding of a floating-point value. - /// If an inf or NaN is passed in, it will not signal, and the - /// resulting floating point value is unspecified. Otherwise, - /// the bit-level representation is preserved. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL uintBitsToFloat man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL vec uintBitsToFloat(vec const& v); - - /// Computes and returns a * b + c. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL fma man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType fma(genType const& a, genType const& b, genType const& c); - - /// Splits x into a floating-point significand in the range - /// [0.5, 1.0) and an integral exponent of two, such that: - /// x = significand * exp(2, exponent) - /// - /// The significand is returned by the function and the - /// exponent is returned in the parameter exp. For a - /// floating-point value of zero, the significant and exponent - /// are both zero. For a floating-point value that is an - /// infinity or is not a number, the results are undefined. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL frexp man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType frexp(genType x, int& exp); - - template - GLM_FUNC_DECL vec frexp(vec const& v, vec& exp); - - /// Builds a floating-point number from x and the - /// corresponding integral exponent of two in exp, returning: - /// significand * exp(2, exponent) - /// - /// If this product is too large to be represented in the - /// floating-point type, the result is undefined. - /// - /// @tparam genType Floating-point scalar or vector types. 
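// A hedged sketch (not from the original sources) of the frexp/ldexp relationship
// documented above, using the <cmath> scalar versions; the GLM declarations above
// provide the same split/reassemble operation component-wise:

#include <cmath>
#include <cassert>

inline void frexp_ldexp_sketch()
{
	int exponent = 0;
	const double significand = std::frexp(6.5, &exponent); // 6.5 = 0.8125 * 2^3
	assert(significand == 0.8125 && exponent == 3);
	assert(std::ldexp(significand, exponent) == 6.5);       // reassembles the original value
}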
- /// - /// @see GLSL ldexp man page; - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL genType ldexp(genType const& x, int const& exp); - - template - GLM_FUNC_DECL vec ldexp(vec const& v, vec const& exp); - - /// @} -}//namespace glm - -#include "detail/func_common.inl" - diff --git a/third_party/glm/detail/_features.hpp b/third_party/glm/detail/_features.hpp deleted file mode 100755 index b0cbe9f..0000000 --- a/third_party/glm/detail/_features.hpp +++ /dev/null @@ -1,394 +0,0 @@ -#pragma once - -// #define GLM_CXX98_EXCEPTIONS -// #define GLM_CXX98_RTTI - -// #define GLM_CXX11_RVALUE_REFERENCES -// Rvalue references - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2118.html - -// GLM_CXX11_TRAILING_RETURN -// Rvalue references for *this - GCC not supported -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2439.htm - -// GLM_CXX11_NONSTATIC_MEMBER_INIT -// Initialization of class objects by rvalues - GCC any -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1610.html - -// GLM_CXX11_NONSTATIC_MEMBER_INIT -// Non-static data member initializers - GCC 4.7 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2008/n2756.htm - -// #define GLM_CXX11_VARIADIC_TEMPLATE -// Variadic templates - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2242.pdf - -// -// Extending variadic template template parameters - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2555.pdf - -// #define GLM_CXX11_GENERALIZED_INITIALIZERS -// Initializer lists - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm - -// #define GLM_CXX11_STATIC_ASSERT -// Static assertions - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1720.html - -// #define GLM_CXX11_AUTO_TYPE -// auto-typed variables - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1984.pdf - -// #define GLM_CXX11_AUTO_TYPE -// Multi-declarator auto - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1737.pdf - -// #define GLM_CXX11_AUTO_TYPE -// Removal of auto as a storage-class specifier - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2546.htm - -// #define GLM_CXX11_AUTO_TYPE -// New function declarator syntax - GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2541.htm - -// #define GLM_CXX11_LAMBDAS -// New wording for C++0x lambdas - GCC 4.5 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2927.pdf - -// #define GLM_CXX11_DECLTYPE -// Declared type of an expression - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2343.pdf - -// -// Right angle brackets - GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1757.html - -// -// Default template arguments for function templates DR226 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#226 - -// -// Solving the SFINAE problem for expressions DR339 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2634.html - -// #define GLM_CXX11_ALIAS_TEMPLATE -// Template aliases N2258 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf - -// -// Extern templates N1987 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1987.htm - -// #define GLM_CXX11_NULLPTR -// Null pointer constant N2431 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2431.pdf - -// #define GLM_CXX11_STRONG_ENUMS -// Strongly-typed enums N2347 GCC 4.4 
-// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2347.pdf - -// -// Forward declarations for enums N2764 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2764.pdf - -// -// Generalized attributes N2761 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2761.pdf - -// -// Generalized constant expressions N2235 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf - -// -// Alignment support N2341 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf - -// #define GLM_CXX11_DELEGATING_CONSTRUCTORS -// Delegating constructors N1986 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1986.pdf - -// -// Inheriting constructors N2540 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2540.htm - -// #define GLM_CXX11_EXPLICIT_CONVERSIONS -// Explicit conversion operators N2437 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf - -// -// New character types N2249 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2249.html - -// -// Unicode string literals N2442 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm - -// -// Raw string literals N2442 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2442.htm - -// -// Universal character name literals N2170 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2170.html - -// #define GLM_CXX11_USER_LITERALS -// User-defined literals N2765 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2765.pdf - -// -// Standard Layout Types N2342 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2342.htm - -// #define GLM_CXX11_DEFAULTED_FUNCTIONS -// #define GLM_CXX11_DELETED_FUNCTIONS -// Defaulted and deleted functions N2346 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2346.htm - -// -// Extended friend declarations N1791 GCC 4.7 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1791.pdf - -// -// Extending sizeof N2253 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2253.html - -// #define GLM_CXX11_INLINE_NAMESPACES -// Inline namespaces N2535 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2535.htm - -// #define GLM_CXX11_UNRESTRICTED_UNIONS -// Unrestricted unions N2544 GCC 4.6 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf - -// #define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS -// Local and unnamed types as template arguments N2657 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm - -// #define GLM_CXX11_RANGE_FOR -// Range-based for N2930 GCC 4.6 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2930.html - -// #define GLM_CXX11_OVERRIDE_CONTROL -// Explicit virtual overrides N2928 N3206 N3272 GCC 4.7 -// http://www.open-std.org/JTC1/SC22/WG21/docs/papers/2009/n2928.htm -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3206.htm -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3272.htm - -// -// Minimal support for garbage collection and reachability-based leak detection N2670 No -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2670.htm - -// #define GLM_CXX11_NOEXCEPT -// Allowing move constructors to throw [noexcept] N3050 GCC 4.6 (core language only) -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3050.html - -// -// Defining move special member functions N3053 GCC 4.6 -// 
http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2010/n3053.html - -// -// Sequence points N2239 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html - -// -// Atomic operations N2427 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2239.html - -// -// Strong Compare and Exchange N2748 GCC 4.5 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html - -// -// Bidirectional Fences N2752 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2752.htm - -// -// Memory model N2429 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2429.htm - -// -// Data-dependency ordering: atomics and memory model N2664 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm - -// -// Propagating exceptions N2179 GCC 4.4 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html - -// -// Abandoning a process and at_quick_exit N2440 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2440.htm - -// -// Allow atomics use in signal handlers N2547 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2547.htm - -// -// Thread-local storage N2659 GCC 4.8 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2659.htm - -// -// Dynamic initialization and destruction with concurrency N2660 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2660.htm - -// -// __func__ predefined identifier N2340 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2340.htm - -// -// C99 preprocessor N1653 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2004/n1653.htm - -// -// long long N1811 GCC 4.3 -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2005/n1811.pdf - -// -// Extended integral types N1988 Yes -// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n1988.pdf - -#if(GLM_COMPILER & GLM_COMPILER_GCC) - -# define GLM_CXX11_STATIC_ASSERT - -#elif(GLM_COMPILER & GLM_COMPILER_CLANG) -# if(__has_feature(cxx_exceptions)) -# define GLM_CXX98_EXCEPTIONS -# endif - -# if(__has_feature(cxx_rtti)) -# define GLM_CXX98_RTTI -# endif - -# if(__has_feature(cxx_access_control_sfinae)) -# define GLM_CXX11_ACCESS_CONTROL_SFINAE -# endif - -# if(__has_feature(cxx_alias_templates)) -# define GLM_CXX11_ALIAS_TEMPLATE -# endif - -# if(__has_feature(cxx_alignas)) -# define GLM_CXX11_ALIGNAS -# endif - -# if(__has_feature(cxx_attributes)) -# define GLM_CXX11_ATTRIBUTES -# endif - -# if(__has_feature(cxx_constexpr)) -# define GLM_CXX11_CONSTEXPR -# endif - -# if(__has_feature(cxx_decltype)) -# define GLM_CXX11_DECLTYPE -# endif - -# if(__has_feature(cxx_default_function_template_args)) -# define GLM_CXX11_DEFAULT_FUNCTION_TEMPLATE_ARGS -# endif - -# if(__has_feature(cxx_defaulted_functions)) -# define GLM_CXX11_DEFAULTED_FUNCTIONS -# endif - -# if(__has_feature(cxx_delegating_constructors)) -# define GLM_CXX11_DELEGATING_CONSTRUCTORS -# endif - -# if(__has_feature(cxx_deleted_functions)) -# define GLM_CXX11_DELETED_FUNCTIONS -# endif - -# if(__has_feature(cxx_explicit_conversions)) -# define GLM_CXX11_EXPLICIT_CONVERSIONS -# endif - -# if(__has_feature(cxx_generalized_initializers)) -# define GLM_CXX11_GENERALIZED_INITIALIZERS -# endif - -# if(__has_feature(cxx_implicit_moves)) -# define GLM_CXX11_IMPLICIT_MOVES -# endif - -# if(__has_feature(cxx_inheriting_constructors)) -# define GLM_CXX11_INHERITING_CONSTRUCTORS -# endif - -# if(__has_feature(cxx_inline_namespaces)) -# define GLM_CXX11_INLINE_NAMESPACES -# endif - -# 
if(__has_feature(cxx_lambdas)) -# define GLM_CXX11_LAMBDAS -# endif - -# if(__has_feature(cxx_local_type_template_args)) -# define GLM_CXX11_LOCAL_TYPE_TEMPLATE_ARGS -# endif - -# if(__has_feature(cxx_noexcept)) -# define GLM_CXX11_NOEXCEPT -# endif - -# if(__has_feature(cxx_nonstatic_member_init)) -# define GLM_CXX11_NONSTATIC_MEMBER_INIT -# endif - -# if(__has_feature(cxx_nullptr)) -# define GLM_CXX11_NULLPTR -# endif - -# if(__has_feature(cxx_override_control)) -# define GLM_CXX11_OVERRIDE_CONTROL -# endif - -# if(__has_feature(cxx_reference_qualified_functions)) -# define GLM_CXX11_REFERENCE_QUALIFIED_FUNCTIONS -# endif - -# if(__has_feature(cxx_range_for)) -# define GLM_CXX11_RANGE_FOR -# endif - -# if(__has_feature(cxx_raw_string_literals)) -# define GLM_CXX11_RAW_STRING_LITERALS -# endif - -# if(__has_feature(cxx_rvalue_references)) -# define GLM_CXX11_RVALUE_REFERENCES -# endif - -# if(__has_feature(cxx_static_assert)) -# define GLM_CXX11_STATIC_ASSERT -# endif - -# if(__has_feature(cxx_auto_type)) -# define GLM_CXX11_AUTO_TYPE -# endif - -# if(__has_feature(cxx_strong_enums)) -# define GLM_CXX11_STRONG_ENUMS -# endif - -# if(__has_feature(cxx_trailing_return)) -# define GLM_CXX11_TRAILING_RETURN -# endif - -# if(__has_feature(cxx_unicode_literals)) -# define GLM_CXX11_UNICODE_LITERALS -# endif - -# if(__has_feature(cxx_unrestricted_unions)) -# define GLM_CXX11_UNRESTRICTED_UNIONS -# endif - -# if(__has_feature(cxx_user_literals)) -# define GLM_CXX11_USER_LITERALS -# endif - -# if(__has_feature(cxx_variadic_templates)) -# define GLM_CXX11_VARIADIC_TEMPLATES -# endif - -#endif//(GLM_COMPILER & GLM_COMPILER_CLANG) diff --git a/third_party/glm/detail/_fixes.hpp b/third_party/glm/detail/_fixes.hpp deleted file mode 100755 index a503c7c..0000000 --- a/third_party/glm/detail/_fixes.hpp +++ /dev/null @@ -1,27 +0,0 @@ -#include - -//! Workaround for compatibility with other libraries -#ifdef max -#undef max -#endif - -//! Workaround for compatibility with other libraries -#ifdef min -#undef min -#endif - -//! Workaround for Android -#ifdef isnan -#undef isnan -#endif - -//! Workaround for Android -#ifdef isinf -#undef isinf -#endif - -//! 
Workaround for Chrone Native Client -#ifdef log2 -#undef log2 -#endif - diff --git a/third_party/glm/detail/_noise.hpp b/third_party/glm/detail/_noise.hpp deleted file mode 100755 index 5a874a0..0000000 --- a/third_party/glm/detail/_noise.hpp +++ /dev/null @@ -1,81 +0,0 @@ -#pragma once - -#include "../common.hpp" - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_QUALIFIER T mod289(T const& x) - { - return x - floor(x * (static_cast(1.0) / static_cast(289.0))) * static_cast(289.0); - } - - template - GLM_FUNC_QUALIFIER T permute(T const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> permute(vec<2, T, Q> const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> permute(vec<3, T, Q> const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> permute(vec<4, T, Q> const& x) - { - return mod289(((x * static_cast(34)) + static_cast(1)) * x); - } - - template - GLM_FUNC_QUALIFIER T taylorInvSqrt(T const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> taylorInvSqrt(vec<2, T, Q> const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> taylorInvSqrt(vec<3, T, Q> const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> taylorInvSqrt(vec<4, T, Q> const& r) - { - return static_cast(1.79284291400159) - static_cast(0.85373472095314) * r; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> fade(vec<2, T, Q> const& t) - { - return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> fade(vec<3, T, Q> const& t) - { - return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> fade(vec<4, T, Q> const& t) - { - return (t * t * t) * (t * (t * static_cast(6) - static_cast(15)) + static_cast(10)); - } -}//namespace detail -}//namespace glm - diff --git a/third_party/glm/detail/_swizzle.hpp b/third_party/glm/detail/_swizzle.hpp deleted file mode 100755 index 87896ef..0000000 --- a/third_party/glm/detail/_swizzle.hpp +++ /dev/null @@ -1,804 +0,0 @@ -#pragma once - -namespace glm{ -namespace detail -{ - // Internal class for implementing swizzle operators - template - struct _swizzle_base0 - { - protected: - GLM_FUNC_QUALIFIER T& elem(size_t i){ return (reinterpret_cast(_buffer))[i]; } - GLM_FUNC_QUALIFIER T const& elem(size_t i) const{ return (reinterpret_cast(_buffer))[i]; } - - // Use an opaque buffer to *ensure* the compiler doesn't call a constructor. 
- // The size 1 buffer is assumed to aligned to the actual members so that the - // elem() - char _buffer[1]; - }; - - template - struct _swizzle_base1 : public _swizzle_base0 - { - }; - - template - struct _swizzle_base1<2, T, Q, E0,E1,-1,-2, Aligned> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<2, T, Q> operator ()() const { return vec<2, T, Q>(this->elem(E0), this->elem(E1)); } - }; - - template - struct _swizzle_base1<3, T, Q, E0,E1,E2,-1, Aligned> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<3, T, Q> operator ()() const { return vec<3, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2)); } - }; - - template - struct _swizzle_base1<4, T, Q, E0,E1,E2,E3, Aligned> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, T, Q> operator ()() const { return vec<4, T, Q>(this->elem(E0), this->elem(E1), this->elem(E2), this->elem(E3)); } - }; - - // Internal class for implementing swizzle operators - /* - Template parameters: - - T = type of scalar values (e.g. float, double) - N = number of components in the vector (e.g. 3) - E0...3 = what index the n-th element of this swizzle refers to in the unswizzled vec - - DUPLICATE_ELEMENTS = 1 if there is a repeated element, 0 otherwise (used to specialize swizzles - containing duplicate elements so that they cannot be used as r-values). - */ - template - struct _swizzle_base2 : public _swizzle_base1::value> - { - struct op_equal - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e = t; } - }; - - struct op_minus - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e -= t; } - }; - - struct op_plus - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e += t; } - }; - - struct op_mul - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e *= t; } - }; - - struct op_div - { - GLM_FUNC_QUALIFIER void operator() (T& e, T& t) const{ e /= t; } - }; - - public: - GLM_FUNC_QUALIFIER _swizzle_base2& operator= (const T& t) - { - for (int i = 0; i < N; ++i) - (*this)[i] = t; - return *this; - } - - GLM_FUNC_QUALIFIER _swizzle_base2& operator= (vec const& that) - { - _apply_op(that, op_equal()); - return *this; - } - - GLM_FUNC_QUALIFIER void operator -= (vec const& that) - { - _apply_op(that, op_minus()); - } - - GLM_FUNC_QUALIFIER void operator += (vec const& that) - { - _apply_op(that, op_plus()); - } - - GLM_FUNC_QUALIFIER void operator *= (vec const& that) - { - _apply_op(that, op_mul()); - } - - GLM_FUNC_QUALIFIER void operator /= (vec const& that) - { - _apply_op(that, op_div()); - } - - GLM_FUNC_QUALIFIER T& operator[](size_t i) - { - const int offset_dst[4] = { E0, E1, E2, E3 }; - return this->elem(offset_dst[i]); - } - GLM_FUNC_QUALIFIER T operator[](size_t i) const - { - const int offset_dst[4] = { E0, E1, E2, E3 }; - return this->elem(offset_dst[i]); - } - - protected: - template - GLM_FUNC_QUALIFIER void _apply_op(vec const& that, const U& op) - { - // Make a copy of the data in this == &that. - // The copier should optimize out the copy in cases where the function is - // properly inlined and the copy is not necessary. - T t[N]; - for (int i = 0; i < N; ++i) - t[i] = that[i]; - for (int i = 0; i < N; ++i) - op( (*this)[i], t[i] ); - } - }; - - // Specialization for swizzles containing duplicate elements. These cannot be modified. 
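// An illustrative sketch (not from the original sources) of the swizzle semantics
// described above; it assumes GLM_FORCE_SWIZZLE is defined before including GLM and
// a compiler/configuration that exposes the anonymous-struct swizzle members:

#define GLM_FORCE_SWIZZLE
#include <glm/glm.hpp>

inline void swizzle_sketch()
{
	glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);

	glm::vec2 zy = v.zy;            // distinct indices: usable as an r-value
	v.xy = glm::vec2(9.0f, 8.0f);   // ...and as an l-value (writes back into v.x and v.y)

	glm::vec2 xx = v.xx;            // duplicate indices: still readable...
	// v.xx = glm::vec2(0.0f);      // ...but not assignable; this line would not compile
	(void)zy; (void)xx;
}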
- template - struct _swizzle_base2 : public _swizzle_base1::value> - { - struct Stub {}; - - GLM_FUNC_QUALIFIER _swizzle_base2& operator= (Stub const&) { return *this; } - - GLM_FUNC_QUALIFIER T operator[] (size_t i) const - { - const int offset_dst[4] = { E0, E1, E2, E3 }; - return this->elem(offset_dst[i]); - } - }; - - template - struct _swizzle : public _swizzle_base2 - { - typedef _swizzle_base2 base_type; - - using base_type::operator=; - - GLM_FUNC_QUALIFIER operator vec () const { return (*this)(); } - }; - -// -// To prevent the C++ syntax from getting entirely overwhelming, define some alias macros -// -#define GLM_SWIZZLE_TEMPLATE1 template -#define GLM_SWIZZLE_TEMPLATE2 template -#define GLM_SWIZZLE_TYPE1 _swizzle -#define GLM_SWIZZLE_TYPE2 _swizzle - -// -// Wrapper for a binary operator (e.g. u.yy + v.zy) -// -#define GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ - GLM_SWIZZLE_TEMPLATE2 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \ - { \ - return a() OPERAND b(); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const vec& b) \ - { \ - return a() OPERAND b; \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const vec& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return a OPERAND b(); \ - } - -// -// Wrapper for a operand between a swizzle and a binary (e.g. 1.0f - u.xyz) -// -#define GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(OPERAND) \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const GLM_SWIZZLE_TYPE1& a, const T& b) \ - { \ - return a() OPERAND b; \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER vec operator OPERAND ( const T& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return a OPERAND b(); \ - } - -// -// Macro for wrapping a function taking one argument (e.g. abs()) -// -#define GLM_SWIZZLE_FUNCTION_1_ARGS(RETURN_TYPE,FUNCTION) \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a) \ - { \ - return FUNCTION(a()); \ - } - -// -// Macro for wrapping a function taking two vector arguments (e.g. dot()). -// -#define GLM_SWIZZLE_FUNCTION_2_ARGS(RETURN_TYPE,FUNCTION) \ - GLM_SWIZZLE_TEMPLATE2 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b) \ - { \ - return FUNCTION(a(), b()); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return FUNCTION(a(), b()); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename V& b) \ - { \ - return FUNCTION(a(), b); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const V& a, const GLM_SWIZZLE_TYPE1& b) \ - { \ - return FUNCTION(a, b()); \ - } - -// -// Macro for wrapping a function take 2 vec arguments followed by a scalar (e.g. mix()). 
-// -#define GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(RETURN_TYPE,FUNCTION) \ - GLM_SWIZZLE_TEMPLATE2 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE2& b, const T& c) \ - { \ - return FUNCTION(a(), b(), c); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ - { \ - return FUNCTION(a(), b(), c); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const GLM_SWIZZLE_TYPE1& a, const typename S0::vec_type& b, const T& c)\ - { \ - return FUNCTION(a(), b, c); \ - } \ - GLM_SWIZZLE_TEMPLATE1 \ - GLM_FUNC_QUALIFIER typename GLM_SWIZZLE_TYPE1::RETURN_TYPE FUNCTION(const typename V& a, const GLM_SWIZZLE_TYPE1& b, const T& c) \ - { \ - return FUNCTION(a, b(), c); \ - } - -}//namespace detail -}//namespace glm - -namespace glm -{ - namespace detail - { - GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(-) - GLM_SWIZZLE_SCALAR_BINARY_OPERATOR_IMPLEMENTATION(*) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(+) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(-) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(*) - GLM_SWIZZLE_VECTOR_BINARY_OPERATOR_IMPLEMENTATION(/) - } - - // - // Swizzles are distinct types from the unswizzled type. The below macros will - // provide template specializations for the swizzle types for the given functions - // so that the compiler does not have any ambiguity to choosing how to handle - // the function. - // - // The alternative is to use the operator()() when calling the function in order - // to explicitly convert the swizzled type to the unswizzled type. - // - - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, abs); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acos); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, acosh); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, all); - //GLM_SWIZZLE_FUNCTION_1_ARGS(vec_type, any); - - //GLM_SWIZZLE_FUNCTION_2_ARGS(value_type, dot); - //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, cross); - //GLM_SWIZZLE_FUNCTION_2_ARGS(vec_type, step); - //GLM_SWIZZLE_FUNCTION_2_ARGS_SCALAR(vec_type, mix); -} - -#define GLM_SWIZZLE2_2_MEMBERS(T, Q, E0,E1) \ - struct { detail::_swizzle<2, T, Q, 0,0,-1,-2> E0 ## E0; }; \ - struct { detail::_swizzle<2, T, Q, 0,1,-1,-2> E0 ## E1; }; \ - struct { detail::_swizzle<2, T, Q, 1,0,-1,-2> E1 ## E0; }; \ - struct { detail::_swizzle<2, T, Q, 1,1,-1,-2> E1 ## E1; }; - -#define GLM_SWIZZLE2_3_MEMBERS(T, Q, E0,E1) \ - struct { detail::_swizzle<3,T, Q, 0,0,0,-1> E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 0,0,1,-1> E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<3,T, Q, 0,1,0,-1> E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 0,1,1,-1> E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<3,T, Q, 1,0,0,-1> E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 1,0,1,-1> E1 ## E0 ## E1; }; \ - struct { detail::_swizzle<3,T, Q, 1,1,0,-1> E1 ## E1 ## E0; }; \ - struct { detail::_swizzle<3,T, Q, 1,1,1,-1> E1 ## E1 ## E1; }; - -#define GLM_SWIZZLE2_4_MEMBERS(T, Q, E0,E1) \ - struct { detail::_swizzle<4,T, Q, 0,0,0,0> E0 ## E0 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,0,1> E0 ## E0 ## E0 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,1,0> E0 ## E0 ## E1 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,0,1,1> E0 ## E0 ## E1 ## E1; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,0,0> E0 ## E1 ## E0 ## E0; }; \ - struct { detail::_swizzle<4,T, Q, 0,1,0,1> E0 
## E1 ## E0 ## E1; }; \
[... remainder of the deleted _swizzle.hpp member tables elided: GLM_SWIZZLE2_4_MEMBERS, GLM_SWIZZLE3_2_MEMBERS, GLM_SWIZZLE3_3_MEMBERS, GLM_SWIZZLE3_4_MEMBERS, GLM_SWIZZLE4_2_MEMBERS, GLM_SWIZZLE4_3_MEMBERS and GLM_SWIZZLE4_4_MEMBERS each emit one anonymous "struct { detail::_swizzle<L, T, Q, ...> E...; };" member per component permutation of their E0..E3 arguments ...]
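
For reference, a minimal sketch of what the deleted swizzle machinery provides to GLM users, assuming a consumer translation unit that defines GLM_FORCE_SWIZZLE before including GLM; the helper names extract_rgb and extract_xy are illustrative only, not taken from this repository:

    #define GLM_FORCE_SWIZZLE          // enable the swizzle members/functions these headers generate
    #include <glm/glm.hpp>

    glm::vec3 extract_rgb(glm::vec4 const& c)
    {
        return c.rgb();                // an accessor is generated for every component permutation
    }

    glm::vec2 extract_xy(glm::vec4 const& c)
    {
        return c.xy();
    }

Depending on configuration, GLM exposes these either as member functions like c.rgb() (from _swizzle_func.hpp, deleted below) or as plain data members like c.rgb (built from the detail::_swizzle<> members deleted above).
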
diff --git a/third_party/glm/detail/_swizzle_func.hpp b/third_party/glm/detail/_swizzle_func.hpp
deleted file mode 100755
index d93c6af..0000000
--- a/third_party/glm/detail/_swizzle_func.hpp
+++ /dev/null
@@ -1,682 +0,0 @@
-#pragma once
-
-#define GLM_SWIZZLE_GEN_VEC2_ENTRY(T, P, CONST, A, B) \
-	vec<2, T, Q> A ## B() CONST \
-	{ \
-		return vec<2, T, Q>(this->A, this->B); \
-	}
-
-#define GLM_SWIZZLE_GEN_VEC3_ENTRY(T, P, CONST, A, B, C) \
-	vec<3, T, Q> A ## B ## C() CONST \
-	{ \
-		return vec<3, T, Q>(this->A, this->B, this->C); \
-	}
-
-#define GLM_SWIZZLE_GEN_VEC4_ENTRY(T, P, CONST, A, B, C, D) \
-	vec<4, T, Q> A ## B ## C ## D() CONST \
-	{ \
-		return vec<4, T, Q>(this->A, this->B, this->C, this->D); \
-	}
[... remaining deleted lines of _swizzle_func.hpp elided: the GLM_SWIZZLE_GEN_*_ENTRY_DEF, GLM_SWIZZLE_GEN_REF*_FROM_VEC* and GLM_SWIZZLE_GEN_VEC*_FROM_VEC* macros instantiate the entry macros above for every xyzw / rgba / stpq component combination of vec2, vec3 and vec4 ...]
diff --git a/third_party/glm/detail/_vectorize.hpp b/third_party/glm/detail/_vectorize.hpp
deleted file mode 100755
index 1fcaec3..0000000
--- a/third_party/glm/detail/_vectorize.hpp
+++ /dev/null
@@ -1,162 +0,0 @@
-#pragma once
-
-namespace glm{
-namespace detail
-{
[... deleted body elided: functor1, functor2, functor2_vec_sca and functor2_vec_int specializations that apply a scalar callback to each component of vec<1> through vec<4> ...]
-}//namespace detail
-}//namespace glm
typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<1, T, Q> call(T (*Func) (T x, T y), vec<1, T, Q> const& a, T b) - { - return vec<1, T, Q>(Func(a.x, b)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<2, T, Q> call(T (*Func) (T x, T y), vec<2, T, Q> const& a, T b) - { - return vec<2, T, Q>(Func(a.x, b), Func(a.y, b)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<3, T, Q> call(T (*Func) (T x, T y), vec<3, T, Q> const& a, T b) - { - return vec<3, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b)); - } - }; - - template class vec, typename T, qualifier Q> - struct functor2_vec_sca - { - GLM_FUNC_QUALIFIER static vec<4, T, Q> call(T (*Func) (T x, T y), vec<4, T, Q> const& a, T b) - { - return vec<4, T, Q>(Func(a.x, b), Func(a.y, b), Func(a.z, b), Func(a.w, b)); - } - }; - - template - struct functor2_vec_int {}; - - template - struct functor2_vec_int<1, T, Q> - { - GLM_FUNC_QUALIFIER static vec<1, int, Q> call(int (*Func) (T x, int y), vec<1, T, Q> const& a, vec<1, int, Q> const& b) - { - return vec<1, int, Q>(Func(a.x, b.x)); - } - }; - - template - struct functor2_vec_int<2, T, Q> - { - GLM_FUNC_QUALIFIER static vec<2, int, Q> call(int (*Func) (T x, int y), vec<2, T, Q> const& a, vec<2, int, Q> const& b) - { - return vec<2, int, Q>(Func(a.x, b.x), Func(a.y, b.y)); - } - }; - - template - struct functor2_vec_int<3, T, Q> - { - GLM_FUNC_QUALIFIER static vec<3, int, Q> call(int (*Func) (T x, int y), vec<3, T, Q> const& a, vec<3, int, Q> const& b) - { - return vec<3, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z)); - } - }; - - template - struct functor2_vec_int<4, T, Q> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(int (*Func) (T x, int y), vec<4, T, Q> const& a, vec<4, int, Q> const& b) - { - return vec<4, int, Q>(Func(a.x, b.x), Func(a.y, b.y), Func(a.z, b.z), Func(a.w, b.w)); - } - }; -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/compute_common.hpp b/third_party/glm/detail/compute_common.hpp deleted file mode 100755 index cc24b9e..0000000 --- a/third_party/glm/detail/compute_common.hpp +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include "setup.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct compute_abs - {}; - - template - struct compute_abs - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x) - { - GLM_STATIC_ASSERT( - std::numeric_limits::is_iec559 || std::numeric_limits::is_signed, - "'abs' only accept floating-point and integer scalar or vector inputs"); - - return x >= genFIType(0) ? 
x : -x; - // TODO, perf comp with: *(((int *) &x) + 1) &= 0x7fffffff; - } - }; - -#if GLM_COMPILER & GLM_COMPILER_CUDA - template<> - struct compute_abs - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static float call(float x) - { - return fabsf(x); - } - }; -#endif - - template - struct compute_abs - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genFIType call(genFIType x) - { - GLM_STATIC_ASSERT( - (!std::numeric_limits::is_signed && std::numeric_limits::is_integer), - "'abs' only accept floating-point and integer scalar or vector inputs"); - return x; - } - }; -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/compute_vector_relational.hpp b/third_party/glm/detail/compute_vector_relational.hpp deleted file mode 100755 index 167b634..0000000 --- a/third_party/glm/detail/compute_vector_relational.hpp +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -//#include "compute_common.hpp" -#include "setup.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct compute_equal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b) - { - return a == b; - } - }; -/* - template - struct compute_equal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(T a, T b) - { - return detail::compute_abs::is_signed>::call(b - a) <= static_cast(0); - //return std::memcmp(&a, &b, sizeof(T)) == 0; - } - }; -*/ -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/func_common.inl b/third_party/glm/detail/func_common.inl deleted file mode 100755 index 4b5f144..0000000 --- a/third_party/glm/detail/func_common.inl +++ /dev/null @@ -1,792 +0,0 @@ -/// @ref core -/// @file glm/detail/func_common.inl - -#include "../vector_relational.hpp" -#include "compute_common.hpp" -#include "type_vec1.hpp" -#include "type_vec2.hpp" -#include "type_vec3.hpp" -#include "type_vec4.hpp" -#include "_vectorize.hpp" -#include - -namespace glm -{ - // min - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType min(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); - return (y < x) ? y : x; - } - - // max - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType max(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); - - return (x < y) ? y : x; - } - - // abs - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR int abs(int x) - { - int const y = x >> (sizeof(int) * 8 - 1); - return (x ^ y) - y; - } - - // round -# if GLM_HAS_CXX11_STL - using ::std::round; -# else - template - GLM_FUNC_QUALIFIER genType round(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'round' only accept floating-point inputs"); - - return x < static_cast(0) ? static_cast(int(x - static_cast(0.5))) : static_cast(int(x + static_cast(0.5))); - } -# endif - - // trunc -# if GLM_HAS_CXX11_STL - using ::std::trunc; -# else - template - GLM_FUNC_QUALIFIER genType trunc(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'trunc' only accept floating-point inputs"); - - return x < static_cast(0) ? 
-std::floor(-x) : std::floor(x); - } -# endif - -}//namespace glm - -namespace glm{ -namespace detail -{ - template - struct compute_abs_vector - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(vec const& x) - { - return detail::functor1::call(abs, x); - } - }; - - template - struct compute_mix_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, vec const& a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); - - return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); - } - }; - - template - struct compute_mix_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, vec const& a) - { - vec Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = a[i] ? y[i] : x[i]; - return Result; - } - }; - - template - struct compute_mix_scalar - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, U const& a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); - - return vec(vec(x) * (static_cast(1) - a) + vec(y) * a); - } - }; - - template - struct compute_mix_scalar - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y, bool const& a) - { - return a ? y : x; - } - }; - - template - struct compute_mix - { - GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, U const& a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'mix' only accept floating-point inputs for the interpolator a"); - - return static_cast(static_cast(x) * (static_cast(1) - a) + static_cast(y) * a); - } - }; - - template - struct compute_mix - { - GLM_FUNC_QUALIFIER static T call(T const& x, T const& y, bool const& a) - { - return a ? y : x; - } - }; - - template - struct compute_sign - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return vec(glm::lessThan(vec(0), x)) - vec(glm::lessThan(x, vec(0))); - } - }; - -# if GLM_ARCH == GLM_ARCH_X86 - template - struct compute_sign - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - T const Shift(static_cast(sizeof(T) * 8 - 1)); - vec const y(vec::type, Q>(-x) >> typename detail::make_unsigned::type(Shift)); - - return (x >> Shift) | y; - } - }; -# endif - - template - struct compute_floor - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(std::floor, x); - } - }; - - template - struct compute_ceil - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(std::ceil, x); - } - }; - - template - struct compute_fract - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return x - floor(x); - } - }; - - template - struct compute_trunc - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(trunc, x); - } - }; - - template - struct compute_round - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(round, x); - } - }; - - template - struct compute_mod - { - GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'mod' only accept floating-point inputs. 
Include for integer inputs."); - return a - b * floor(a / b); - } - }; - - template - struct compute_min_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) - { - return detail::functor2::call(min, x, y); - } - }; - - template - struct compute_max_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& y) - { - return detail::functor2::call(max, x, y); - } - }; - - template - struct compute_clamp_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, vec const& minVal, vec const& maxVal) - { - return min(max(x, minVal), maxVal); - } - }; - - template - struct compute_step_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& edge, vec const& x) - { - return mix(vec(1), vec(0), glm::lessThan(x, edge)); - } - }; - - template - struct compute_smoothstep_vector - { - GLM_FUNC_QUALIFIER static vec call(vec const& edge0, vec const& edge1, vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); - vec const tmp(clamp((x - edge0) / (edge1 - edge0), static_cast(0), static_cast(1))); - return tmp * tmp * (static_cast(3) - static_cast(2) * tmp); - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genFIType abs(genFIType x) - { - return detail::compute_abs::is_signed>::call(x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec abs(vec const& x) - { - return detail::compute_abs_vector::value>::call(x); - } - - // sign - // fast and works for any type - template - GLM_FUNC_QUALIFIER genFIType sign(genFIType x) - { - GLM_STATIC_ASSERT( - std::numeric_limits::is_iec559 || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), - "'sign' only accept signed inputs"); - - return detail::compute_sign<1, genFIType, defaultp, - std::numeric_limits::is_iec559, detail::is_aligned::value>::call(vec<1, genFIType>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec sign(vec const& x) - { - GLM_STATIC_ASSERT( - std::numeric_limits::is_iec559 || (std::numeric_limits::is_signed && std::numeric_limits::is_integer), - "'sign' only accept signed inputs"); - - return detail::compute_sign::is_iec559, detail::is_aligned::value>::call(x); - } - - // floor - using ::std::floor; - template - GLM_FUNC_QUALIFIER vec floor(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'floor' only accept floating-point inputs."); - return detail::compute_floor::value>::call(x); - } - - template - GLM_FUNC_QUALIFIER vec trunc(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'trunc' only accept floating-point inputs"); - return detail::compute_trunc::value>::call(x); - } - - template - GLM_FUNC_QUALIFIER vec round(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'round' only accept floating-point inputs"); - return detail::compute_round::value>::call(x); - } - -/* - // roundEven - template - GLM_FUNC_QUALIFIER genType roundEven(genType const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'roundEven' only accept floating-point inputs"); - - return genType(int(x + genType(int(x) % 2))); - } -*/ - - // roundEven - template - GLM_FUNC_QUALIFIER genType roundEven(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'roundEven' only accept floating-point inputs"); - - int Integer = static_cast(x); - genType IntegerPart = static_cast(Integer); - genType FractionalPart = fract(x); - - if(FractionalPart > static_cast(0.5) || FractionalPart < static_cast(0.5)) - { - return 
round(x); - } - else if((Integer % 2) == 0) - { - return IntegerPart; - } - else if(x <= static_cast(0)) // Work around... - { - return IntegerPart - static_cast(1); - } - else - { - return IntegerPart + static_cast(1); - } - //else // Bug on MinGW 4.5.2 - //{ - // return mix(IntegerPart + genType(-1), IntegerPart + genType(1), x <= genType(0)); - //} - } - - template - GLM_FUNC_QUALIFIER vec roundEven(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'roundEven' only accept floating-point inputs"); - return detail::functor1::call(roundEven, x); - } - - // ceil - using ::std::ceil; - template - GLM_FUNC_QUALIFIER vec ceil(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'ceil' only accept floating-point inputs"); - return detail::compute_ceil::value>::call(x); - } - - // fract - template - GLM_FUNC_QUALIFIER genType fract(genType x) - { - return fract(vec<1, genType>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec fract(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fract' only accept floating-point inputs"); - return detail::compute_fract::value>::call(x); - } - - // mod - template - GLM_FUNC_QUALIFIER genType mod(genType x, genType y) - { -# if GLM_COMPILER & GLM_COMPILER_CUDA - // Another Cuda compiler bug https://github.com/g-truc/glm/issues/530 - vec<1, genType, defaultp> Result(mod(vec<1, genType, defaultp>(x), y)); - return Result.x; -# else - return mod(vec<1, genType, defaultp>(x), y).x; -# endif - } - - template - GLM_FUNC_QUALIFIER vec mod(vec const& x, T y) - { - return detail::compute_mod::value>::call(x, vec(y)); - } - - template - GLM_FUNC_QUALIFIER vec mod(vec const& x, vec const& y) - { - return detail::compute_mod::value>::call(x, y); - } - - // modf - template - GLM_FUNC_QUALIFIER genType modf(genType x, genType & i) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'modf' only accept floating-point inputs"); - return std::modf(x, &i); - } - - template - GLM_FUNC_QUALIFIER vec<1, T, Q> modf(vec<1, T, Q> const& x, vec<1, T, Q> & i) - { - return vec<1, T, Q>( - modf(x.x, i.x)); - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> modf(vec<2, T, Q> const& x, vec<2, T, Q> & i) - { - return vec<2, T, Q>( - modf(x.x, i.x), - modf(x.y, i.y)); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> modf(vec<3, T, Q> const& x, vec<3, T, Q> & i) - { - return vec<3, T, Q>( - modf(x.x, i.x), - modf(x.y, i.y), - modf(x.z, i.z)); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> modf(vec<4, T, Q> const& x, vec<4, T, Q> & i) - { - return vec<4, T, Q>( - modf(x.x, i.x), - modf(x.y, i.y), - modf(x.z, i.z), - modf(x.w, i.w)); - } - - //// Only valid if (INT_MIN <= x-y <= INT_MAX) - //// min(x,y) - //r = y + ((x - y) & ((x - y) >> (sizeof(int) * - //CHAR_BIT - 1))); - //// max(x,y) - //r = x - ((x - y) & ((x - y) >> (sizeof(int) * - //CHAR_BIT - 1))); - - // min - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'min' only accept floating-point or integer inputs"); - return detail::compute_min_vector::value>::call(a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& a, vec const& b) - { - return detail::compute_min_vector::value>::call(a, b); - } - - // max - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'max' only accept floating-point or integer inputs"); - 
return detail::compute_max_vector::value>::call(a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& a, vec const& b) - { - return detail::compute_max_vector::value>::call(a, b); - } - - // clamp - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType clamp(genType x, genType minVal, genType maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); - return min(max(x, minVal), maxVal); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, T minVal, T maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); - return detail::compute_clamp_vector::value>::call(x, vec(minVal), vec(maxVal)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec clamp(vec const& x, vec const& minVal, vec const& maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer, "'clamp' only accept floating-point or integer inputs"); - return detail::compute_clamp_vector::value>::call(x, minVal, maxVal); - } - - template - GLM_FUNC_QUALIFIER genTypeT mix(genTypeT x, genTypeT y, genTypeU a) - { - return detail::compute_mix::call(x, y, a); - } - - template - GLM_FUNC_QUALIFIER vec mix(vec const& x, vec const& y, U a) - { - return detail::compute_mix_scalar::value>::call(x, y, a); - } - - template - GLM_FUNC_QUALIFIER vec mix(vec const& x, vec const& y, vec const& a) - { - return detail::compute_mix_vector::value>::call(x, y, a); - } - - // step - template - GLM_FUNC_QUALIFIER genType step(genType edge, genType x) - { - return mix(static_cast(1), static_cast(0), x < edge); - } - - template - GLM_FUNC_QUALIFIER vec step(T edge, vec const& x) - { - return detail::compute_step_vector::value>::call(vec(edge), x); - } - - template - GLM_FUNC_QUALIFIER vec step(vec const& edge, vec const& x) - { - return detail::compute_step_vector::value>::call(edge, x); - } - - // smoothstep - template - GLM_FUNC_QUALIFIER genType smoothstep(genType edge0, genType edge1, genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'smoothstep' only accept floating-point inputs"); - - genType const tmp(clamp((x - edge0) / (edge1 - edge0), genType(0), genType(1))); - return tmp * tmp * (genType(3) - genType(2) * tmp); - } - - template - GLM_FUNC_QUALIFIER vec smoothstep(T edge0, T edge1, vec const& x) - { - return detail::compute_smoothstep_vector::value>::call(vec(edge0), vec(edge1), x); - } - - template - GLM_FUNC_QUALIFIER vec smoothstep(vec const& edge0, vec const& edge1, vec const& x) - { - return detail::compute_smoothstep_vector::value>::call(edge0, edge1, x); - } - -# if GLM_HAS_CXX11_STL - using std::isnan; -# else - template - GLM_FUNC_QUALIFIER bool isnan(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isnan' only accept floating-point inputs"); - -# if GLM_HAS_CXX11_STL - return std::isnan(x); -# elif GLM_COMPILER & GLM_COMPILER_VC - return _isnan(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_INTEL -# if GLM_PLATFORM & GLM_PLATFORM_WINDOWS - return _isnan(x) != 0; -# else - return ::isnan(x) != 0; -# endif -# elif (GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG)) && (GLM_PLATFORM & GLM_PLATFORM_ANDROID) && __cplusplus < 201103L - return _isnan(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_CUDA - return ::isnan(x) != 0; -# else - return std::isnan(x); -# endif - } -# endif - - template 
- GLM_FUNC_QUALIFIER vec isnan(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isnan' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = glm::isnan(v[l]); - return Result; - } - -# if GLM_HAS_CXX11_STL - using std::isinf; -# else - template - GLM_FUNC_QUALIFIER bool isinf(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isinf' only accept floating-point inputs"); - -# if GLM_HAS_CXX11_STL - return std::isinf(x); -# elif GLM_COMPILER & (GLM_COMPILER_INTEL | GLM_COMPILER_VC) -# if(GLM_PLATFORM & GLM_PLATFORM_WINDOWS) - return _fpclass(x) == _FPCLASS_NINF || _fpclass(x) == _FPCLASS_PINF; -# else - return ::isinf(x); -# endif -# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) -# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID && __cplusplus < 201103L) - return _isinf(x) != 0; -# else - return std::isinf(x); -# endif -# elif GLM_COMPILER & GLM_COMPILER_CUDA - // http://developer.download.nvidia.com/compute/cuda/4_2/rel/toolkit/docs/online/group__CUDA__MATH__DOUBLE_g13431dd2b40b51f9139cbb7f50c18fab.html#g13431dd2b40b51f9139cbb7f50c18fab - return ::isinf(double(x)) != 0; -# else - return std::isinf(x); -# endif - } -# endif - - template - GLM_FUNC_QUALIFIER vec isinf(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isinf' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = glm::isinf(v[l]); - return Result; - } - - GLM_FUNC_QUALIFIER int floatBitsToInt(float const& v) - { - union - { - float in; - int out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec floatBitsToInt(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - - GLM_FUNC_QUALIFIER uint floatBitsToUint(float const& v) - { - union - { - float in; - uint out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec floatBitsToUint(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - - GLM_FUNC_QUALIFIER float intBitsToFloat(int const& v) - { - union - { - int in; - float out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec intBitsToFloat(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - - GLM_FUNC_QUALIFIER float uintBitsToFloat(uint const& v) - { - union - { - uint in; - float out; - } u; - - u.in = v; - - return u.out; - } - - template - GLM_FUNC_QUALIFIER vec uintBitsToFloat(vec const& v) - { - return reinterpret_cast&>(const_cast&>(v)); - } - -# if GLM_HAS_CXX11_STL - using std::fma; -# else - template - GLM_FUNC_QUALIFIER genType fma(genType const& a, genType const& b, genType const& c) - { - return a * b + c; - } -# endif - - template - GLM_FUNC_QUALIFIER genType frexp(genType x, int& exp) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'frexp' only accept floating-point inputs"); - - return std::frexp(x, &exp); - } - - template - GLM_FUNC_QUALIFIER vec frexp(vec const& v, vec& exp) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'frexp' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = std::frexp(v[l], &exp[l]); - return Result; - } - - template - GLM_FUNC_QUALIFIER genType ldexp(genType const& x, int const& exp) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'ldexp' only accept floating-point inputs"); - - return std::ldexp(x, exp); - } - - template - GLM_FUNC_QUALIFIER vec ldexp(vec const& v, vec const& exp) - { - 
GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'ldexp' only accept floating-point inputs"); - - vec Result; - for (length_t l = 0; l < v.length(); ++l) - Result[l] = std::ldexp(v[l], exp[l]); - return Result; - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_common_simd.inl" -#endif diff --git a/third_party/glm/detail/func_common_simd.inl b/third_party/glm/detail/func_common_simd.inl deleted file mode 100755 index ce0032d..0000000 --- a/third_party/glm/detail/func_common_simd.inl +++ /dev/null @@ -1,231 +0,0 @@ -/// @ref core -/// @file glm/detail/func_common_simd.inl - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#include "../simd/common.h" - -#include - -namespace glm{ -namespace detail -{ - template - struct compute_abs_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_abs(v.data); - return result; - } - }; - - template - struct compute_abs_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v) - { - vec<4, int, Q> result; - result.data = glm_ivec4_abs(v.data); - return result; - } - }; - - template - struct compute_floor<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_floor(v.data); - return result; - } - }; - - template - struct compute_ceil<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_ceil(v.data); - return result; - } - }; - - template - struct compute_fract<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_fract(v.data); - return result; - } - }; - - template - struct compute_round<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> result; - result.data = glm_vec4_round(v.data); - return result; - } - }; - - template - struct compute_mod<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) - { - vec<4, float, Q> result; - result.data = glm_vec4_mod(x.data, y.data); - return result; - } - }; - - template - struct compute_min_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - vec<4, float, Q> result; - result.data = _mm_min_ps(v1.data, v2.data); - return result; - } - }; - - template - struct compute_min_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - vec<4, int, Q> result; - result.data = _mm_min_epi32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_min_vector<4, uint, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - vec<4, uint, Q> result; - result.data = _mm_min_epu32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_max_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - vec<4, float, Q> result; - result.data = _mm_max_ps(v1.data, v2.data); - return result; - } - }; - - template - struct compute_max_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static 
vec<4, int, Q> call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - vec<4, int, Q> result; - result.data = _mm_max_epi32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_max_vector<4, uint, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - vec<4, uint, Q> result; - result.data = _mm_max_epu32(v1.data, v2.data); - return result; - } - }; - - template - struct compute_clamp_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& minVal, vec<4, float, Q> const& maxVal) - { - vec<4, float, Q> result; - result.data = _mm_min_ps(_mm_max_ps(x.data, minVal.data), maxVal.data); - return result; - } - }; - - template - struct compute_clamp_vector<4, int, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& x, vec<4, int, Q> const& minVal, vec<4, int, Q> const& maxVal) - { - vec<4, int, Q> result; - result.data = _mm_min_epi32(_mm_max_epi32(x.data, minVal.data), maxVal.data); - return result; - } - }; - - template - struct compute_clamp_vector<4, uint, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& x, vec<4, uint, Q> const& minVal, vec<4, uint, Q> const& maxVal) - { - vec<4, uint, Q> result; - result.data = _mm_min_epu32(_mm_max_epu32(x.data, minVal.data), maxVal.data); - return result; - } - }; - - template - struct compute_mix_vector<4, float, bool, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& x, vec<4, float, Q> const& y, vec<4, bool, Q> const& a) - { - __m128i const Load = _mm_set_epi32(-static_cast(a.w), -static_cast(a.z), -static_cast(a.y), -static_cast(a.x)); - __m128 const Mask = _mm_castsi128_ps(Load); - - vec<4, float, Q> Result; -# if 0 && GLM_ARCH & GLM_ARCH_AVX - Result.data = _mm_blendv_ps(x.data, y.data, Mask); -# else - Result.data = _mm_or_ps(_mm_and_ps(Mask, y.data), _mm_andnot_ps(Mask, x.data)); -# endif - return Result; - } - }; -/* FIXME - template - struct compute_step_vector - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge, vec<4, float, Q> const& x) - { - vec<4, float, Q> Result; - result.data = glm_vec4_step(edge.data, x.data); - return result; - } - }; -*/ - template - struct compute_smoothstep_vector<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& edge0, vec<4, float, Q> const& edge1, vec<4, float, Q> const& x) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_smoothstep(edge0.data, edge1.data, x.data); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_exponential.inl b/third_party/glm/detail/func_exponential.inl deleted file mode 100755 index 2040d41..0000000 --- a/third_party/glm/detail/func_exponential.inl +++ /dev/null @@ -1,152 +0,0 @@ -/// @ref core -/// @file glm/detail/func_exponential.inl - -#include "../vector_relational.hpp" -#include "_vectorize.hpp" -#include -#include -#include - -namespace glm{ -namespace detail -{ -# if GLM_HAS_CXX11_STL - using std::log2; -# else - template - genType log2(genType Value) - { - return std::log(Value) * static_cast(1.4426950408889634073599246810019); - } -# endif - - template - struct compute_log2 - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'log2' only accept floating-point inputs. 
Include for integer inputs."); - - return detail::functor1::call(log2, v); - } - }; - - template - struct compute_sqrt - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(std::sqrt, x); - } - }; - - template - struct compute_inversesqrt - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return static_cast(1) / sqrt(x); - } - }; - - template - struct compute_inversesqrt - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - vec tmp(x); - vec xhalf(tmp * 0.5f); - vec* p = reinterpret_cast*>(const_cast*>(&x)); - vec i = vec(0x5f375a86) - (*p >> vec(1)); - vec* ptmp = reinterpret_cast*>(&i); - tmp = *ptmp; - tmp = tmp * (1.5f - xhalf * tmp * tmp); - return tmp; - } - }; -}//namespace detail - - // pow - using std::pow; - template - GLM_FUNC_QUALIFIER vec pow(vec const& base, vec const& exponent) - { - return detail::functor2::call(pow, base, exponent); - } - - // exp - using std::exp; - template - GLM_FUNC_QUALIFIER vec exp(vec const& x) - { - return detail::functor1::call(exp, x); - } - - // log - using std::log; - template - GLM_FUNC_QUALIFIER vec log(vec const& x) - { - return detail::functor1::call(log, x); - } - -# if GLM_HAS_CXX11_STL - using std::exp2; -# else - //exp2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType exp2(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'exp2' only accept floating-point inputs"); - - return std::exp(static_cast(0.69314718055994530941723212145818) * x); - } -# endif - - template - GLM_FUNC_QUALIFIER vec exp2(vec const& x) - { - return detail::functor1::call(exp2, x); - } - - // log2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType log2(genType x) - { - return log2(vec<1, genType>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec log2(vec const& x) - { - return detail::compute_log2::is_iec559, detail::is_aligned::value>::call(x); - } - - // sqrt - using std::sqrt; - template - GLM_FUNC_QUALIFIER vec sqrt(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'sqrt' only accept floating-point inputs"); - return detail::compute_sqrt::value>::call(x); - } - - // inversesqrt - template - GLM_FUNC_QUALIFIER genType inversesqrt(genType x) - { - return static_cast(1) / sqrt(x); - } - - template - GLM_FUNC_QUALIFIER vec inversesqrt(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'inversesqrt' only accept floating-point inputs"); - return detail::compute_inversesqrt::value>::call(x); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_exponential_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_exponential_simd.inl b/third_party/glm/detail/func_exponential_simd.inl deleted file mode 100755 index fb78951..0000000 --- a/third_party/glm/detail/func_exponential_simd.inl +++ /dev/null @@ -1,37 +0,0 @@ -/// @ref core -/// @file glm/detail/func_exponential_simd.inl - -#include "../simd/exponential.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ - template - struct compute_sqrt<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> Result; - Result.data = _mm_sqrt_ps(v.data); - return Result; - } - }; - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template<> - struct compute_sqrt<4, float, aligned_lowp, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& v) - { - vec<4, float, aligned_lowp> Result; - 
Result.data = glm_vec4_sqrt_lowp(v.data); - return Result; - } - }; -# endif -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_geometric.inl b/third_party/glm/detail/func_geometric.inl deleted file mode 100755 index 9cde28f..0000000 --- a/third_party/glm/detail/func_geometric.inl +++ /dev/null @@ -1,243 +0,0 @@ -#include "../exponential.hpp" -#include "../common.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_length - { - GLM_FUNC_QUALIFIER static T call(vec const& v) - { - return sqrt(dot(v, v)); - } - }; - - template - struct compute_distance - { - GLM_FUNC_QUALIFIER static T call(vec const& p0, vec const& p1) - { - return length(p1 - p0); - } - }; - - template - struct compute_dot{}; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<1, T, Q> const& a, vec<1, T, Q> const& b) - { - return a.x * b.x; - } - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<2, T, Q> const& a, vec<2, T, Q> const& b) - { - vec<2, T, Q> tmp(a * b); - return tmp.x + tmp.y; - } - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<3, T, Q> const& a, vec<3, T, Q> const& b) - { - vec<3, T, Q> tmp(a * b); - return tmp.x + tmp.y + tmp.z; - } - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER static T call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> tmp(a * b); - return (tmp.x + tmp.y) + (tmp.z + tmp.w); - } - }; - - template - struct compute_cross - { - GLM_FUNC_QUALIFIER static vec<3, T, Q> call(vec<3, T, Q> const& x, vec<3, T, Q> const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cross' accepts only floating-point inputs"); - - return vec<3, T, Q>( - x.y * y.z - y.y * x.z, - x.z * y.x - y.z * x.x, - x.x * y.y - y.x * x.y); - } - }; - - template - struct compute_normalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return v * inversesqrt(dot(v, v)); - } - }; - - template - struct compute_faceforward - { - GLM_FUNC_QUALIFIER static vec call(vec const& N, vec const& I, vec const& Nref) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return dot(Nref, I) < static_cast(0) ? N : -N; - } - }; - - template - struct compute_reflect - { - GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N) - { - return I - N * dot(N, I) * static_cast(2); - } - }; - - template - struct compute_refract - { - GLM_FUNC_QUALIFIER static vec call(vec const& I, vec const& N, T eta) - { - T const dotValue(dot(N, I)); - T const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); - vec const Result = - (k >= static_cast(0)) ? 
(eta * I - (eta * dotValue + std::sqrt(k)) * N) : vec(0); - return Result; - } - }; -}//namespace detail - - // length - template - GLM_FUNC_QUALIFIER genType length(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); - - return abs(x); - } - - template - GLM_FUNC_QUALIFIER T length(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length' accepts only floating-point inputs"); - - return detail::compute_length::value>::call(v); - } - - // distance - template - GLM_FUNC_QUALIFIER genType distance(genType const& p0, genType const& p1) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance' accepts only floating-point inputs"); - - return length(p1 - p0); - } - - template - GLM_FUNC_QUALIFIER T distance(vec const& p0, vec const& p1) - { - return detail::compute_distance::value>::call(p0, p1); - } - - // dot - template - GLM_FUNC_QUALIFIER T dot(T x, T y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); - return x * y; - } - - template - GLM_FUNC_QUALIFIER T dot(vec const& x, vec const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); - return detail::compute_dot, T, detail::is_aligned::value>::call(x, y); - } - - // cross - template - GLM_FUNC_QUALIFIER vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y) - { - return detail::compute_cross::value>::call(x, y); - } -/* - // normalize - template - GLM_FUNC_QUALIFIER genType normalize(genType const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return x < genType(0) ? genType(-1) : genType(1); - } -*/ - template - GLM_FUNC_QUALIFIER vec normalize(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'normalize' accepts only floating-point inputs"); - - return detail::compute_normalize::value>::call(x); - } - - // faceforward - template - GLM_FUNC_QUALIFIER genType faceforward(genType const& N, genType const& I, genType const& Nref) - { - return dot(Nref, I) < static_cast(0) ? 
N : -N; - } - - template - GLM_FUNC_QUALIFIER vec faceforward(vec const& N, vec const& I, vec const& Nref) - { - return detail::compute_faceforward::value>::call(N, I, Nref); - } - - // reflect - template - GLM_FUNC_QUALIFIER genType reflect(genType const& I, genType const& N) - { - return I - N * dot(N, I) * genType(2); - } - - template - GLM_FUNC_QUALIFIER vec reflect(vec const& I, vec const& N) - { - return detail::compute_reflect::value>::call(I, N); - } - - // refract - template - GLM_FUNC_QUALIFIER genType refract(genType const& I, genType const& N, genType eta) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); - genType const dotValue(dot(N, I)); - genType const k(static_cast(1) - eta * eta * (static_cast(1) - dotValue * dotValue)); - return (eta * I - (eta * dotValue + sqrt(k)) * N) * static_cast(k >= static_cast(0)); - } - - template - GLM_FUNC_QUALIFIER vec refract(vec const& I, vec const& N, T eta) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'refract' accepts only floating-point inputs"); - return detail::compute_refract::value>::call(I, N, eta); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_geometric_simd.inl" -#endif diff --git a/third_party/glm/detail/func_geometric_simd.inl b/third_party/glm/detail/func_geometric_simd.inl deleted file mode 100755 index dfe3f4c..0000000 --- a/third_party/glm/detail/func_geometric_simd.inl +++ /dev/null @@ -1,165 +0,0 @@ -/// @ref core -/// @file glm/detail/func_geometric_simd.inl - -#include "../simd/geometric.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ - template - struct compute_length<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) - { - return _mm_cvtss_f32(glm_vec4_length(v.data)); - } - }; - - template - struct compute_distance<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) - { - return _mm_cvtss_f32(glm_vec4_distance(p0.data, p1.data)); - } - }; - - template - struct compute_dot, float, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) - { - return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data)); - } - }; - - template - struct compute_cross - { - GLM_FUNC_QUALIFIER static vec<3, float, Q> call(vec<3, float, Q> const& a, vec<3, float, Q> const& b) - { - __m128 const set0 = _mm_set_ps(0.0f, a.z, a.y, a.x); - __m128 const set1 = _mm_set_ps(0.0f, b.z, b.y, b.x); - __m128 const xpd0 = glm_vec4_cross(set0, set1); - - vec<4, float, Q> Result; - Result.data = xpd0; - return vec<3, float, Q>(Result); - } - }; - - template - struct compute_normalize<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_normalize(v.data); - return Result; - } - }; - - template - struct compute_faceforward<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& N, vec<4, float, Q> const& I, vec<4, float, Q> const& Nref) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_faceforward(N.data, I.data, Nref.data); - return Result; - } - }; - - template - struct compute_reflect<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_reflect(I.data, N.data); - return Result; - } - }; - - template - struct 
compute_refract<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& I, vec<4, float, Q> const& N, float eta) - { - vec<4, float, Q> Result; - Result.data = glm_vec4_refract(I.data, N.data, _mm_set1_ps(eta)); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#elif GLM_ARCH & GLM_ARCH_NEON_BIT -namespace glm{ -namespace detail -{ - template - struct compute_length<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& v) - { - return compute_dot, float, true>::call(v, v); - } - }; - - template - struct compute_distance<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& p0, vec<4, float, Q> const& p1) - { - return compute_length<4, float, Q, true>::call(p1 - p0); - } - }; - - - template - struct compute_dot, float, true> - { - GLM_FUNC_QUALIFIER static float call(vec<4, float, Q> const& x, vec<4, float, Q> const& y) - { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - float32x4_t v = vmulq_f32(x.data, y.data); - v = vpaddq_f32(v, v); - v = vpaddq_f32(v, v); - return vgetq_lane_f32(v, 0); -#else // Armv7a with Neon - float32x4_t p = vmulq_f32(x.data, y.data); - float32x2_t v = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); - v = vpadd_f32(v, v); - return vget_lane_f32(v, 0); -#endif - } - }; - - template - struct compute_normalize<4, float, Q, true> - { - GLM_FUNC_QUALIFIER static vec<4, float, Q> call(vec<4, float, Q> const& v) - { - float32x4_t p = vmulq_f32(v.data, v.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - p = vpaddq_f32(p, p); - p = vpaddq_f32(p, p); -#else - float32x2_t t = vpadd_f32(vget_low_f32(p), vget_high_f32(p)); - t = vpadd_f32(t, t); - p = vcombine_f32(t, t); -#endif - - float32x4_t vd = vrsqrteq_f32(p); - vec<4, float, Q> Result; - Result.data = vmulq_f32(v.data, vd); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_integer.inl b/third_party/glm/detail/func_integer.inl deleted file mode 100755 index 091e1e0..0000000 --- a/third_party/glm/detail/func_integer.inl +++ /dev/null @@ -1,372 +0,0 @@ -/// @ref core - -#include "_vectorize.hpp" -#if(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) -# include -# pragma intrinsic(_BitScanReverse) -#endif//(GLM_ARCH & GLM_ARCH_X86 && GLM_COMPILER & GLM_COMPILER_VC) -#include - -#if !GLM_HAS_EXTENDED_INTEGER_TYPE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic ignored "-Wlong-long" -# endif -# if (GLM_COMPILER & GLM_COMPILER_CLANG) -# pragma clang diagnostic ignored "-Wc++11-long-long" -# endif -#endif - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_QUALIFIER T mask(T Bits) - { - return Bits >= static_cast(sizeof(T) * 8) ? 
~static_cast(0) : (static_cast(1) << Bits) - static_cast(1); - } - - template - struct compute_bitfieldReverseStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) - { - return v; - } - }; - - template - struct compute_bitfieldReverseStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) - { - return (v & Mask) << Shift | (v & (~Mask)) >> Shift; - } - }; - - template - struct compute_bitfieldBitCountStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T, T) - { - return v; - } - }; - - template - struct compute_bitfieldBitCountStep - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T Mask, T Shift) - { - return (v & Mask) + ((v >> Shift) & Mask); - } - }; - - template - struct compute_findLSB - { - GLM_FUNC_QUALIFIER static int call(genIUType Value) - { - if(Value == 0) - return -1; - - return glm::bitCount(~Value & (Value - static_cast(1))); - } - }; - -# if GLM_HAS_BITSCAN_WINDOWS - template - struct compute_findLSB - { - GLM_FUNC_QUALIFIER static int call(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanForward(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? int(Result) : -1; - } - }; - -# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) - template - struct compute_findLSB - { - GLM_FUNC_QUALIFIER static int call(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanForward64(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? int(Result) : -1; - } - }; -# endif -# endif//GLM_HAS_BITSCAN_WINDOWS - - template - struct compute_findMSB_step_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, T Shift) - { - return x | (x >> Shift); - } - }; - - template - struct compute_findMSB_step_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x, T) - { - return x; - } - }; - - template - struct compute_findMSB_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - vec x(v); - x = compute_findMSB_step_vec= 8>::call(x, static_cast( 1)); - x = compute_findMSB_step_vec= 8>::call(x, static_cast( 2)); - x = compute_findMSB_step_vec= 8>::call(x, static_cast( 4)); - x = compute_findMSB_step_vec= 16>::call(x, static_cast( 8)); - x = compute_findMSB_step_vec= 32>::call(x, static_cast(16)); - x = compute_findMSB_step_vec= 64>::call(x, static_cast(32)); - return vec(sizeof(T) * 8 - 1) - glm::bitCount(~x); - } - }; - -# if GLM_HAS_BITSCAN_WINDOWS - template - GLM_FUNC_QUALIFIER int compute_findMSB_32(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanReverse(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? int(Result) : -1; - } - - template - struct compute_findMSB_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(compute_findMSB_32, x); - } - }; - -# if !((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_MODEL == GLM_MODEL_32)) - template - GLM_FUNC_QUALIFIER int compute_findMSB_64(genIUType Value) - { - unsigned long Result(0); - unsigned char IsNotNull = _BitScanReverse64(&Result, *reinterpret_cast(&Value)); - return IsNotNull ? 
int(Result) : -1; - } - - template - struct compute_findMSB_vec - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - return detail::functor1::call(compute_findMSB_64, x); - } - }; -# endif -# endif//GLM_HAS_BITSCAN_WINDOWS -}//namespace detail - - // uaddCarry - GLM_FUNC_QUALIFIER uint uaddCarry(uint const& x, uint const& y, uint & Carry) - { - detail::uint64 const Value64(static_cast(x) + static_cast(y)); - detail::uint64 const Max32((static_cast(1) << static_cast(32)) - static_cast(1)); - Carry = Value64 > Max32 ? 1u : 0u; - return static_cast(Value64 % (Max32 + static_cast(1))); - } - - template - GLM_FUNC_QUALIFIER vec uaddCarry(vec const& x, vec const& y, vec& Carry) - { - vec Value64(vec(x) + vec(y)); - vec Max32((static_cast(1) << static_cast(32)) - static_cast(1)); - Carry = mix(vec(0), vec(1), greaterThan(Value64, Max32)); - return vec(Value64 % (Max32 + static_cast(1))); - } - - // usubBorrow - GLM_FUNC_QUALIFIER uint usubBorrow(uint const& x, uint const& y, uint & Borrow) - { - Borrow = x >= y ? static_cast(0) : static_cast(1); - if(y >= x) - return y - x; - else - return static_cast((static_cast(1) << static_cast(32)) + (static_cast(y) - static_cast(x))); - } - - template - GLM_FUNC_QUALIFIER vec usubBorrow(vec const& x, vec const& y, vec& Borrow) - { - Borrow = mix(vec(1), vec(0), greaterThanEqual(x, y)); - vec const YgeX(y - x); - vec const XgeY(vec((static_cast(1) << static_cast(32)) + (vec(y) - vec(x)))); - return mix(XgeY, YgeX, greaterThanEqual(y, x)); - } - - // umulExtended - GLM_FUNC_QUALIFIER void umulExtended(uint const& x, uint const& y, uint & msb, uint & lsb) - { - detail::uint64 Value64 = static_cast(x) * static_cast(y); - msb = static_cast(Value64 >> static_cast(32)); - lsb = static_cast(Value64); - } - - template - GLM_FUNC_QUALIFIER void umulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) - { - vec Value64(vec(x) * vec(y)); - msb = vec(Value64 >> static_cast(32)); - lsb = vec(Value64); - } - - // imulExtended - GLM_FUNC_QUALIFIER void imulExtended(int x, int y, int& msb, int& lsb) - { - detail::int64 Value64 = static_cast(x) * static_cast(y); - msb = static_cast(Value64 >> static_cast(32)); - lsb = static_cast(Value64); - } - - template - GLM_FUNC_QUALIFIER void imulExtended(vec const& x, vec const& y, vec& msb, vec& lsb) - { - vec Value64(vec(x) * vec(y)); - lsb = vec(Value64 & static_cast(0xFFFFFFFF)); - msb = vec((Value64 >> static_cast(32)) & static_cast(0xFFFFFFFF)); - } - - // bitfieldExtract - template - GLM_FUNC_QUALIFIER genIUType bitfieldExtract(genIUType Value, int Offset, int Bits) - { - return bitfieldExtract(vec<1, genIUType>(Value), Offset, Bits).x; - } - - template - GLM_FUNC_QUALIFIER vec bitfieldExtract(vec const& Value, int Offset, int Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldExtract' only accept integer inputs"); - - return (Value >> static_cast(Offset)) & static_cast(detail::mask(Bits)); - } - - // bitfieldInsert - template - GLM_FUNC_QUALIFIER genIUType bitfieldInsert(genIUType const& Base, genIUType const& Insert, int Offset, int Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); - - return bitfieldInsert(vec<1, genIUType>(Base), vec<1, genIUType>(Insert), Offset, Bits).x; - } - - template - GLM_FUNC_QUALIFIER vec bitfieldInsert(vec const& Base, vec const& Insert, int Offset, int Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldInsert' only accept integer values"); - - T const Mask = 
static_cast(detail::mask(Bits) << Offset); - return (Base & ~Mask) | ((Insert << static_cast(Offset)) & Mask); - } - - // bitfieldReverse - template - GLM_FUNC_QUALIFIER genIUType bitfieldReverse(genIUType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); - - return bitfieldReverse(glm::vec<1, genIUType, glm::defaultp>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec bitfieldReverse(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldReverse' only accept integer values"); - - vec x(v); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 2>::call(x, static_cast(0x5555555555555555ull), static_cast( 1)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 4>::call(x, static_cast(0x3333333333333333ull), static_cast( 2)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 8>::call(x, static_cast(0x0F0F0F0F0F0F0F0Full), static_cast( 4)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 16>::call(x, static_cast(0x00FF00FF00FF00FFull), static_cast( 8)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 32>::call(x, static_cast(0x0000FFFF0000FFFFull), static_cast(16)); - x = detail::compute_bitfieldReverseStep::value, sizeof(T) * 8>= 64>::call(x, static_cast(0x00000000FFFFFFFFull), static_cast(32)); - return x; - } - - // bitCount - template - GLM_FUNC_QUALIFIER int bitCount(genIUType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); - - return bitCount(glm::vec<1, genIUType, glm::defaultp>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec bitCount(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitCount' only accept integer values"); - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable : 4310) //cast truncates constant value -# endif - - vec::type, Q> x(*reinterpret_cast::type, Q> const *>(&v)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 2>::call(x, typename detail::make_unsigned::type(0x5555555555555555ull), typename detail::make_unsigned::type( 1)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 4>::call(x, typename detail::make_unsigned::type(0x3333333333333333ull), typename detail::make_unsigned::type( 2)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 8>::call(x, typename detail::make_unsigned::type(0x0F0F0F0F0F0F0F0Full), typename detail::make_unsigned::type( 4)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 16>::call(x, typename detail::make_unsigned::type(0x00FF00FF00FF00FFull), typename detail::make_unsigned::type( 8)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 32>::call(x, typename detail::make_unsigned::type(0x0000FFFF0000FFFFull), typename detail::make_unsigned::type(16)); - x = detail::compute_bitfieldBitCountStep::type, Q, detail::is_aligned::value, sizeof(T) * 8>= 64>::call(x, typename detail::make_unsigned::type(0x00000000FFFFFFFFull), typename detail::make_unsigned::type(32)); - return vec(x); - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif - } - - // findLSB - template - GLM_FUNC_QUALIFIER int findLSB(genIUType Value) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept integer values"); - - return 
detail::compute_findLSB::call(Value); - } - - template - GLM_FUNC_QUALIFIER vec findLSB(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findLSB' only accept integer values"); - - return detail::functor1::call(findLSB, x); - } - - // findMSB - template - GLM_FUNC_QUALIFIER int findMSB(genIUType v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); - - return findMSB(vec<1, genIUType>(v)).x; - } - - template - GLM_FUNC_QUALIFIER vec findMSB(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findMSB' only accept integer values"); - - return detail::compute_findMSB_vec::call(v); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_integer_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_integer_simd.inl b/third_party/glm/detail/func_integer_simd.inl deleted file mode 100755 index 8be6c9c..0000000 --- a/third_party/glm/detail/func_integer_simd.inl +++ /dev/null @@ -1,65 +0,0 @@ -#include "../simd/integer.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ - template - struct compute_bitfieldReverseStep<4, uint, Q, true, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) - { - __m128i const set0 = v.data; - - __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); - __m128i const and1 = _mm_and_si128(set0, set1); - __m128i const sft1 = _mm_slli_epi32(and1, Shift); - - __m128i const set2 = _mm_andnot_si128(set0, _mm_set1_epi32(-1)); - __m128i const and2 = _mm_and_si128(set0, set2); - __m128i const sft2 = _mm_srai_epi32(and2, Shift); - - __m128i const or0 = _mm_or_si128(sft1, sft2); - - return or0; - } - }; - - template - struct compute_bitfieldBitCountStep<4, uint, Q, true, true> - { - GLM_FUNC_QUALIFIER static vec<4, uint, Q> call(vec<4, uint, Q> const& v, uint Mask, uint Shift) - { - __m128i const set0 = v.data; - - __m128i const set1 = _mm_set1_epi32(static_cast(Mask)); - __m128i const and0 = _mm_and_si128(set0, set1); - __m128i const sft0 = _mm_slli_epi32(set0, Shift); - __m128i const and1 = _mm_and_si128(sft0, set1); - __m128i const add0 = _mm_add_epi32(and0, and1); - - return add0; - } - }; -}//namespace detail - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template<> - GLM_FUNC_QUALIFIER int bitCount(uint x) - { - return _mm_popcnt_u32(x); - } - -# if(GLM_MODEL == GLM_MODEL_64) - template<> - GLM_FUNC_QUALIFIER int bitCount(detail::uint64 x) - { - return static_cast(_mm_popcnt_u64(x)); - } -# endif//GLM_MODEL -# endif//GLM_ARCH - -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/detail/func_matrix.inl b/third_party/glm/detail/func_matrix.inl deleted file mode 100755 index d980c6d..0000000 --- a/third_party/glm/detail/func_matrix.inl +++ /dev/null @@ -1,398 +0,0 @@ -#include "../geometric.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct compute_matrixCompMult - { - GLM_FUNC_QUALIFIER static mat call(mat const& x, mat const& y) - { - mat Result; - for(length_t i = 0; i < Result.length(); ++i) - Result[i] = x[i] * y[i]; - return Result; - } - }; - - template - struct compute_transpose{}; - - template - struct compute_transpose<2, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) - { - mat<2, 2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - return Result; - } - }; - - template - struct compute_transpose<2, 3, T, Q, 
Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 2, T, Q> call(mat<2, 3, T, Q> const& m) - { - mat<3,2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - return Result; - } - }; - - template - struct compute_transpose<2, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 2, T, Q> call(mat<2, 4, T, Q> const& m) - { - mat<4, 2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[3][0] = m[0][3]; - Result[3][1] = m[1][3]; - return Result; - } - }; - - template - struct compute_transpose<3, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 3, T, Q> call(mat<3, 2, T, Q> const& m) - { - mat<2, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - return Result; - } - }; - - template - struct compute_transpose<3, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) - { - mat<3, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - return Result; - } - }; - - template - struct compute_transpose<3, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 3, T, Q> call(mat<3, 4, T, Q> const& m) - { - mat<4, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[3][0] = m[0][3]; - Result[3][1] = m[1][3]; - Result[3][2] = m[2][3]; - return Result; - } - }; - - template - struct compute_transpose<4, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 4, T, Q> call(mat<4, 2, T, Q> const& m) - { - mat<2, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - return Result; - } - }; - - template - struct compute_transpose<4, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 4, T, Q> call(mat<4, 3, T, Q> const& m) - { - mat<3, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[2][3] = m[3][2]; - return Result; - } - }; - - template - struct compute_transpose<4, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) - { - mat<4, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[2][3] = m[3][2]; - - Result[3][0] = m[0][3]; - Result[3][1] = m[1][3]; - Result[3][2] = m[2][3]; - Result[3][3] = m[3][3]; - return Result; - } - }; - - template - struct 
compute_determinant{}; - - template - struct compute_determinant<2, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static T call(mat<2, 2, T, Q> const& m) - { - return m[0][0] * m[1][1] - m[1][0] * m[0][1]; - } - }; - - template - struct compute_determinant<3, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static T call(mat<3, 3, T, Q> const& m) - { - return - + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) - - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) - + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2]); - } - }; - - template - struct compute_determinant<4, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static T call(mat<4, 4, T, Q> const& m) - { - T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - vec<4, T, Q> DetCof( - + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - return - m[0][0] * DetCof[0] + m[0][1] * DetCof[1] + - m[0][2] * DetCof[2] + m[0][3] * DetCof[3]; - } - }; - - template - struct compute_inverse{}; - - template - struct compute_inverse<2, 2, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<2, 2, T, Q> call(mat<2, 2, T, Q> const& m) - { - T OneOverDeterminant = static_cast(1) / ( - + m[0][0] * m[1][1] - - m[1][0] * m[0][1]); - - mat<2, 2, T, Q> Inverse( - + m[1][1] * OneOverDeterminant, - - m[0][1] * OneOverDeterminant, - - m[1][0] * OneOverDeterminant, - + m[0][0] * OneOverDeterminant); - - return Inverse; - } - }; - - template - struct compute_inverse<3, 3, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<3, 3, T, Q> call(mat<3, 3, T, Q> const& m) - { - T OneOverDeterminant = static_cast(1) / ( - + m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2]) - - m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2]) - + m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2])); - - mat<3, 3, T, Q> Inverse; - Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]) * OneOverDeterminant; - Inverse[1][0] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]) * OneOverDeterminant; - Inverse[2][0] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]) * OneOverDeterminant; - Inverse[0][1] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]) * OneOverDeterminant; - Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]) * OneOverDeterminant; - Inverse[2][1] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]) * OneOverDeterminant; - Inverse[0][2] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]) * OneOverDeterminant; - Inverse[1][2] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]) * OneOverDeterminant; - Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]) * OneOverDeterminant; - - return Inverse; - } - }; - - template - struct compute_inverse<4, 4, T, Q, Aligned> - { - GLM_FUNC_QUALIFIER static mat<4, 4, T, Q> call(mat<4, 4, T, Q> const& m) - { - T Coef00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - T Coef02 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - T Coef03 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - T Coef04 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - T Coef06 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - T Coef07 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - T Coef08 = m[2][1] * m[3][2] - 
m[3][1] * m[2][2]; - T Coef10 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - T Coef11 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - T Coef12 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - T Coef14 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - T Coef15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - T Coef16 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - T Coef18 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - T Coef19 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - T Coef20 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - T Coef22 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - T Coef23 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - vec<4, T, Q> Fac0(Coef00, Coef00, Coef02, Coef03); - vec<4, T, Q> Fac1(Coef04, Coef04, Coef06, Coef07); - vec<4, T, Q> Fac2(Coef08, Coef08, Coef10, Coef11); - vec<4, T, Q> Fac3(Coef12, Coef12, Coef14, Coef15); - vec<4, T, Q> Fac4(Coef16, Coef16, Coef18, Coef19); - vec<4, T, Q> Fac5(Coef20, Coef20, Coef22, Coef23); - - vec<4, T, Q> Vec0(m[1][0], m[0][0], m[0][0], m[0][0]); - vec<4, T, Q> Vec1(m[1][1], m[0][1], m[0][1], m[0][1]); - vec<4, T, Q> Vec2(m[1][2], m[0][2], m[0][2], m[0][2]); - vec<4, T, Q> Vec3(m[1][3], m[0][3], m[0][3], m[0][3]); - - vec<4, T, Q> Inv0(Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2); - vec<4, T, Q> Inv1(Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4); - vec<4, T, Q> Inv2(Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5); - vec<4, T, Q> Inv3(Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5); - - vec<4, T, Q> SignA(+1, -1, +1, -1); - vec<4, T, Q> SignB(-1, +1, -1, +1); - mat<4, 4, T, Q> Inverse(Inv0 * SignA, Inv1 * SignB, Inv2 * SignA, Inv3 * SignB); - - vec<4, T, Q> Row0(Inverse[0][0], Inverse[1][0], Inverse[2][0], Inverse[3][0]); - - vec<4, T, Q> Dot0(m[0] * Row0); - T Dot1 = (Dot0.x + Dot0.y) + (Dot0.z + Dot0.w); - - T OneOverDeterminant = static_cast(1) / Dot1; - - return Inverse * OneOverDeterminant; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER mat matrixCompMult(mat const& x, mat const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'matrixCompMult' only accept floating-point inputs"); - return detail::compute_matrixCompMult::value>::call(x, y); - } - - template - GLM_FUNC_QUALIFIER typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'outerProduct' only accept floating-point inputs"); - - typename detail::outerProduct_trait::type m; - for(length_t i = 0; i < m.length(); ++i) - m[i] = c * r[i]; - return m; - } - - template - GLM_FUNC_QUALIFIER typename mat::transpose_type transpose(mat const& m) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'transpose' only accept floating-point inputs"); - return detail::compute_transpose::value>::call(m); - } - - template - GLM_FUNC_QUALIFIER T determinant(mat const& m) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'determinant' only accept floating-point inputs"); - return detail::compute_determinant::value>::call(m); - } - - template - GLM_FUNC_QUALIFIER mat inverse(mat const& m) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || GLM_CONFIG_UNRESTRICTED_GENTYPE, "'inverse' only accept floating-point inputs"); - return detail::compute_inverse::value>::call(m); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_matrix_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_matrix_simd.inl b/third_party/glm/detail/func_matrix_simd.inl deleted file mode 
100755 index f67ac66..0000000 --- a/third_party/glm/detail/func_matrix_simd.inl +++ /dev/null @@ -1,249 +0,0 @@ -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#include "type_mat4x4.hpp" -#include "../geometric.hpp" -#include "../simd/matrix.h" -#include - -namespace glm{ -namespace detail -{ -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template - struct compute_matrixCompMult<4, 4, float, Q, true> - { - GLM_STATIC_ASSERT(detail::is_aligned::value, "Specialization requires aligned"); - - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& x, mat<4, 4, float, Q> const& y) - { - mat<4, 4, float, Q> Result; - glm_mat4_matrixCompMult( - *static_cast(&x[0].data), - *static_cast(&y[0].data), - *static_cast(&Result[0].data)); - return Result; - } - }; -# endif - - template - struct compute_transpose<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) - { - mat<4, 4, float, Q> Result; - glm_mat4_transpose(&m[0].data, &Result[0].data); - return Result; - } - }; - - template - struct compute_determinant<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static float call(mat<4, 4, float, Q> const& m) - { - return _mm_cvtss_f32(glm_mat4_determinant(&m[0].data)); - } - }; - - template - struct compute_inverse<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) - { - mat<4, 4, float, Q> Result; - glm_mat4_inverse(&m[0].data, &Result[0].data); - return Result; - } - }; -}//namespace detail - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template<> - GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_lowp> outerProduct<4, 4, float, aligned_lowp>(vec<4, float, aligned_lowp> const& c, vec<4, float, aligned_lowp> const& r) - { - __m128 NativeResult[4]; - glm_mat4_outerProduct(c.data, r.data, NativeResult); - mat<4, 4, float, aligned_lowp> Result; - std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); - return Result; - } - - template<> - GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_mediump> outerProduct<4, 4, float, aligned_mediump>(vec<4, float, aligned_mediump> const& c, vec<4, float, aligned_mediump> const& r) - { - __m128 NativeResult[4]; - glm_mat4_outerProduct(c.data, r.data, NativeResult); - mat<4, 4, float, aligned_mediump> Result; - std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); - return Result; - } - - template<> - GLM_FUNC_QUALIFIER mat<4, 4, float, aligned_highp> outerProduct<4, 4, float, aligned_highp>(vec<4, float, aligned_highp> const& c, vec<4, float, aligned_highp> const& r) - { - __m128 NativeResult[4]; - glm_mat4_outerProduct(c.data, r.data, NativeResult); - mat<4, 4, float, aligned_highp> Result; - std::memcpy(&Result[0], &NativeResult[0], sizeof(Result)); - return Result; - } -# endif -}//namespace glm - -#elif GLM_ARCH & GLM_ARCH_NEON_BIT - -namespace glm { -#if GLM_LANG & GLM_LANG_CXX11_FLAG - template - GLM_FUNC_QUALIFIER - typename std::enable_if::value, mat<4, 4, float, Q>>::type - operator*(mat<4, 4, float, Q> const & m1, mat<4, 4, float, Q> const & m2) - { - auto MulRow = [&](int l) { - float32x4_t const SrcA = m2[l].data; - - float32x4_t r = neon::mul_lane(m1[0].data, SrcA, 0); - r = neon::madd_lane(r, m1[1].data, SrcA, 1); - r = neon::madd_lane(r, m1[2].data, SrcA, 2); - r = neon::madd_lane(r, m1[3].data, SrcA, 3); - - return r; - }; - - mat<4, 4, float, aligned_highp> Result; - Result[0].data = MulRow(0); - Result[1].data = MulRow(1); - Result[2].data = MulRow(2); - Result[3].data = MulRow(3); - - return Result; - } -#endif // CXX11 - 
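// The deleted NEON specialization above computes each output column of a 4x4
// column-major product as a lane-broadcast multiply-accumulate over the left
// matrix's columns. A minimal scalar sketch of that same decomposition
// (illustrative only; plain arrays, hypothetical helper name, not the
// library's API):
//
//     // column-major layout: m[col][row]
//     static void mat4_mul(const float m1[4][4], const float m2[4][4], float out[4][4])
//     {
//         for (int c = 0; c < 4; ++c)
//             for (int r = 0; r < 4; ++r)
//                 out[c][r] = m1[0][r] * m2[c][0] + m1[1][r] * m2[c][1]
//                           + m1[2][r] * m2[c][2] + m1[3][r] * m2[c][3];
//     }
//
// The SIMD path replaces the inner accumulation with mul_lane/madd_lane on
// whole float32x4_t columns, one fused step per lane of m2's column.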
- template - struct detail::compute_inverse<4, 4, float, Q, true> - { - GLM_FUNC_QUALIFIER static mat<4, 4, float, Q> call(mat<4, 4, float, Q> const& m) - { - float32x4_t const& m0 = m[0].data; - float32x4_t const& m1 = m[1].data; - float32x4_t const& m2 = m[2].data; - float32x4_t const& m3 = m[3].data; - - // m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - float32x4_t Fac0; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); - Fac0 = w0 * w1 - w2 * w3; - } - - // m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - float32x4_t Fac1; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); - Fac1 = w0 * w1 - w2 * w3; - } - - // m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - float32x4_t Fac2; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); - Fac2 = w0 * w1 - w2 * w3; - } - - // m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - float32x4_t Fac3; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 3), 3, m2, 3); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 3), neon::dup_lane(m1, 3)); - Fac3 = w0 * w1 - w2 * w3; - } - - // m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - float32x4_t Fac4; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 2), 3, m2, 2); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 2), neon::dup_lane(m1, 2)); - Fac4 = w0 * w1 - w2 * w3; - } - - // m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - float32x4_t Fac5; - { - float32x4_t w0 = vcombine_f32(neon::dup_lane(m2, 0), neon::dup_lane(m1, 0)); - float32x4_t w1 = neon::copy_lane(neon::dupq_lane(m3, 1), 3, m2, 1); - float32x4_t w2 = neon::copy_lane(neon::dupq_lane(m3, 0), 3, m2, 0); - float32x4_t w3 = vcombine_f32(neon::dup_lane(m2, 1), neon::dup_lane(m1, 1)); - Fac5 = w0 * w1 - w2 * w3; - } - - float32x4_t Vec0 = 
neon::copy_lane(neon::dupq_lane(m0, 0), 0, m1, 0); // (m[1][0], m[0][0], m[0][0], m[0][0]); - float32x4_t Vec1 = neon::copy_lane(neon::dupq_lane(m0, 1), 0, m1, 1); // (m[1][1], m[0][1], m[0][1], m[0][1]); - float32x4_t Vec2 = neon::copy_lane(neon::dupq_lane(m0, 2), 0, m1, 2); // (m[1][2], m[0][2], m[0][2], m[0][2]); - float32x4_t Vec3 = neon::copy_lane(neon::dupq_lane(m0, 3), 0, m1, 3); // (m[1][3], m[0][3], m[0][3], m[0][3]); - - float32x4_t Inv0 = Vec1 * Fac0 - Vec2 * Fac1 + Vec3 * Fac2; - float32x4_t Inv1 = Vec0 * Fac0 - Vec2 * Fac3 + Vec3 * Fac4; - float32x4_t Inv2 = Vec0 * Fac1 - Vec1 * Fac3 + Vec3 * Fac5; - float32x4_t Inv3 = Vec0 * Fac2 - Vec1 * Fac4 + Vec2 * Fac5; - - float32x4_t r0 = float32x4_t{-1, +1, -1, +1} * Inv0; - float32x4_t r1 = float32x4_t{+1, -1, +1, -1} * Inv1; - float32x4_t r2 = float32x4_t{-1, +1, -1, +1} * Inv2; - float32x4_t r3 = float32x4_t{+1, -1, +1, -1} * Inv3; - - float32x4_t det = neon::mul_lane(r0, m0, 0); - det = neon::madd_lane(det, r1, m0, 1); - det = neon::madd_lane(det, r2, m0, 2); - det = neon::madd_lane(det, r3, m0, 3); - - float32x4_t rdet = vdupq_n_f32(1 / vgetq_lane_f32(det, 0)); - - mat<4, 4, float, Q> r; - r[0].data = vmulq_f32(r0, rdet); - r[1].data = vmulq_f32(r1, rdet); - r[2].data = vmulq_f32(r2, rdet); - r[3].data = vmulq_f32(r3, rdet); - return r; - } - }; -}//namespace glm -#endif diff --git a/third_party/glm/detail/func_packing.inl b/third_party/glm/detail/func_packing.inl deleted file mode 100755 index 234b093..0000000 --- a/third_party/glm/detail/func_packing.inl +++ /dev/null @@ -1,189 +0,0 @@ -/// @ref core -/// @file glm/detail/func_packing.inl - -#include "../common.hpp" -#include "type_half.hpp" - -namespace glm -{ - GLM_FUNC_QUALIFIER uint packUnorm2x16(vec2 const& v) - { - union - { - unsigned short in[2]; - uint out; - } u; - - vec<2, unsigned short, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 65535.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - - return u.out; - } - - GLM_FUNC_QUALIFIER vec2 unpackUnorm2x16(uint p) - { - union - { - uint in; - unsigned short out[2]; - } u; - - u.in = p; - - return vec2(u.out[0], u.out[1]) * 1.5259021896696421759365224689097e-5f; - } - - GLM_FUNC_QUALIFIER uint packSnorm2x16(vec2 const& v) - { - union - { - signed short in[2]; - uint out; - } u; - - vec<2, short, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 32767.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - - return u.out; - } - - GLM_FUNC_QUALIFIER vec2 unpackSnorm2x16(uint p) - { - union - { - uint in; - signed short out[2]; - } u; - - u.in = p; - - return clamp(vec2(u.out[0], u.out[1]) * 3.0518509475997192297128208258309e-5f, -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint packUnorm4x8(vec4 const& v) - { - union - { - unsigned char in[4]; - uint out; - } u; - - vec<4, unsigned char, defaultp> result(round(clamp(v, 0.0f, 1.0f) * 255.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - u.in[2] = result[2]; - u.in[3] = result[3]; - - return u.out; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm4x8(uint p) - { - union - { - uint in; - unsigned char out[4]; - } u; - - u.in = p; - - return vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0039215686274509803921568627451f; - } - - GLM_FUNC_QUALIFIER uint packSnorm4x8(vec4 const& v) - { - union - { - signed char in[4]; - uint out; - } u; - - vec<4, signed char, defaultp> result(round(clamp(v, -1.0f, 1.0f) * 127.0f)); - - u.in[0] = result[0]; - u.in[1] = result[1]; - u.in[2] = result[2]; - u.in[3] = result[3]; - - return u.out; - } - - GLM_FUNC_QUALIFIER glm::vec4 
unpackSnorm4x8(uint p) - { - union - { - uint in; - signed char out[4]; - } u; - - u.in = p; - - return clamp(vec4(u.out[0], u.out[1], u.out[2], u.out[3]) * 0.0078740157480315f, -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER double packDouble2x32(uvec2 const& v) - { - union - { - uint in[2]; - double out; - } u; - - u.in[0] = v[0]; - u.in[1] = v[1]; - - return u.out; - } - - GLM_FUNC_QUALIFIER uvec2 unpackDouble2x32(double v) - { - union - { - double in; - uint out[2]; - } u; - - u.in = v; - - return uvec2(u.out[0], u.out[1]); - } - - GLM_FUNC_QUALIFIER uint packHalf2x16(vec2 const& v) - { - union - { - signed short in[2]; - uint out; - } u; - - u.in[0] = detail::toFloat16(v.x); - u.in[1] = detail::toFloat16(v.y); - - return u.out; - } - - GLM_FUNC_QUALIFIER vec2 unpackHalf2x16(uint v) - { - union - { - uint in; - signed short out[2]; - } u; - - u.in = v; - - return vec2( - detail::toFloat32(u.out[0]), - detail::toFloat32(u.out[1])); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_packing_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_packing_simd.inl b/third_party/glm/detail/func_packing_simd.inl deleted file mode 100755 index fd0fe8b..0000000 --- a/third_party/glm/detail/func_packing_simd.inl +++ /dev/null @@ -1,6 +0,0 @@ -namespace glm{ -namespace detail -{ - -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/func_trigonometric.inl b/third_party/glm/detail/func_trigonometric.inl deleted file mode 100755 index e129dce..0000000 --- a/third_party/glm/detail/func_trigonometric.inl +++ /dev/null @@ -1,197 +0,0 @@ -#include "_vectorize.hpp" -#include -#include - -namespace glm -{ - // radians - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType radians(genType degrees) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'radians' only accept floating-point input"); - - return degrees * static_cast(0.01745329251994329576923690768489); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec radians(vec const& v) - { - return detail::functor1::call(radians, v); - } - - // degrees - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType degrees(genType radians) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'degrees' only accept floating-point input"); - - return radians * static_cast(57.295779513082320876798154814105); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec degrees(vec const& v) - { - return detail::functor1::call(degrees, v); - } - - // sin - using ::std::sin; - - template - GLM_FUNC_QUALIFIER vec sin(vec const& v) - { - return detail::functor1::call(sin, v); - } - - // cos - using std::cos; - - template - GLM_FUNC_QUALIFIER vec cos(vec const& v) - { - return detail::functor1::call(cos, v); - } - - // tan - using std::tan; - - template - GLM_FUNC_QUALIFIER vec tan(vec const& v) - { - return detail::functor1::call(tan, v); - } - - // asin - using std::asin; - - template - GLM_FUNC_QUALIFIER vec asin(vec const& v) - { - return detail::functor1::call(asin, v); - } - - // acos - using std::acos; - - template - GLM_FUNC_QUALIFIER vec acos(vec const& v) - { - return detail::functor1::call(acos, v); - } - - // atan - template - GLM_FUNC_QUALIFIER genType atan(genType y, genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'atan' only accept floating-point input"); - - return ::std::atan2(y, x); - } - - template - GLM_FUNC_QUALIFIER vec atan(vec const& a, vec const& b) - { - return detail::functor2::call(::std::atan2, a, b); - } - - using std::atan; - - template - GLM_FUNC_QUALIFIER vec atan(vec 
const& v) - { - return detail::functor1::call(atan, v); - } - - // sinh - using std::sinh; - - template - GLM_FUNC_QUALIFIER vec sinh(vec const& v) - { - return detail::functor1::call(sinh, v); - } - - // cosh - using std::cosh; - - template - GLM_FUNC_QUALIFIER vec cosh(vec const& v) - { - return detail::functor1::call(cosh, v); - } - - // tanh - using std::tanh; - - template - GLM_FUNC_QUALIFIER vec tanh(vec const& v) - { - return detail::functor1::call(tanh, v); - } - - // asinh -# if GLM_HAS_CXX11_STL - using std::asinh; -# else - template - GLM_FUNC_QUALIFIER genType asinh(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'asinh' only accept floating-point input"); - - return (x < static_cast(0) ? static_cast(-1) : (x > static_cast(0) ? static_cast(1) : static_cast(0))) * log(std::abs(x) + sqrt(static_cast(1) + x * x)); - } -# endif - - template - GLM_FUNC_QUALIFIER vec asinh(vec const& v) - { - return detail::functor1::call(asinh, v); - } - - // acosh -# if GLM_HAS_CXX11_STL - using std::acosh; -# else - template - GLM_FUNC_QUALIFIER genType acosh(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acosh' only accept floating-point input"); - - if(x < static_cast(1)) - return static_cast(0); - return log(x + sqrt(x * x - static_cast(1))); - } -# endif - - template - GLM_FUNC_QUALIFIER vec acosh(vec const& v) - { - return detail::functor1::call(acosh, v); - } - - // atanh -# if GLM_HAS_CXX11_STL - using std::atanh; -# else - template - GLM_FUNC_QUALIFIER genType atanh(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'atanh' only accept floating-point input"); - - if(std::abs(x) >= static_cast(1)) - return 0; - return static_cast(0.5) * log((static_cast(1) + x) / (static_cast(1) - x)); - } -# endif - - template - GLM_FUNC_QUALIFIER vec atanh(vec const& v) - { - return detail::functor1::call(atanh, v); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_trigonometric_simd.inl" -#endif - diff --git a/third_party/glm/detail/func_trigonometric_simd.inl b/third_party/glm/detail/func_trigonometric_simd.inl deleted file mode 100755 index e69de29..0000000 diff --git a/third_party/glm/detail/func_vector_relational.inl b/third_party/glm/detail/func_vector_relational.inl deleted file mode 100755 index 80c9e87..0000000 --- a/third_party/glm/detail/func_vector_relational.inl +++ /dev/null @@ -1,87 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThan(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] < y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec lessThanEqual(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] <= y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThan(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] > y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec greaterThanEqual(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] >= y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = x[i] == y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y) - { - vec Result(true); - for(length_t i = 
0; i < L; ++i) - Result[i] = x[i] != y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool any(vec const& v) - { - bool Result = false; - for(length_t i = 0; i < L; ++i) - Result = Result || v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool all(vec const& v) - { - bool Result = true; - for(length_t i = 0; i < L; ++i) - Result = Result && v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec not_(vec const& v) - { - vec Result(true); - for(length_t i = 0; i < L; ++i) - Result[i] = !v[i]; - return Result; - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "func_vector_relational_simd.inl" -#endif diff --git a/third_party/glm/detail/func_vector_relational_simd.inl b/third_party/glm/detail/func_vector_relational_simd.inl deleted file mode 100755 index fd0fe8b..0000000 --- a/third_party/glm/detail/func_vector_relational_simd.inl +++ /dev/null @@ -1,6 +0,0 @@ -namespace glm{ -namespace detail -{ - -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/glm.cpp b/third_party/glm/detail/glm.cpp deleted file mode 100755 index e0755bd..0000000 --- a/third_party/glm/detail/glm.cpp +++ /dev/null @@ -1,263 +0,0 @@ -/// @ref core -/// @file glm/glm.cpp - -#ifndef GLM_ENABLE_EXPERIMENTAL -#define GLM_ENABLE_EXPERIMENTAL -#endif -#include -#include -#include -#include -#include -#include - -namespace glm -{ -// tvec1 type explicit instantiation -template struct vec<1, uint8, lowp>; -template struct vec<1, uint16, lowp>; -template struct vec<1, uint32, lowp>; -template struct vec<1, uint64, lowp>; -template struct vec<1, int8, lowp>; -template struct vec<1, int16, lowp>; -template struct vec<1, int32, lowp>; -template struct vec<1, int64, lowp>; -template struct vec<1, float32, lowp>; -template struct vec<1, float64, lowp>; - -template struct vec<1, uint8, mediump>; -template struct vec<1, uint16, mediump>; -template struct vec<1, uint32, mediump>; -template struct vec<1, uint64, mediump>; -template struct vec<1, int8, mediump>; -template struct vec<1, int16, mediump>; -template struct vec<1, int32, mediump>; -template struct vec<1, int64, mediump>; -template struct vec<1, float32, mediump>; -template struct vec<1, float64, mediump>; - -template struct vec<1, uint8, highp>; -template struct vec<1, uint16, highp>; -template struct vec<1, uint32, highp>; -template struct vec<1, uint64, highp>; -template struct vec<1, int8, highp>; -template struct vec<1, int16, highp>; -template struct vec<1, int32, highp>; -template struct vec<1, int64, highp>; -template struct vec<1, float32, highp>; -template struct vec<1, float64, highp>; - -// tvec2 type explicit instantiation -template struct vec<2, uint8, lowp>; -template struct vec<2, uint16, lowp>; -template struct vec<2, uint32, lowp>; -template struct vec<2, uint64, lowp>; -template struct vec<2, int8, lowp>; -template struct vec<2, int16, lowp>; -template struct vec<2, int32, lowp>; -template struct vec<2, int64, lowp>; -template struct vec<2, float32, lowp>; -template struct vec<2, float64, lowp>; - -template struct vec<2, uint8, mediump>; -template struct vec<2, uint16, mediump>; -template struct vec<2, uint32, mediump>; -template struct vec<2, uint64, mediump>; -template struct vec<2, int8, mediump>; -template struct vec<2, int16, mediump>; -template struct vec<2, int32, mediump>; -template struct vec<2, int64, mediump>; -template struct vec<2, float32, mediump>; -template struct vec<2, float64, mediump>; - -template struct vec<2, uint8, 
highp>; -template struct vec<2, uint16, highp>; -template struct vec<2, uint32, highp>; -template struct vec<2, uint64, highp>; -template struct vec<2, int8, highp>; -template struct vec<2, int16, highp>; -template struct vec<2, int32, highp>; -template struct vec<2, int64, highp>; -template struct vec<2, float32, highp>; -template struct vec<2, float64, highp>; - -// tvec3 type explicit instantiation -template struct vec<3, uint8, lowp>; -template struct vec<3, uint16, lowp>; -template struct vec<3, uint32, lowp>; -template struct vec<3, uint64, lowp>; -template struct vec<3, int8, lowp>; -template struct vec<3, int16, lowp>; -template struct vec<3, int32, lowp>; -template struct vec<3, int64, lowp>; -template struct vec<3, float32, lowp>; -template struct vec<3, float64, lowp>; - -template struct vec<3, uint8, mediump>; -template struct vec<3, uint16, mediump>; -template struct vec<3, uint32, mediump>; -template struct vec<3, uint64, mediump>; -template struct vec<3, int8, mediump>; -template struct vec<3, int16, mediump>; -template struct vec<3, int32, mediump>; -template struct vec<3, int64, mediump>; -template struct vec<3, float32, mediump>; -template struct vec<3, float64, mediump>; - -template struct vec<3, uint8, highp>; -template struct vec<3, uint16, highp>; -template struct vec<3, uint32, highp>; -template struct vec<3, uint64, highp>; -template struct vec<3, int8, highp>; -template struct vec<3, int16, highp>; -template struct vec<3, int32, highp>; -template struct vec<3, int64, highp>; -template struct vec<3, float32, highp>; -template struct vec<3, float64, highp>; - -// tvec4 type explicit instantiation -template struct vec<4, uint8, lowp>; -template struct vec<4, uint16, lowp>; -template struct vec<4, uint32, lowp>; -template struct vec<4, uint64, lowp>; -template struct vec<4, int8, lowp>; -template struct vec<4, int16, lowp>; -template struct vec<4, int32, lowp>; -template struct vec<4, int64, lowp>; -template struct vec<4, float32, lowp>; -template struct vec<4, float64, lowp>; - -template struct vec<4, uint8, mediump>; -template struct vec<4, uint16, mediump>; -template struct vec<4, uint32, mediump>; -template struct vec<4, uint64, mediump>; -template struct vec<4, int8, mediump>; -template struct vec<4, int16, mediump>; -template struct vec<4, int32, mediump>; -template struct vec<4, int64, mediump>; -template struct vec<4, float32, mediump>; -template struct vec<4, float64, mediump>; - -template struct vec<4, uint8, highp>; -template struct vec<4, uint16, highp>; -template struct vec<4, uint32, highp>; -template struct vec<4, uint64, highp>; -template struct vec<4, int8, highp>; -template struct vec<4, int16, highp>; -template struct vec<4, int32, highp>; -template struct vec<4, int64, highp>; -template struct vec<4, float32, highp>; -template struct vec<4, float64, highp>; - -// tmat2x2 type explicit instantiation -template struct mat<2, 2, float32, lowp>; -template struct mat<2, 2, float64, lowp>; - -template struct mat<2, 2, float32, mediump>; -template struct mat<2, 2, float64, mediump>; - -template struct mat<2, 2, float32, highp>; -template struct mat<2, 2, float64, highp>; - -// tmat2x3 type explicit instantiation -template struct mat<2, 3, float32, lowp>; -template struct mat<2, 3, float64, lowp>; - -template struct mat<2, 3, float32, mediump>; -template struct mat<2, 3, float64, mediump>; - -template struct mat<2, 3, float32, highp>; -template struct mat<2, 3, float64, highp>; - -// tmat2x4 type explicit instantiation -template struct mat<2, 4, float32, 
lowp>; -template struct mat<2, 4, float64, lowp>; - -template struct mat<2, 4, float32, mediump>; -template struct mat<2, 4, float64, mediump>; - -template struct mat<2, 4, float32, highp>; -template struct mat<2, 4, float64, highp>; - -// tmat3x2 type explicit instantiation -template struct mat<3, 2, float32, lowp>; -template struct mat<3, 2, float64, lowp>; - -template struct mat<3, 2, float32, mediump>; -template struct mat<3, 2, float64, mediump>; - -template struct mat<3, 2, float32, highp>; -template struct mat<3, 2, float64, highp>; - -// tmat3x3 type explicit instantiation -template struct mat<3, 3, float32, lowp>; -template struct mat<3, 3, float64, lowp>; - -template struct mat<3, 3, float32, mediump>; -template struct mat<3, 3, float64, mediump>; - -template struct mat<3, 3, float32, highp>; -template struct mat<3, 3, float64, highp>; - -// tmat3x4 type explicit instantiation -template struct mat<3, 4, float32, lowp>; -template struct mat<3, 4, float64, lowp>; - -template struct mat<3, 4, float32, mediump>; -template struct mat<3, 4, float64, mediump>; - -template struct mat<3, 4, float32, highp>; -template struct mat<3, 4, float64, highp>; - -// tmat4x2 type explicit instantiation -template struct mat<4, 2, float32, lowp>; -template struct mat<4, 2, float64, lowp>; - -template struct mat<4, 2, float32, mediump>; -template struct mat<4, 2, float64, mediump>; - -template struct mat<4, 2, float32, highp>; -template struct mat<4, 2, float64, highp>; - -// tmat4x3 type explicit instantiation -template struct mat<4, 3, float32, lowp>; -template struct mat<4, 3, float64, lowp>; - -template struct mat<4, 3, float32, mediump>; -template struct mat<4, 3, float64, mediump>; - -template struct mat<4, 3, float32, highp>; -template struct mat<4, 3, float64, highp>; - -// tmat4x4 type explicit instantiation -template struct mat<4, 4, float32, lowp>; -template struct mat<4, 4, float64, lowp>; - -template struct mat<4, 4, float32, mediump>; -template struct mat<4, 4, float64, mediump>; - -template struct mat<4, 4, float32, highp>; -template struct mat<4, 4, float64, highp>; - -// tquat type explicit instantiation -template struct qua; -template struct qua; - -template struct qua; -template struct qua; - -template struct qua; -template struct qua; - -//tdualquat type explicit instantiation -template struct tdualquat; -template struct tdualquat; - -template struct tdualquat; -template struct tdualquat; - -template struct tdualquat; -template struct tdualquat; - -}//namespace glm - diff --git a/third_party/glm/detail/qualifier.hpp b/third_party/glm/detail/qualifier.hpp deleted file mode 100755 index b6c9df0..0000000 --- a/third_party/glm/detail/qualifier.hpp +++ /dev/null @@ -1,230 +0,0 @@ -#pragma once - -#include "setup.hpp" - -namespace glm -{ - /// Qualify GLM types in term of alignment (packed, aligned) and precision in term of ULPs (lowp, mediump, highp) - enum qualifier - { - packed_highp, ///< Typed data is tightly packed in memory and operations are executed with high precision in term of ULPs - packed_mediump, ///< Typed data is tightly packed in memory and operations are executed with medium precision in term of ULPs for higher performance - packed_lowp, ///< Typed data is tightly packed in memory and operations are executed with low precision in term of ULPs to maximize performance - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - aligned_highp, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs - 
aligned_mediump, ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs for higher performance - aligned_lowp, // ///< Typed data is aligned in memory allowing SIMD optimizations and operations are executed with high precision in term of ULPs to maximize performance - aligned = aligned_highp, ///< By default aligned qualifier is also high precision -# endif - - highp = packed_highp, ///< By default highp qualifier is also packed - mediump = packed_mediump, ///< By default mediump qualifier is also packed - lowp = packed_lowp, ///< By default lowp qualifier is also packed - packed = packed_highp, ///< By default packed qualifier is also high precision - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE && defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES) - defaultp = aligned_highp -# else - defaultp = highp -# endif - }; - - typedef qualifier precision; - - template struct vec; - template struct mat; - template struct qua; - -# if GLM_HAS_TEMPLATE_ALIASES - template using tvec1 = vec<1, T, Q>; - template using tvec2 = vec<2, T, Q>; - template using tvec3 = vec<3, T, Q>; - template using tvec4 = vec<4, T, Q>; - template using tmat2x2 = mat<2, 2, T, Q>; - template using tmat2x3 = mat<2, 3, T, Q>; - template using tmat2x4 = mat<2, 4, T, Q>; - template using tmat3x2 = mat<3, 2, T, Q>; - template using tmat3x3 = mat<3, 3, T, Q>; - template using tmat3x4 = mat<3, 4, T, Q>; - template using tmat4x2 = mat<4, 2, T, Q>; - template using tmat4x3 = mat<4, 3, T, Q>; - template using tmat4x4 = mat<4, 4, T, Q>; - template using tquat = qua; -# endif - -namespace detail -{ - template - struct is_aligned - { - static const bool value = false; - }; - -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE - template<> - struct is_aligned - { - static const bool value = true; - }; - - template<> - struct is_aligned - { - static const bool value = true; - }; - - template<> - struct is_aligned - { - static const bool value = true; - }; -# endif - - template - struct storage - { - typedef struct type { - T data[L]; - } type; - }; - -# if GLM_HAS_ALIGNOF - template - struct storage - { - typedef struct alignas(L * sizeof(T)) type { - T data[L]; - } type; - }; - - template - struct storage<3, T, true> - { - typedef struct alignas(4 * sizeof(T)) type { - T data[4]; - } type; - }; -# endif - -# if GLM_ARCH & GLM_ARCH_SSE2_BIT - template<> - struct storage<4, float, true> - { - typedef glm_f32vec4 type; - }; - - template<> - struct storage<4, int, true> - { - typedef glm_i32vec4 type; - }; - - template<> - struct storage<4, unsigned int, true> - { - typedef glm_u32vec4 type; - }; - - template<> - struct storage<2, double, true> - { - typedef glm_f64vec2 type; - }; - - template<> - struct storage<2, detail::int64, true> - { - typedef glm_i64vec2 type; - }; - - template<> - struct storage<2, detail::uint64, true> - { - typedef glm_u64vec2 type; - }; -# endif - -# if (GLM_ARCH & GLM_ARCH_AVX_BIT) - template<> - struct storage<4, double, true> - { - typedef glm_f64vec4 type; - }; -# endif - -# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) - template<> - struct storage<4, detail::int64, true> - { - typedef glm_i64vec4 type; - }; - - template<> - struct storage<4, detail::uint64, true> - { - typedef glm_u64vec4 type; - }; -# endif - -# if GLM_ARCH & GLM_ARCH_NEON_BIT - template<> - struct storage<4, float, true> - { - typedef glm_f32vec4 type; - }; - - template<> - struct storage<4, int, true> - { - typedef glm_i32vec4 type; - }; - - template<> - struct storage<4, unsigned int, 
true> - { - typedef glm_u32vec4 type; - }; -# endif - - enum genTypeEnum - { - GENTYPE_VEC, - GENTYPE_MAT, - GENTYPE_QUAT - }; - - template - struct genTypeTrait - {}; - - template - struct genTypeTrait > - { - static const genTypeEnum GENTYPE = GENTYPE_MAT; - }; - - template - struct init_gentype - { - }; - - template - struct init_gentype - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity() - { - return genType(1, 0, 0, 0); - } - }; - - template - struct init_gentype - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static genType identity() - { - return genType(1); - } - }; -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/setup.hpp b/third_party/glm/detail/setup.hpp deleted file mode 100755 index 07db656..0000000 --- a/third_party/glm/detail/setup.hpp +++ /dev/null @@ -1,1135 +0,0 @@ -#ifndef GLM_SETUP_INCLUDED - -#include -#include - -#define GLM_VERSION_MAJOR 0 -#define GLM_VERSION_MINOR 9 -#define GLM_VERSION_PATCH 9 -#define GLM_VERSION_REVISION 7 -#define GLM_VERSION 997 -#define GLM_VERSION_MESSAGE "GLM: version 0.9.9.7" - -#define GLM_SETUP_INCLUDED GLM_VERSION - -/////////////////////////////////////////////////////////////////////////////////// -// Active states - -#define GLM_DISABLE 0 -#define GLM_ENABLE 1 - -/////////////////////////////////////////////////////////////////////////////////// -// Messages - -#if defined(GLM_FORCE_MESSAGES) -# define GLM_MESSAGES GLM_ENABLE -#else -# define GLM_MESSAGES GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Detect the platform - -#include "../simd/platform.h" - -/////////////////////////////////////////////////////////////////////////////////// -// Build model - -#if defined(_M_ARM64) || defined(__LP64__) || defined(_M_X64) || defined(__ppc64__) || defined(__x86_64__) -# define GLM_MODEL GLM_MODEL_64 -#elif defined(__i386__) || defined(__ppc__) || defined(__ILP32__) || defined(_M_ARM) -# define GLM_MODEL GLM_MODEL_32 -#else -# define GLM_MODEL GLM_MODEL_32 -#endif// - -#if !defined(GLM_MODEL) && GLM_COMPILER != 0 -# error "GLM_MODEL undefined, your compiler may not be supported by GLM. Add #define GLM_MODEL 0 to ignore this message." 
-#endif//GLM_MODEL - -/////////////////////////////////////////////////////////////////////////////////// -// C++ Version - -// User defines: GLM_FORCE_CXX98, GLM_FORCE_CXX03, GLM_FORCE_CXX11, GLM_FORCE_CXX14, GLM_FORCE_CXX17, GLM_FORCE_CXX2A - -#define GLM_LANG_CXX98_FLAG (1 << 1) -#define GLM_LANG_CXX03_FLAG (1 << 2) -#define GLM_LANG_CXX0X_FLAG (1 << 3) -#define GLM_LANG_CXX11_FLAG (1 << 4) -#define GLM_LANG_CXX14_FLAG (1 << 5) -#define GLM_LANG_CXX17_FLAG (1 << 6) -#define GLM_LANG_CXX2A_FLAG (1 << 7) -#define GLM_LANG_CXXMS_FLAG (1 << 8) -#define GLM_LANG_CXXGNU_FLAG (1 << 9) - -#define GLM_LANG_CXX98 GLM_LANG_CXX98_FLAG -#define GLM_LANG_CXX03 (GLM_LANG_CXX98 | GLM_LANG_CXX03_FLAG) -#define GLM_LANG_CXX0X (GLM_LANG_CXX03 | GLM_LANG_CXX0X_FLAG) -#define GLM_LANG_CXX11 (GLM_LANG_CXX0X | GLM_LANG_CXX11_FLAG) -#define GLM_LANG_CXX14 (GLM_LANG_CXX11 | GLM_LANG_CXX14_FLAG) -#define GLM_LANG_CXX17 (GLM_LANG_CXX14 | GLM_LANG_CXX17_FLAG) -#define GLM_LANG_CXX2A (GLM_LANG_CXX17 | GLM_LANG_CXX2A_FLAG) -#define GLM_LANG_CXXMS GLM_LANG_CXXMS_FLAG -#define GLM_LANG_CXXGNU GLM_LANG_CXXGNU_FLAG - -#if (defined(_MSC_EXTENSIONS)) -# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG -#elif ((GLM_COMPILER & (GLM_COMPILER_CLANG | GLM_COMPILER_GCC)) && (GLM_ARCH & GLM_ARCH_SIMD_BIT)) -# define GLM_LANG_EXT GLM_LANG_CXXMS_FLAG -#else -# define GLM_LANG_EXT 0 -#endif - -#if (defined(GLM_FORCE_CXX_UNKNOWN)) -# define GLM_LANG 0 -#elif defined(GLM_FORCE_CXX2A) -# define GLM_LANG (GLM_LANG_CXX2A | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX17) -# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX14) -# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX11) -# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) -# define GLM_LANG_STL11_FORCED -#elif defined(GLM_FORCE_CXX03) -# define GLM_LANG (GLM_LANG_CXX03 | GLM_LANG_EXT) -#elif defined(GLM_FORCE_CXX98) -# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) -#else -# if GLM_COMPILER & GLM_COMPILER_VC && defined(_MSVC_LANG) -# if GLM_COMPILER >= GLM_COMPILER_VC15_7 -# define GLM_LANG_PLATFORM _MSVC_LANG -# elif GLM_COMPILER >= GLM_COMPILER_VC15 -# if _MSVC_LANG > 201402L -# define GLM_LANG_PLATFORM 201402L -# else -# define GLM_LANG_PLATFORM _MSVC_LANG -# endif -# else -# define GLM_LANG_PLATFORM 0 -# endif -# else -# define GLM_LANG_PLATFORM 0 -# endif - -# if __cplusplus > 201703L || GLM_LANG_PLATFORM > 201703L -# define GLM_LANG (GLM_LANG_CXX2A | GLM_LANG_EXT) -# elif __cplusplus == 201703L || GLM_LANG_PLATFORM == 201703L -# define GLM_LANG (GLM_LANG_CXX17 | GLM_LANG_EXT) -# elif __cplusplus == 201402L || __cplusplus == 201500L || GLM_LANG_PLATFORM == 201402L -# define GLM_LANG (GLM_LANG_CXX14 | GLM_LANG_EXT) -# elif __cplusplus == 201103L || GLM_LANG_PLATFORM == 201103L -# define GLM_LANG (GLM_LANG_CXX11 | GLM_LANG_EXT) -# elif defined(__INTEL_CXX11_MODE__) || defined(_MSC_VER) || defined(__GXX_EXPERIMENTAL_CXX0X__) -# define GLM_LANG (GLM_LANG_CXX0X | GLM_LANG_EXT) -# elif __cplusplus == 199711L -# define GLM_LANG (GLM_LANG_CXX98 | GLM_LANG_EXT) -# else -# define GLM_LANG (0 | GLM_LANG_EXT) -# endif -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Has of C++ features - -// http://clang.llvm.org/cxx_status.html -// http://gcc.gnu.org/projects/cxx0x.html -// http://msdn.microsoft.com/en-us/library/vstudio/hh567368(v=vs.120).aspx - -// Android has multiple STLs but C++11 STL 
detection doesn't always work #284 #564 -#if GLM_PLATFORM == GLM_PLATFORM_ANDROID && !defined(GLM_LANG_STL11_FORCED) -# define GLM_HAS_CXX11_STL 0 -#elif GLM_COMPILER & GLM_COMPILER_CLANG -# if (defined(_LIBCPP_VERSION) || (GLM_LANG & GLM_LANG_CXX11_FLAG) || defined(GLM_LANG_STL11_FORCED)) -# define GLM_HAS_CXX11_STL 1 -# else -# define GLM_HAS_CXX11_STL 0 -# endif -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_CXX11_STL 1 -#else -# define GLM_HAS_CXX11_STL ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC48)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_PLATFORM != GLM_PLATFORM_WINDOWS) && (GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)))) -#endif - -// N1720 -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_STATIC_ASSERT __has_feature(cxx_static_assert) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_STATIC_ASSERT 1 -#else -# define GLM_HAS_STATIC_ASSERT ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_CUDA)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC)))) -#endif - -// N1988 -#if GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_EXTENDED_INTEGER_TYPE 1 -#else -# define GLM_HAS_EXTENDED_INTEGER_TYPE (\ - ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_VC)) || \ - ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CUDA)) || \ - ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_COMPILER & GLM_COMPILER_CLANG))) -#endif - -// N2672 Initializer lists http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_INITIALIZER_LISTS __has_feature(cxx_generalized_initializers) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_INITIALIZER_LISTS 1 -#else -# define GLM_HAS_INITIALIZER_LISTS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2544 Unrestricted unions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2544.pdf -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_UNRESTRICTED_UNIONS __has_feature(cxx_unrestricted_unions) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_UNRESTRICTED_UNIONS 1 -#else -# define GLM_HAS_UNRESTRICTED_UNIONS (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - (GLM_COMPILER & GLM_COMPILER_VC) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA))) -#endif - -// N2346 -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_DEFAULTED_FUNCTIONS __has_feature(cxx_defaulted_functions) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_DEFAULTED_FUNCTIONS 1 -#else -# define GLM_HAS_DEFAULTED_FUNCTIONS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - (GLM_COMPILER & GLM_COMPILER_CUDA))) -#endif - -// N2118 -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_RVALUE_REFERENCES __has_feature(cxx_rvalue_references) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_RVALUE_REFERENCES 1 -#else -# define GLM_HAS_RVALUE_REFERENCES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2437 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2437.pdf -#if GLM_COMPILER & 
GLM_COMPILER_CLANG -# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS __has_feature(cxx_explicit_conversions) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS 1 -#else -# define GLM_HAS_EXPLICIT_CONVERSION_OPERATORS ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL14)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2258 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2258.pdf -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_TEMPLATE_ALIASES __has_feature(cxx_alias_templates) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_TEMPLATE_ALIASES 1 -#else -# define GLM_HAS_TEMPLATE_ALIASES ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2930 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2930.html -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_RANGE_FOR __has_feature(cxx_range_for) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_RANGE_FOR 1 -#else -# define GLM_HAS_RANGE_FOR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2341 http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf -#if GLM_COMPILER & GLM_COMPILER_CLANG -# define GLM_HAS_ALIGNOF __has_feature(cxx_alignas) -#elif GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_ALIGNOF 1 -#else -# define GLM_HAS_ALIGNOF ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL15)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// N2235 Generalized Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf -// N3652 Extended Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3652.html -#if (GLM_ARCH & GLM_ARCH_SIMD_BIT) // Compiler SIMD intrinsics don't support constexpr... 
-# define GLM_HAS_CONSTEXPR 0 -#elif (GLM_COMPILER & GLM_COMPILER_CLANG) -# define GLM_HAS_CONSTEXPR __has_feature(cxx_relaxed_constexpr) -#elif (GLM_LANG & GLM_LANG_CXX14_FLAG) -# define GLM_HAS_CONSTEXPR 1 -#else -# define GLM_HAS_CONSTEXPR ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && GLM_HAS_INITIALIZER_LISTS && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_COMPILER >= GLM_COMPILER_INTEL17)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)))) -#endif - -#if GLM_HAS_CONSTEXPR -# define GLM_CONSTEXPR constexpr -#else -# define GLM_CONSTEXPR -#endif - -// -#if GLM_HAS_CONSTEXPR -# if (GLM_COMPILER & GLM_COMPILER_CLANG) -# if __has_feature(cxx_if_constexpr) -# define GLM_HAS_IF_CONSTEXPR 1 -# else -# define GLM_HAS_IF_CONSTEXPR 0 -# endif -# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) -# define GLM_HAS_IF_CONSTEXPR 1 -# else -# define GLM_HAS_IF_CONSTEXPR 0 -# endif -#else -# define GLM_HAS_IF_CONSTEXPR 0 -#endif - -#if GLM_HAS_IF_CONSTEXPR -# define GLM_IF_CONSTEXPR if constexpr -#else -# define GLM_IF_CONSTEXPR if -#endif - -// -#if GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_ASSIGNABLE 1 -#else -# define GLM_HAS_ASSIGNABLE ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC15)) || \ - ((GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC49)))) -#endif - -// -#define GLM_HAS_TRIVIAL_QUERIES 0 - -// -#if GLM_LANG & GLM_LANG_CXX11_FLAG -# define GLM_HAS_MAKE_SIGNED 1 -#else -# define GLM_HAS_MAKE_SIGNED ((GLM_LANG & GLM_LANG_CXX0X_FLAG) && (\ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC12)) || \ - ((GLM_COMPILER & GLM_COMPILER_CUDA)))) -#endif - -// -#if defined(GLM_FORCE_INTRINSICS) -# define GLM_HAS_BITSCAN_WINDOWS ((GLM_PLATFORM & GLM_PLATFORM_WINDOWS) && (\ - ((GLM_COMPILER & GLM_COMPILER_INTEL)) || \ - ((GLM_COMPILER & GLM_COMPILER_VC) && (GLM_COMPILER >= GLM_COMPILER_VC14) && (GLM_ARCH & GLM_ARCH_X86_BIT)))) -#else -# define GLM_HAS_BITSCAN_WINDOWS 0 -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// OpenMP -#ifdef _OPENMP -# if GLM_COMPILER & GLM_COMPILER_GCC -# if GLM_COMPILER >= GLM_COMPILER_GCC61 -# define GLM_HAS_OPENMP 45 -# elif GLM_COMPILER >= GLM_COMPILER_GCC49 -# define GLM_HAS_OPENMP 40 -# elif GLM_COMPILER >= GLM_COMPILER_GCC47 -# define GLM_HAS_OPENMP 31 -# else -# define GLM_HAS_OPENMP 0 -# endif -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# if GLM_COMPILER >= GLM_COMPILER_CLANG38 -# define GLM_HAS_OPENMP 31 -# else -# define GLM_HAS_OPENMP 0 -# endif -# elif GLM_COMPILER & GLM_COMPILER_VC -# define GLM_HAS_OPENMP 20 -# elif GLM_COMPILER & GLM_COMPILER_INTEL -# if GLM_COMPILER >= GLM_COMPILER_INTEL16 -# define GLM_HAS_OPENMP 40 -# else -# define GLM_HAS_OPENMP 0 -# endif -# else -# define GLM_HAS_OPENMP 0 -# endif -#else -# define GLM_HAS_OPENMP 0 -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// nullptr - -#if GLM_LANG & GLM_LANG_CXX0X_FLAG -# define GLM_CONFIG_NULLPTR GLM_ENABLE -#else -# define GLM_CONFIG_NULLPTR GLM_DISABLE -#endif - -#if GLM_CONFIG_NULLPTR == GLM_ENABLE -# define GLM_NULLPTR nullptr -#else -# define GLM_NULLPTR 0 -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Static assert - -#if GLM_HAS_STATIC_ASSERT -# define GLM_STATIC_ASSERT(x, message) static_assert(x, message) -#elif GLM_COMPILER & GLM_COMPILER_VC -# define GLM_STATIC_ASSERT(x, message) typedef char 
__CASSERT__##__LINE__[(x) ? 1 : -1] -#else -# define GLM_STATIC_ASSERT(x, message) assert(x) -#endif//GLM_LANG - -/////////////////////////////////////////////////////////////////////////////////// -// Qualifiers - -#if GLM_COMPILER & GLM_COMPILER_CUDA -# define GLM_CUDA_FUNC_DEF __device__ __host__ -# define GLM_CUDA_FUNC_DECL __device__ __host__ -#else -# define GLM_CUDA_FUNC_DEF -# define GLM_CUDA_FUNC_DECL -#endif - -#if defined(GLM_FORCE_INLINE) -# if GLM_COMPILER & GLM_COMPILER_VC -# define GLM_INLINE __forceinline -# define GLM_NEVER_INLINE __declspec((noinline)) -# elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG) -# define GLM_INLINE inline __attribute__((__always_inline__)) -# define GLM_NEVER_INLINE __attribute__((__noinline__)) -# elif GLM_COMPILER & GLM_COMPILER_CUDA -# define GLM_INLINE __forceinline__ -# define GLM_NEVER_INLINE __noinline__ -# else -# define GLM_INLINE inline -# define GLM_NEVER_INLINE -# endif//GLM_COMPILER -#else -# define GLM_INLINE inline -# define GLM_NEVER_INLINE -#endif//defined(GLM_FORCE_INLINE) - -#define GLM_FUNC_DECL GLM_CUDA_FUNC_DECL -#define GLM_FUNC_QUALIFIER GLM_CUDA_FUNC_DEF GLM_INLINE - -/////////////////////////////////////////////////////////////////////////////////// -// Swizzle operators - -// User defines: GLM_FORCE_SWIZZLE - -#define GLM_SWIZZLE_DISABLED 0 -#define GLM_SWIZZLE_OPERATOR 1 -#define GLM_SWIZZLE_FUNCTION 2 - -#if defined(GLM_FORCE_XYZW_ONLY) -# undef GLM_FORCE_SWIZZLE -#endif - -#if defined(GLM_SWIZZLE) -# pragma message("GLM: GLM_SWIZZLE is deprecated, use GLM_FORCE_SWIZZLE instead.") -# define GLM_FORCE_SWIZZLE -#endif - -#if defined(GLM_FORCE_SWIZZLE) && (GLM_LANG & GLM_LANG_CXXMS_FLAG) -# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_OPERATOR -#elif defined(GLM_FORCE_SWIZZLE) -# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_FUNCTION -#else -# define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_DISABLED -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Allows using not basic types as genType - -// #define GLM_FORCE_UNRESTRICTED_GENTYPE - -#ifdef GLM_FORCE_UNRESTRICTED_GENTYPE -# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_ENABLE -#else -# define GLM_CONFIG_UNRESTRICTED_GENTYPE GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Clip control, define GLM_FORCE_DEPTH_ZERO_TO_ONE before including GLM -// to use a clip space between 0 to 1. -// Coordinate system, define GLM_FORCE_LEFT_HANDED before including GLM -// to use left handed coordinate system by default. 
- -#define GLM_CLIP_CONTROL_ZO_BIT (1 << 0) // ZERO_TO_ONE -#define GLM_CLIP_CONTROL_NO_BIT (1 << 1) // NEGATIVE_ONE_TO_ONE -#define GLM_CLIP_CONTROL_LH_BIT (1 << 2) // LEFT_HANDED, For DirectX, Metal, Vulkan -#define GLM_CLIP_CONTROL_RH_BIT (1 << 3) // RIGHT_HANDED, For OpenGL, default in GLM - -#define GLM_CLIP_CONTROL_LH_ZO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_ZO_BIT) -#define GLM_CLIP_CONTROL_LH_NO (GLM_CLIP_CONTROL_LH_BIT | GLM_CLIP_CONTROL_NO_BIT) -#define GLM_CLIP_CONTROL_RH_ZO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_ZO_BIT) -#define GLM_CLIP_CONTROL_RH_NO (GLM_CLIP_CONTROL_RH_BIT | GLM_CLIP_CONTROL_NO_BIT) - -#ifdef GLM_FORCE_DEPTH_ZERO_TO_ONE -# ifdef GLM_FORCE_LEFT_HANDED -# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_ZO -# else -# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_ZO -# endif -#else -# ifdef GLM_FORCE_LEFT_HANDED -# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_LH_NO -# else -# define GLM_CONFIG_CLIP_CONTROL GLM_CLIP_CONTROL_RH_NO -# endif -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Qualifiers - -#if (GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS)) -# define GLM_DEPRECATED __declspec(deprecated) -# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef __declspec(align(alignment)) type name -#elif GLM_COMPILER & (GLM_COMPILER_GCC | GLM_COMPILER_CLANG | GLM_COMPILER_INTEL) -# define GLM_DEPRECATED __attribute__((__deprecated__)) -# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __attribute__((aligned(alignment))) -#elif GLM_COMPILER & GLM_COMPILER_CUDA -# define GLM_DEPRECATED -# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name __align__(x) -#else -# define GLM_DEPRECATED -# define GLM_ALIGNED_TYPEDEF(type, name, alignment) typedef type name -#endif - -/////////////////////////////////////////////////////////////////////////////////// - -#ifdef GLM_FORCE_EXPLICIT_CTOR -# define GLM_EXPLICIT explicit -#else -# define GLM_EXPLICIT -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// SYCL - -#if GLM_COMPILER==GLM_COMPILER_SYCL - -#include <CL/sycl.hpp> -#include <limits> - -namespace glm { -namespace std { - // Import SYCL's functions into the namespace glm::std to force their usages. - // It's important to use the math built-in function (sin, exp, ...) - // of SYCL instead the std ones. - using namespace cl::sycl; - - /////////////////////////////////////////////////////////////////////////////// - // Import some "harmless" std's stuffs used by glm into - // the new glm::std namespace. - template<typename T> - using numeric_limits = ::std::numeric_limits<T>; - - using ::std::size_t; - - using ::std::uint8_t; - using ::std::uint16_t; - using ::std::uint32_t; - using ::std::uint64_t; - - using ::std::int8_t; - using ::std::int16_t; - using ::std::int32_t; - using ::std::int64_t; - - using ::std::make_unsigned; - /////////////////////////////////////////////////////////////////////////////// -} //namespace std -} //namespace glm - -#endif - -/////////////////////////////////////////////////////////////////////////////////// - -/////////////////////////////////////////////////////////////////////////////////// -// Length type: all length functions returns a length_t type. -// When GLM_FORCE_SIZE_T_LENGTH is defined, length_t is a typedef of size_t otherwise -// length_t is a typedef of int like GLSL defines it. 
- -#define GLM_LENGTH_INT 1 -#define GLM_LENGTH_SIZE_T 2 - -#ifdef GLM_FORCE_SIZE_T_LENGTH -# define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_SIZE_T -#else -# define GLM_CONFIG_LENGTH_TYPE GLM_LENGTH_INT -#endif - -namespace glm -{ - using std::size_t; -# if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T - typedef size_t length_t; -# else - typedef int length_t; -# endif -}//namespace glm - -/////////////////////////////////////////////////////////////////////////////////// -// constexpr - -#if GLM_HAS_CONSTEXPR -# define GLM_CONFIG_CONSTEXP GLM_ENABLE - - namespace glm - { - template<typename T, std::size_t N> - constexpr std::size_t countof(T const (&)[N]) - { - return N; - } - }//namespace glm -# define GLM_COUNTOF(arr) glm::countof(arr) -#elif defined(_MSC_VER) -# define GLM_CONFIG_CONSTEXP GLM_DISABLE - -# define GLM_COUNTOF(arr) _countof(arr) -#else -# define GLM_CONFIG_CONSTEXP GLM_DISABLE - -# define GLM_COUNTOF(arr) sizeof(arr) / sizeof(arr[0]) -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// uint - -namespace glm{ -namespace detail -{ - template<typename T> - struct is_int - { - enum test {value = 0}; - }; - - template<> - struct is_int<unsigned int> - { - enum test {value = ~0}; - }; - - template<> - struct is_int<signed int> - { - enum test {value = ~0}; - }; -}//namespace detail - - typedef unsigned int uint; -}//namespace glm - -/////////////////////////////////////////////////////////////////////////////////// -// 64-bit int - -#if GLM_HAS_EXTENDED_INTEGER_TYPE -# include <cstdint> -#endif - -namespace glm{ -namespace detail -{ -# if GLM_HAS_EXTENDED_INTEGER_TYPE - typedef std::uint64_t uint64; - typedef std::int64_t int64; -# elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) // C99 detected, 64 bit types available - typedef uint64_t uint64; - typedef int64_t int64; -# elif GLM_COMPILER & GLM_COMPILER_VC - typedef unsigned __int64 uint64; - typedef signed __int64 int64; -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic ignored "-Wlong-long" - __extension__ typedef unsigned long long uint64; - __extension__ typedef signed long long int64; -# elif (GLM_COMPILER & GLM_COMPILER_CLANG) -# pragma clang diagnostic ignored "-Wc++11-long-long" - typedef unsigned long long uint64; - typedef signed long long int64; -# else//unknown compiler - typedef unsigned long long uint64; - typedef signed long long int64; -# endif -}//namespace detail -}//namespace glm - -/////////////////////////////////////////////////////////////////////////////////// -// make_unsigned - -#if GLM_HAS_MAKE_SIGNED -# include <type_traits> - -namespace glm{ -namespace detail -{ - using std::make_unsigned; -}//namespace detail -}//namespace glm - -#else - -namespace glm{ -namespace detail -{ - template<typename T> - struct make_unsigned - {}; - - template<> - struct make_unsigned<char> - { - typedef unsigned char type; - }; - - template<> - struct make_unsigned<signed char> - { - typedef unsigned char type; - }; - - template<> - struct make_unsigned<short> - { - typedef unsigned short type; - }; - - template<> - struct make_unsigned<int> - { - typedef unsigned int type; - }; - - template<> - struct make_unsigned<long> - { - typedef unsigned long type; - }; - - template<> - struct make_unsigned<int64> - { - typedef uint64 type; - }; - - template<> - struct make_unsigned<unsigned char> - { - typedef unsigned char type; - }; - - template<> - struct make_unsigned<unsigned short> - { - typedef unsigned short type; - }; - - template<> - struct make_unsigned<unsigned int> - { - typedef unsigned int type; - }; - - template<> - struct make_unsigned<unsigned long> - { - typedef unsigned long type; - }; - - template<> - struct make_unsigned<uint64> - { - typedef 
uint64 type; - }; -}//namespace detail -}//namespace glm -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Only use x, y, z, w as vector type components - -#ifdef GLM_FORCE_XYZW_ONLY -# define GLM_CONFIG_XYZW_ONLY GLM_ENABLE -#else -# define GLM_CONFIG_XYZW_ONLY GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Configure the use of defaulted initialized types - -#define GLM_CTOR_INIT_DISABLE 0 -#define GLM_CTOR_INITIALIZER_LIST 1 -#define GLM_CTOR_INITIALISATION 2 - -#if defined(GLM_FORCE_CTOR_INIT) && GLM_HAS_INITIALIZER_LISTS -# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALIZER_LIST -#elif defined(GLM_FORCE_CTOR_INIT) && !GLM_HAS_INITIALIZER_LISTS -# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INITIALISATION -#else -# define GLM_CONFIG_CTOR_INIT GLM_CTOR_INIT_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Use SIMD instruction sets - -#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (GLM_ARCH & GLM_ARCH_SIMD_BIT) -# define GLM_CONFIG_SIMD GLM_ENABLE -#else -# define GLM_CONFIG_SIMD GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Configure the use of defaulted function - -#if GLM_HAS_DEFAULTED_FUNCTIONS && GLM_CONFIG_CTOR_INIT == GLM_CTOR_INIT_DISABLE -# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_ENABLE -# define GLM_DEFAULT = default -#else -# define GLM_CONFIG_DEFAULTED_FUNCTIONS GLM_DISABLE -# define GLM_DEFAULT -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Configure the use of aligned gentypes - -#ifdef GLM_FORCE_ALIGNED // Legacy define -# define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES -#endif - -#ifdef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES -# define GLM_FORCE_ALIGNED_GENTYPES -#endif - -#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (defined(GLM_FORCE_ALIGNED_GENTYPES) || (GLM_CONFIG_SIMD == GLM_ENABLE)) -# define GLM_CONFIG_ALIGNED_GENTYPES GLM_ENABLE -#else -# define GLM_CONFIG_ALIGNED_GENTYPES GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Configure the use of anonymous structure as implementation detail - -#if ((GLM_CONFIG_SIMD == GLM_ENABLE) || (GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR) || (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE)) -# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_ENABLE -#else -# define GLM_CONFIG_ANONYMOUS_STRUCT GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Silent warnings - -#ifdef GLM_FORCE_SILENT_WARNINGS -# define GLM_SILENT_WARNINGS GLM_ENABLE -#else -# define GLM_SILENT_WARNINGS GLM_DISABLE -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Precision - -#define GLM_HIGHP 1 -#define GLM_MEDIUMP 2 -#define GLM_LOWP 3 - -#if defined(GLM_FORCE_PRECISION_HIGHP_BOOL) || defined(GLM_PRECISION_HIGHP_BOOL) -# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP -#elif defined(GLM_FORCE_PRECISION_MEDIUMP_BOOL) || defined(GLM_PRECISION_MEDIUMP_BOOL) -# define GLM_CONFIG_PRECISION_BOOL GLM_MEDIUMP -#elif defined(GLM_FORCE_PRECISION_LOWP_BOOL) || defined(GLM_PRECISION_LOWP_BOOL) -# define GLM_CONFIG_PRECISION_BOOL GLM_LOWP -#else -# define GLM_CONFIG_PRECISION_BOOL GLM_HIGHP -#endif - -#if defined(GLM_FORCE_PRECISION_HIGHP_INT) || defined(GLM_PRECISION_HIGHP_INT) -# define GLM_CONFIG_PRECISION_INT GLM_HIGHP -#elif 
defined(GLM_FORCE_PRECISION_MEDIUMP_INT) || defined(GLM_PRECISION_MEDIUMP_INT) -# define GLM_CONFIG_PRECISION_INT GLM_MEDIUMP -#elif defined(GLM_FORCE_PRECISION_LOWP_INT) || defined(GLM_PRECISION_LOWP_INT) -# define GLM_CONFIG_PRECISION_INT GLM_LOWP -#else -# define GLM_CONFIG_PRECISION_INT GLM_HIGHP -#endif - -#if defined(GLM_FORCE_PRECISION_HIGHP_UINT) || defined(GLM_PRECISION_HIGHP_UINT) -# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP -#elif defined(GLM_FORCE_PRECISION_MEDIUMP_UINT) || defined(GLM_PRECISION_MEDIUMP_UINT) -# define GLM_CONFIG_PRECISION_UINT GLM_MEDIUMP -#elif defined(GLM_FORCE_PRECISION_LOWP_UINT) || defined(GLM_PRECISION_LOWP_UINT) -# define GLM_CONFIG_PRECISION_UINT GLM_LOWP -#else -# define GLM_CONFIG_PRECISION_UINT GLM_HIGHP -#endif - -#if defined(GLM_FORCE_PRECISION_HIGHP_FLOAT) || defined(GLM_PRECISION_HIGHP_FLOAT) -# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP -#elif defined(GLM_FORCE_PRECISION_MEDIUMP_FLOAT) || defined(GLM_PRECISION_MEDIUMP_FLOAT) -# define GLM_CONFIG_PRECISION_FLOAT GLM_MEDIUMP -#elif defined(GLM_FORCE_PRECISION_LOWP_FLOAT) || defined(GLM_PRECISION_LOWP_FLOAT) -# define GLM_CONFIG_PRECISION_FLOAT GLM_LOWP -#else -# define GLM_CONFIG_PRECISION_FLOAT GLM_HIGHP -#endif - -#if defined(GLM_FORCE_PRECISION_HIGHP_DOUBLE) || defined(GLM_PRECISION_HIGHP_DOUBLE) -# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP -#elif defined(GLM_FORCE_PRECISION_MEDIUMP_DOUBLE) || defined(GLM_PRECISION_MEDIUMP_DOUBLE) -# define GLM_CONFIG_PRECISION_DOUBLE GLM_MEDIUMP -#elif defined(GLM_FORCE_PRECISION_LOWP_DOUBLE) || defined(GLM_PRECISION_LOWP_DOUBLE) -# define GLM_CONFIG_PRECISION_DOUBLE GLM_LOWP -#else -# define GLM_CONFIG_PRECISION_DOUBLE GLM_HIGHP -#endif - -/////////////////////////////////////////////////////////////////////////////////// -// Check inclusions of different versions of GLM - -#elif ((GLM_SETUP_INCLUDED != GLM_VERSION) && !defined(GLM_FORCE_IGNORE_VERSION)) -# error "GLM error: A different version of GLM is already included. Define GLM_FORCE_IGNORE_VERSION before including GLM headers to ignore this error." 
-#elif GLM_SETUP_INCLUDED == GLM_VERSION - -/////////////////////////////////////////////////////////////////////////////////// -// Messages - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_DISPLAYED) -# define GLM_MESSAGE_DISPLAYED -# define GLM_STR_HELPER(x) #x -# define GLM_STR(x) GLM_STR_HELPER(x) - - // Report GLM version -# pragma message (GLM_STR(GLM_VERSION_MESSAGE)) - - // Report C++ language -# if (GLM_LANG & GLM_LANG_CXX2A_FLAG) && (GLM_LANG & GLM_LANG_EXT) -# pragma message("GLM: C++ 2A with extensions") -# elif (GLM_LANG & GLM_LANG_CXX2A_FLAG) -# pragma message("GLM: C++ 2A") -# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) && (GLM_LANG & GLM_LANG_EXT) -# pragma message("GLM: C++ 17 with extensions") -# elif (GLM_LANG & GLM_LANG_CXX17_FLAG) -# pragma message("GLM: C++ 17") -# elif (GLM_LANG & GLM_LANG_CXX14_FLAG) && (GLM_LANG & GLM_LANG_EXT) -# pragma message("GLM: C++ 14 with extensions") -# elif (GLM_LANG & GLM_LANG_CXX14_FLAG) -# pragma message("GLM: C++ 14") -# elif (GLM_LANG & GLM_LANG_CXX11_FLAG) && (GLM_LANG & GLM_LANG_EXT) -# pragma message("GLM: C++ 11 with extensions") -# elif (GLM_LANG & GLM_LANG_CXX11_FLAG) -# pragma message("GLM: C++ 11") -# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) && (GLM_LANG & GLM_LANG_EXT) -# pragma message("GLM: C++ 0x with extensions") -# elif (GLM_LANG & GLM_LANG_CXX0X_FLAG) -# pragma message("GLM: C++ 0x") -# elif (GLM_LANG & GLM_LANG_CXX03_FLAG) && (GLM_LANG & GLM_LANG_EXT) -# pragma message("GLM: C++ 03 with extensions") -# elif (GLM_LANG & GLM_LANG_CXX03_FLAG) -# pragma message("GLM: C++ 03") -# elif (GLM_LANG & GLM_LANG_CXX98_FLAG) && (GLM_LANG & GLM_LANG_EXT) -# pragma message("GLM: C++ 98 with extensions") -# elif (GLM_LANG & GLM_LANG_CXX98_FLAG) -# pragma message("GLM: C++ 98") -# else -# pragma message("GLM: C++ language undetected") -# endif//GLM_LANG - - // Report compiler detection -# if GLM_COMPILER & GLM_COMPILER_CUDA -# pragma message("GLM: CUDA compiler detected") -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma message("GLM: Visual C++ compiler detected") -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma message("GLM: Clang compiler detected") -# elif GLM_COMPILER & GLM_COMPILER_INTEL -# pragma message("GLM: Intel Compiler detected") -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma message("GLM: GCC compiler detected") -# else -# pragma message("GLM: Compiler not detected") -# endif - - // Report build target -# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits with AVX2 instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_AVX2_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: x86 32 bits with AVX2 instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits with AVX instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_AVX_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: x86 32 bits with AVX instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits with SSE4.2 instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_SSE42_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: x86 32 bits with SSE4.2 instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits with SSE4.1 instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_SSE41_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: 
x86 32 bits with SSE4.1 instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits with SSSE3 instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_SSSE3_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: x86 32 bits with SSSE3 instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits with SSE3 instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_SSE3_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: x86 32 bits with SSE3 instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits with SSE2 instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_SSE2_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: x86 32 bits with SSE2 instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: x86 64 bits build target") -# elif (GLM_ARCH & GLM_ARCH_X86_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: x86 32 bits build target") - -# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: ARM 64 bits with Neon instruction set build target") -# elif (GLM_ARCH & GLM_ARCH_NEON_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: ARM 32 bits with Neon instruction set build target") - -# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: ARM 64 bits build target") -# elif (GLM_ARCH & GLM_ARCH_ARM_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: ARM 32 bits build target") - -# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: MIPS 64 bits build target") -# elif (GLM_ARCH & GLM_ARCH_MIPS_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: MIPS 32 bits build target") - -# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_64) -# pragma message("GLM: PowerPC 64 bits build target") -# elif (GLM_ARCH & GLM_ARCH_PPC_BIT) && (GLM_MODEL == GLM_MODEL_32) -# pragma message("GLM: PowerPC 32 bits build target") -# else -# pragma message("GLM: Unknown build target") -# endif//GLM_ARCH - - // Report platform name -# if(GLM_PLATFORM & GLM_PLATFORM_QNXNTO) -# pragma message("GLM: QNX platform detected") -//# elif(GLM_PLATFORM & GLM_PLATFORM_IOS) -//# pragma message("GLM: iOS platform detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_APPLE) -# pragma message("GLM: Apple platform detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_WINCE) -# pragma message("GLM: WinCE platform detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_WINDOWS) -# pragma message("GLM: Windows platform detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_CHROME_NACL) -# pragma message("GLM: Native Client detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) -# pragma message("GLM: Android platform detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_LINUX) -# pragma message("GLM: Linux platform detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_UNIX) -# pragma message("GLM: UNIX platform detected") -# elif(GLM_PLATFORM & GLM_PLATFORM_UNKNOWN) -# pragma message("GLM: platform unknown") -# else -# pragma message("GLM: platform not detected") -# endif - - // Report whether only xyzw component are used -# if defined GLM_FORCE_XYZW_ONLY -# pragma message("GLM: GLM_FORCE_XYZW_ONLY is defined. Only x, y, z and w component are available in vector type. 
This define disables swizzle operators and SIMD instruction sets.") -# endif - - // Report swizzle operator support -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling operators enabled.") -# elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# pragma message("GLM: GLM_FORCE_SWIZZLE is defined, swizzling functions enabled. Enable compiler C++ language extensions to enable swizzle operators.") -# else -# pragma message("GLM: GLM_FORCE_SWIZZLE is undefined. swizzling functions or operators are disabled.") -# endif - - // Report .length() type -# if GLM_CONFIG_LENGTH_TYPE == GLM_LENGTH_SIZE_T -# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is defined. .length() returns a glm::length_t, a typedef of std::size_t.") -# else -# pragma message("GLM: GLM_FORCE_SIZE_T_LENGTH is undefined. .length() returns a glm::length_t, a typedef of int following GLSL.") -# endif - -# if GLM_CONFIG_UNRESTRICTED_GENTYPE == GLM_ENABLE -# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is defined. Removes GLSL restrictions on valid function genTypes.") -# else -# pragma message("GLM: GLM_FORCE_UNRESTRICTED_GENTYPE is undefined. Follows strictly GLSL on valid function genTypes.") -# endif - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is defined. Ignores C++ warnings from using C++ language extensions.") -# else -# pragma message("GLM: GLM_FORCE_SILENT_WARNINGS is undefined. Shows C++ warnings from using C++ language extensions.") -# endif - -# ifdef GLM_FORCE_SINGLE_ONLY -# pragma message("GLM: GLM_FORCE_SINGLE_ONLY is defined. Using only single precision floating-point types.") -# endif - -# if defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE) -# undef GLM_FORCE_ALIGNED_GENTYPES -# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined, allowing aligned types. This prevents the use of C++ constexpr.") -# elif defined(GLM_FORCE_ALIGNED_GENTYPES) && (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE) -# undef GLM_FORCE_ALIGNED_GENTYPES -# pragma message("GLM: GLM_FORCE_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.") -# endif - -# if defined(GLM_FORCE_DEFAULT_ALIGNED_GENTYPES) -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE -# undef GLM_FORCE_DEFAULT_ALIGNED_GENTYPES -# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined but is disabled. It requires C++11 and language extensions.") -# elif GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE -# pragma message("GLM: GLM_FORCE_DEFAULT_ALIGNED_GENTYPES is defined. All gentypes (e.g. vec3) will be aligned and padded by default.") -# endif -# endif - -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT -# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is defined. Using zero to one depth clip space.") -# else -# pragma message("GLM: GLM_FORCE_DEPTH_ZERO_TO_ONE is undefined. Using negative one to one depth clip space.") -# endif - -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT -# pragma message("GLM: GLM_FORCE_LEFT_HANDED is defined. Using left handed coordinate system.") -# else -# pragma message("GLM: GLM_FORCE_LEFT_HANDED is undefined. 
Using right handed coordinate system.") -# endif -#endif//GLM_MESSAGES - -#endif//GLM_SETUP_INCLUDED diff --git a/third_party/glm/detail/type_float.hpp b/third_party/glm/detail/type_float.hpp deleted file mode 100755 index c8037eb..0000000 --- a/third_party/glm/detail/type_float.hpp +++ /dev/null @@ -1,68 +0,0 @@ -#pragma once - -#include "setup.hpp" - -#if GLM_COMPILER == GLM_COMPILER_VC12 -# pragma warning(push) -# pragma warning(disable: 4512) // assignment operator could not be generated -#endif - -namespace glm{ -namespace detail -{ - template - union float_t - {}; - - // https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ - template <> - union float_t - { - typedef int int_type; - typedef float float_type; - - GLM_CONSTEXPR float_t(float_type Num = 0.0f) : f(Num) {} - - GLM_CONSTEXPR float_t& operator=(float_t const& x) - { - f = x.f; - return *this; - } - - // Portable extraction of components. - GLM_CONSTEXPR bool negative() const { return i < 0; } - GLM_CONSTEXPR int_type mantissa() const { return i & ((1 << 23) - 1); } - GLM_CONSTEXPR int_type exponent() const { return (i >> 23) & ((1 << 8) - 1); } - - int_type i; - float_type f; - }; - - template <> - union float_t - { - typedef detail::int64 int_type; - typedef double float_type; - - GLM_CONSTEXPR float_t(float_type Num = static_cast(0)) : f(Num) {} - - GLM_CONSTEXPR float_t& operator=(float_t const& x) - { - f = x.f; - return *this; - } - - // Portable extraction of components. - GLM_CONSTEXPR bool negative() const { return i < 0; } - GLM_CONSTEXPR int_type mantissa() const { return i & ((int_type(1) << 52) - 1); } - GLM_CONSTEXPR int_type exponent() const { return (i >> 52) & ((int_type(1) << 11) - 1); } - - int_type i; - float_type f; - }; -}//namespace detail -}//namespace glm - -#if GLM_COMPILER == GLM_COMPILER_VC12 -# pragma warning(pop) -#endif diff --git a/third_party/glm/detail/type_half.hpp b/third_party/glm/detail/type_half.hpp deleted file mode 100755 index 40b8bec..0000000 --- a/third_party/glm/detail/type_half.hpp +++ /dev/null @@ -1,16 +0,0 @@ -#pragma once - -#include "setup.hpp" - -namespace glm{ -namespace detail -{ - typedef short hdata; - - GLM_FUNC_DECL float toFloat32(hdata value); - GLM_FUNC_DECL hdata toFloat16(float const& value); - -}//namespace detail -}//namespace glm - -#include "type_half.inl" diff --git a/third_party/glm/detail/type_half.inl b/third_party/glm/detail/type_half.inl deleted file mode 100755 index b0723e3..0000000 --- a/third_party/glm/detail/type_half.inl +++ /dev/null @@ -1,241 +0,0 @@ -namespace glm{ -namespace detail -{ - GLM_FUNC_QUALIFIER float overflow() - { - volatile float f = 1e10; - - for(int i = 0; i < 10; ++i) - f *= f; // this will overflow before the for loop terminates - return f; - } - - union uif32 - { - GLM_FUNC_QUALIFIER uif32() : - i(0) - {} - - GLM_FUNC_QUALIFIER uif32(float f_) : - f(f_) - {} - - GLM_FUNC_QUALIFIER uif32(unsigned int i_) : - i(i_) - {} - - float f; - unsigned int i; - }; - - GLM_FUNC_QUALIFIER float toFloat32(hdata value) - { - int s = (value >> 15) & 0x00000001; - int e = (value >> 10) & 0x0000001f; - int m = value & 0x000003ff; - - if(e == 0) - { - if(m == 0) - { - // - // Plus or minus zero - // - - detail::uif32 result; - result.i = static_cast(s << 31); - return result.f; - } - else - { - // - // Denormalized number -- renormalize it - // - - while(!(m & 0x00000400)) - { - m <<= 1; - e -= 1; - } - - e += 1; - m &= ~0x00000400; - } - } - else if(e == 31) - { - if(m == 0) - { - // - // Positive or 
negative infinity - // - - uif32 result; - result.i = static_cast((s << 31) | 0x7f800000); - return result.f; - } - else - { - // - // Nan -- preserve sign and significand bits - // - - uif32 result; - result.i = static_cast((s << 31) | 0x7f800000 | (m << 13)); - return result.f; - } - } - - // - // Normalized number - // - - e = e + (127 - 15); - m = m << 13; - - // - // Assemble s, e and m. - // - - uif32 Result; - Result.i = static_cast((s << 31) | (e << 23) | m); - return Result.f; - } - - GLM_FUNC_QUALIFIER hdata toFloat16(float const& f) - { - uif32 Entry; - Entry.f = f; - int i = static_cast(Entry.i); - - // - // Our floating point number, f, is represented by the bit - // pattern in integer i. Disassemble that bit pattern into - // the sign, s, the exponent, e, and the significand, m. - // Shift s into the position where it will go in the - // resulting half number. - // Adjust e, accounting for the different exponent bias - // of float and half (127 versus 15). - // - - int s = (i >> 16) & 0x00008000; - int e = ((i >> 23) & 0x000000ff) - (127 - 15); - int m = i & 0x007fffff; - - // - // Now reassemble s, e and m into a half: - // - - if(e <= 0) - { - if(e < -10) - { - // - // E is less than -10. The absolute value of f is - // less than half_MIN (f may be a small normalized - // float, a denormalized float or a zero). - // - // We convert f to a half zero. - // - - return hdata(s); - } - - // - // E is between -10 and 0. F is a normalized float, - // whose magnitude is less than __half_NRM_MIN. - // - // We convert f to a denormalized half. - // - - m = (m | 0x00800000) >> (1 - e); - - // - // Round to nearest, round "0.5" up. - // - // Rounding may cause the significand to overflow and make - // our number normalized. Because of the way a half's bits - // are laid out, we don't have to treat this case separately; - // the code below will handle it correctly. - // - - if(m & 0x00001000) - m += 0x00002000; - - // - // Assemble the half from s, e (zero) and m. - // - - return hdata(s | (m >> 13)); - } - else if(e == 0xff - (127 - 15)) - { - if(m == 0) - { - // - // F is an infinity; convert f to a half - // infinity with the same sign as f. - // - - return hdata(s | 0x7c00); - } - else - { - // - // F is a NAN; we produce a half NAN that preserves - // the sign bit and the 10 leftmost bits of the - // significand of f, with one exception: If the 10 - // leftmost bits are all zero, the NAN would turn - // into an infinity, so we have to set at least one - // bit in the significand. - // - - m >>= 13; - - return hdata(s | 0x7c00 | m | (m == 0)); - } - } - else - { - // - // E is greater than zero. F is a normalized float. - // We try to convert f to a normalized half. - // - - // - // Round to nearest, round "0.5" up - // - - if(m & 0x00001000) - { - m += 0x00002000; - - if(m & 0x00800000) - { - m = 0; // overflow in significand, - e += 1; // adjust exponent - } - } - - // - // Handle exponent overflow - // - - if (e > 30) - { - overflow(); // Cause a hardware floating point overflow; - - return hdata(s | 0x7c00); - // if this returns, the half becomes an - } // infinity with the same sign as f. - - // - // Assemble the half from s, e and m. 
- // - - return hdata(s | (e << 10) | (m >> 13)); - } - } - -}//namespace detail -}//namespace glm diff --git a/third_party/glm/detail/type_mat2x2.hpp b/third_party/glm/detail/type_mat2x2.hpp deleted file mode 100755 index 033908f..0000000 --- a/third_party/glm/detail/type_mat2x2.hpp +++ /dev/null @@ -1,177 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat2x2.hpp - -#pragma once - -#include "type_vec2.hpp" -#include -#include - -namespace glm -{ - template - struct mat<2, 2, T, Q> - { - typedef vec<2, T, Q> col_type; - typedef vec<2, T, Q> row_type; - typedef mat<2, 2, T, Q> type; - typedef mat<2, 2, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[2]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 2, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T const& x1, T const& y1, - T const& x2, T const& y2); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v1, - col_type const& v2); - - // -- Conversions -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - U const& x1, V const& y1, - M const& x2, N const& y2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<2, U, Q> const& v1, - vec<2, V, Q> const& v2); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator=(mat<2, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator+=(mat<2, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator-=(mat<2, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator*=(mat<2, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator/=(U s); - template - GLM_FUNC_DECL mat<2, 2, T, Q> & operator/=(mat<2, 2, U, Q> const& m); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<2, 2, T, Q> & operator++ (); - GLM_FUNC_DECL mat<2, 2, T, Q> & operator-- (); - GLM_FUNC_DECL mat<2, 2, T, Q> operator++(int); - GLM_FUNC_DECL mat<2, 2, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar); 
- - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<2, 2, T, Q>::col_type operator*(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<2, 2, T, Q>::row_type operator*(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<2, 2, T, Q>::col_type operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2); -} //namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat2x2.inl" -#endif diff --git a/third_party/glm/detail/type_mat2x2.inl b/third_party/glm/detail/type_mat2x2.inl deleted file mode 100755 index fe5d1aa..0000000 --- a/third_party/glm/detail/type_mat2x2.inl +++ /dev/null @@ -1,536 +0,0 @@ -#include "../matrix.hpp" - -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0), col_type(0, 1)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0); - this->value[1] = col_type(0, 1); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{m[0], m[1]} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(T scalar) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(scalar, 0), col_type(0, scalar)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(scalar, 0); - this->value[1] = col_type(0, scalar); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat - ( - T const& x0, T const& y0, - T 
const& x1, T const& y1 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(col_type const& v0, col_type const& v1) -# if GLM_HAS_INITIALIZER_LISTS - : value{v0, v1} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; -# endif - } - - // -- Conversion constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat - ( - X1 const& x1, Y1 const& y1, - X2 const& x2, Y2 const& y2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(static_cast(x1), value_type(y1)), col_type(static_cast(x2), value_type(y2)) } -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(static_cast(x1), value_type(y1)); - this->value[1] = col_type(static_cast(x2), value_type(y2)); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); -# endif - } - - // -- mat2x2 matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 2, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) 
-# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 2, T, Q>::col_type const& mat<2, 2, T, Q>::operator[](typename mat<2, 2, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator=(mat<2, 2, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(U scalar) - { - this->value[0] += scalar; - this->value[1] += scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator+=(mat<2, 2, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(U scalar) - { - this->value[0] -= scalar; - this->value[1] -= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator-=(mat<2, 2, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(U scalar) - { - this->value[0] *= scalar; - this->value[1] *= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator*=(mat<2, 2, U, Q> const& m) - { - return (*this = *this * m); - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(U scalar) - { - this->value[0] /= scalar; - this->value[1] /= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator/=(mat<2, 2, U, Q> const& m) - { - return *this *= inverse(m); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q>& mat<2, 2, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> mat<2, 2, T, Q>::operator++(int) - { - mat<2, 2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> mat<2, 2, T, Q>::operator--(int) - { - mat<2, 2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - -m[0], - -m[1]); - } - - // -- Binary arithmetic 
operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator+(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 2, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] - scalar, - m[1] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - scalar - m[0], - scalar - m[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator-(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 2, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type operator* - ( - mat<2, 2, T, Q> const& m, - typename mat<2, 2, T, Q>::row_type const& v - ) - { - return vec<2, T, Q>( - m[0][0] * v.x + m[1][0] * v.y, - m[0][1] * v.x + m[1][1] * v.y); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::row_type operator* - ( - typename mat<2, 2, T, Q>::col_type const& v, - mat<2, 2, T, Q> const& m - ) - { - return vec<2, T, Q>( - v.x * m[0][0] + v.y * m[0][1], - v.x * m[1][0] + v.y * m[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<2, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m, T scalar) - { - return mat<2, 2, T, Q>( - m[0] / scalar, - m[1] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(T scalar, mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - scalar / m[0], - scalar / m[1]); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::col_type 
operator/(mat<2, 2, T, Q> const& m, typename mat<2, 2, T, Q>::row_type const& v) - { - return inverse(m) * v; - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 2, T, Q>::row_type operator/(typename mat<2, 2, T, Q>::col_type const& v, mat<2, 2, T, Q> const& m) - { - return v * inverse(m); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator/(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - mat<2, 2, T, Q> m1_copy(m1); - return m1_copy /= m2; - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<2, 2, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat2x3.hpp b/third_party/glm/detail/type_mat2x3.hpp deleted file mode 100755 index d6596e4..0000000 --- a/third_party/glm/detail/type_mat2x3.hpp +++ /dev/null @@ -1,159 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat2x3.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec3.hpp" -#include -#include - -namespace glm -{ - template - struct mat<2, 3, T, Q> - { - typedef vec<3, T, Q> col_type; - typedef vec<2, T, Q> row_type; - typedef mat<2, 3, T, Q> type; - typedef mat<3, 2, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[2]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 3, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, - T x1, T y1, T z1); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1); - - // -- Conversions -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<3, U, Q> const& v1, - vec<3, V, Q> const& v2); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator=(mat<2, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator+=(mat<2, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator-=(mat<2, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<2, 3, T, Q> & 
operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<2, 3, T, Q> & operator++ (); - GLM_FUNC_DECL mat<2, 3, T, Q> & operator-- (); - GLM_FUNC_DECL mat<2, 3, T, Q> operator++(int); - GLM_FUNC_DECL mat<2, 3, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<2, 3, T, Q>::col_type operator*(mat<2, 3, T, Q> const& m, typename mat<2, 3, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<2, 3, T, Q>::row_type operator*(typename mat<2, 3, T, Q>::col_type const& v, mat<2, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat2x3.inl" -#endif diff --git a/third_party/glm/detail/type_mat2x3.inl b/third_party/glm/detail/type_mat2x3.inl deleted file mode 100755 index 5fec17e..0000000 --- a/third_party/glm/detail/type_mat2x3.inl +++ /dev/null @@ -1,510 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0), col_type(0, 1, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0); - this->value[1] = col_type(0, 1, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{m.value[0], m.value[1]} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m.value[0]; - this->value[1] = m.value[1]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(T scalar) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(scalar, 0, 0), col_type(0, scalar, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(scalar, 0, 0); - this->value[1] = col_type(0, scalar, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat - ( - T x0, T y0, T z0, 
- T x1, T y1, T z1 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(col_type const& v0, col_type const& v1) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat - ( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x1, y1, z1), col_type(x2, y2, z2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1); - this->value[1] = col_type(x2, y2, z2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 3, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::col_type & mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 3, T, Q>::col_type const& mat<2, 3, T, Q>::operator[](typename mat<2, 3, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator=(mat<2, 3, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator+=(mat<2, 3, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator-=(mat<2, 3, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q>& mat<2, 3, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> & mat<2, 3, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> mat<2, 3, T, Q>::operator++(int) - { - mat<2, 3, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> mat<2, 3, T, Q>::operator--(int) - { - mat<2, 3, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m) - { - return mat<2, 3, T, Q>( - -m[0], - -m[1]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator+(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - 
return mat<2, 3, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] - scalar, - m[1] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator-(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(T scalar, mat<2, 3, T, Q> const& m) - { - return mat<2, 3, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::col_type operator* - ( - mat<2, 3, T, Q> const& m, - typename mat<2, 3, T, Q>::row_type const& v) - { - return typename mat<2, 3, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y, - m[0][1] * v.x + m[1][1] * v.y, - m[0][2] * v.x + m[1][2] * v.y); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 3, T, Q>::row_type operator* - ( - typename mat<2, 3, T, Q>::col_type const& v, - mat<2, 3, T, Q> const& m) - { - return typename mat<2, 3, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - T SrcA00 = m1[0][0]; - T SrcA01 = m1[0][1]; - T SrcA02 = m1[0][2]; - T SrcA10 = m1[1][0]; - T SrcA11 = m1[1][1]; - T SrcA12 = m1[1][2]; - - T SrcB00 = m2[0][0]; - T SrcB01 = m2[0][1]; - T SrcB10 = m2[1][0]; - T SrcB11 = m2[1][1]; - T SrcB20 = m2[2][0]; - T SrcB21 = m2[2][1]; - - mat<3, 3, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<2, 3, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1], - m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator/(mat<2, 3, T, Q> const& m, T scalar) - { - return mat<2, 3, T, Q>( - m[0] / scalar, - m[1] / scalar); - } - - 
template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator/(T scalar, mat<2, 3, T, Q> const& m) - { - return mat<2, 3, T, Q>( - scalar / m[0], - scalar / m[1]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<2, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat2x4.hpp b/third_party/glm/detail/type_mat2x4.hpp deleted file mode 100755 index ff03e21..0000000 --- a/third_party/glm/detail/type_mat2x4.hpp +++ /dev/null @@ -1,161 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat2x4.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<2, 4, T, Q> - { - typedef vec<4, T, Q> col_type; - typedef vec<2, T, Q> row_type; - typedef mat<2, 4, T, Q> type; - typedef mat<4, 2, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[2]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 2; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<2, 4, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<4, U, Q> const& v1, - vec<4, V, Q> const& v2); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator=(mat<2, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator+=(mat<2, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator-=(mat<2, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<2, 4, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<2, 4, T, Q> & operator++ (); - GLM_FUNC_DECL mat<2, 4, T, Q> & operator-- (); - GLM_FUNC_DECL 
mat<2, 4, T, Q> operator++(int); - GLM_FUNC_DECL mat<2, 4, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat2x4.inl" -#endif diff --git a/third_party/glm/detail/type_mat2x4.inl b/third_party/glm/detail/type_mat2x4.inl deleted file mode 100755 index b6d2b9d..0000000 --- a/third_party/glm/detail/type_mat2x4.inl +++ /dev/null @@ -1,520 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0, 0); - this->value[1] = col_type(0, 1, 0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{m[0], m[1]} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0, 0); - this->value[1] = col_type(0, s, 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat - ( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0, w0), col_type(x1, y1, z1, w1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = 
col_type(x0, y0, z0, w0); - this->value[1] = col_type(x1, y1, z1, w1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(col_type const& v0, col_type const& v1) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat - ( - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1, w1); - this->value[1] = col_type(x2, y2, z2, w2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 4, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if 
GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<2, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::col_type & mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<2, 4, T, Q>::col_type const& mat<2, 4, T, Q>::operator[](typename mat<2, 4, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator=(mat<2, 4, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator+=(mat<2, 4, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator-=(mat<2, 4, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> & mat<2, 4, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q>& mat<2, 4, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat<2, 4, T, Q>::operator++(int) - { - mat<2, 4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat<2, 4, T, Q>::operator--(int) - { - mat<2, 4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m) - { - return mat<2, 4, T, Q>( - -m[0], - -m[1]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] + scalar, - m[1] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator+(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0] + m2[0], - 
m1[1] + m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] - scalar, - m[1] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator-(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(T scalar, mat<2, 4, T, Q> const& m) - { - return mat<2, 4, T, Q>( - m[0] * scalar, - m[1] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::col_type operator*(mat<2, 4, T, Q> const& m, typename mat<2, 4, T, Q>::row_type const& v) - { - return typename mat<2, 4, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y, - m[0][1] * v.x + m[1][1] * v.y, - m[0][2] * v.x + m[1][2] * v.y, - m[0][3] * v.x + m[1][3] * v.y); - } - - template - GLM_FUNC_QUALIFIER typename mat<2, 4, T, Q>::row_type operator*(typename mat<2, 4, T, Q>::col_type const& v, mat<2, 4, T, Q> const& m) - { - return typename mat<2, 4, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - T SrcA00 = m1[0][0]; - T SrcA01 = m1[0][1]; - T SrcA02 = m1[0][2]; - T SrcA03 = m1[0][3]; - T SrcA10 = m1[1][0]; - T SrcA11 = m1[1][1]; - T SrcA12 = m1[1][2]; - T SrcA13 = m1[1][3]; - - T SrcB00 = m2[0][0]; - T SrcB01 = m2[0][1]; - T SrcB10 = m2[1][0]; - T SrcB11 = m2[1][1]; - T SrcB20 = m2[2][0]; - T SrcB21 = m2[2][1]; - T SrcB30 = m2[3][0]; - T SrcB31 = m2[3][1]; - - mat<4, 4, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01; - Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11; - Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21; - Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21; - Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31; - Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31; - Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31; - Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<2, 2, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<2, 4, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1], - m1[0][3] * m2[0][0] + 
m1[1][3] * m2[0][1], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1], - m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator/(mat<2, 4, T, Q> const& m, T scalar) - { - return mat<2, 4, T, Q>( - m[0] / scalar, - m[1] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator/(T scalar, mat<2, 4, T, Q> const& m) - { - return mat<2, 4, T, Q>( - scalar / m[0], - scalar / m[1]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<2, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat3x2.hpp b/third_party/glm/detail/type_mat3x2.hpp deleted file mode 100755 index e166581..0000000 --- a/third_party/glm/detail/type_mat3x2.hpp +++ /dev/null @@ -1,167 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat3x2.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec3.hpp" -#include -#include - -namespace glm -{ - template - struct mat<3, 2, T, Q> - { - typedef vec<2, T, Q> col_type; - typedef vec<3, T, Q> row_type; - typedef mat<3, 2, T, Q> type; - typedef mat<2, 3, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[3]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 2, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, - T x1, T y1, - T x2, T y2); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2); - - // -- Conversions -- - - template< - typename X1, typename Y1, - typename X2, typename Y2, - typename X3, typename Y3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, - X2 x2, Y2 y2, - X3 x3, Y3 y3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<2, V1, Q> const& v1, - vec<2, V2, Q> const& v2, - vec<2, V3, Q> const& v3); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator=(mat<3, 2, U, Q> const& m); - template - 
GLM_FUNC_DECL mat<3, 2, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator+=(mat<3, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator-=(mat<3, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<3, 2, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<3, 2, T, Q> & operator++ (); - GLM_FUNC_DECL mat<3, 2, T, Q> & operator-- (); - GLM_FUNC_DECL mat<3, 2, T, Q> operator++(int); - GLM_FUNC_DECL mat<3, 2, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2); - -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat3x2.inl" -#endif diff --git a/third_party/glm/detail/type_mat3x2.inl b/third_party/glm/detail/type_mat3x2.inl deleted file mode 100755 index b4b948b..0000000 --- a/third_party/glm/detail/type_mat3x2.inl +++ /dev/null @@ -1,532 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0), col_type(0, 1), col_type(0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0); - this->value[1] = col_type(0, 1); - this->value[2] = col_type(0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - 
this->value[1] = m[1]; - this->value[2] = m[2]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0), col_type(0, s), col_type(0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0); - this->value[1] = col_type(0, s); - this->value[2] = col_type(0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat - ( - T x0, T y0, - T x1, T y1, - T x2, T y2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, - typename X1, typename Y1, - typename X2, typename Y2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat - ( - X0 x0, Y0 y0, - X1 x1, Y1 y1, - X2 x2, Y2 y2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(vec<2, V0, Q> const& v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 2, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if 
GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::col_type & mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 2, T, Q>::col_type const& mat<3, 2, T, Q>::operator[](typename mat<3, 2, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator=(mat<3, 2, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator+=(mat<3, 2, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator-=(mat<3, 2, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> & mat<3, 2, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - 
template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q>& mat<3, 2, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> mat<3, 2, T, Q>::operator++(int) - { - mat<3, 2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> mat<3, 2, T, Q>::operator--(int) - { - mat<3, 2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m) - { - return mat<3, 2, T, Q>( - -m[0], - -m[1], - -m[2]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator+(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator-(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(T scalar, mat<3, 2, T, Q> const& m) - { - return mat<3, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::col_type operator*(mat<3, 2, T, Q> const& m, typename mat<3, 2, T, Q>::row_type const& v) - { - return typename mat<3, 2, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 2, T, Q>::row_type operator*(typename mat<3, 2, T, Q>::col_type const& v, mat<3, 2, T, Q> const& m) - { - return typename mat<3, 2, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1], - v.x * m[1][0] + v.y * m[1][1], - v.x * m[2][0] + v.y * m[2][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - const T SrcA00 = m1[0][0]; - const T SrcA01 = m1[0][1]; - const T SrcA10 = m1[1][0]; - const T SrcA11 = m1[1][1]; - const T SrcA20 = m1[2][0]; - const T SrcA21 = m1[2][1]; - - const T SrcB00 = m2[0][0]; - const T SrcB01 = m2[0][1]; - const T SrcB02 = m2[0][2]; - const T SrcB10 = m2[1][0]; - const T SrcB11 = m2[1][1]; - const T SrcB12 = m2[1][2]; - - mat<2, 2, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> 
operator*(mat<3, 2, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<3, 2, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator/(mat<3, 2, T, Q> const& m, T scalar) - { - return mat<3, 2, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator/(T scalar, mat<3, 2, T, Q> const& m) - { - return mat<3, 2, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<3, 2, T, Q> const& m1, mat<3, 2, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat3x3.hpp b/third_party/glm/detail/type_mat3x3.hpp deleted file mode 100755 index 3174872..0000000 --- a/third_party/glm/detail/type_mat3x3.hpp +++ /dev/null @@ -1,184 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat3x3.hpp - -#pragma once - -#include "type_vec3.hpp" -#include -#include - -namespace glm -{ - template - struct mat<3, 3, T, Q> - { - typedef vec<3, T, Q> col_type; - typedef vec<3, T, Q> row_type; - typedef mat<3, 3, T, Q> type; - typedef mat<3, 3, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[3]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 3, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, - T x1, T y1, T z1, - T x2, T y2, T z2); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2, - X3 x3, Y3 y3, Z3 z3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<3, V1, 
Q> const& v1, - vec<3, V2, Q> const& v2, - vec<3, V3, Q> const& v3); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator+=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator-=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator*=(mat<3, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator/=(U s); - template - GLM_FUNC_DECL mat<3, 3, T, Q> & operator/=(mat<3, 3, U, Q> const& m); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<3, 3, T, Q> & operator++(); - GLM_FUNC_DECL mat<3, 3, T, Q> & operator--(); - GLM_FUNC_DECL mat<3, 3, T, Q> operator++(int); - GLM_FUNC_DECL mat<3, 3, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m); - - template - 
GLM_FUNC_DECL typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat3x3.inl" -#endif diff --git a/third_party/glm/detail/type_mat3x3.inl b/third_party/glm/detail/type_mat3x3.inl deleted file mode 100755 index 1ddaf99..0000000 --- a/third_party/glm/detail/type_mat3x3.inl +++ /dev/null @@ -1,601 +0,0 @@ -#include "../matrix.hpp" - -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0); - this->value[1] = col_type(0, 1, 0); - this->value[2] = col_type(0, 0, 1); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0); - this->value[1] = col_type(0, s, 0); - this->value[2] = col_type(0, 0, s); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat - ( - T x0, T y0, T z0, - T x1, T y1, T z1, - T x2, T y2, T z2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); - this->value[2] = col_type(x2, y2, z2); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat - ( - X1 x1, Y1 y1, Z1 z1, - X2 x2, Y2 y2, Z2 z2, - X3 x3, Y3 y3, Z3 z3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1); - this->value[1] = col_type(x2, y2, z2); - this->value[2] = col_type(x3, y3, z3); 
-# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, Q> const& v2, vec<3, V3, Q> const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); - this->value[2] = col_type(v3); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 3, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 3, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - 
this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type & mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 3, T, Q>::col_type const& mat<3, 3, T, Q>::operator[](typename mat<3, 3, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator=(mat<3, 3, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator+=(mat<3, 3, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator-=(mat<3, 3, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator*=(mat<3, 3, U, Q> const& m) - { - return (*this = *this * m); - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator/=(mat<3, 3, U, Q> const& m) - { - return *this *= inverse(m); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> & mat<3, 3, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat<3, 3, T, Q>::operator++(int) - { - mat<3, 3, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat<3, 3, T, Q>::operator--(int) - { - mat<3, 3, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - -m[0], - -m[1], - -m[2]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - 
GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator+(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 3, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - scalar - m[0], - scalar - m[1], - scalar - m[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator-(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 3, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type operator*(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) - { - return typename mat<3, 3, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z, - m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::row_type operator*(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) - { - return typename mat<3, 3, T, Q>::row_type( - m[0][0] * v.x + m[0][1] * v.y + m[0][2] * v.z, - m[1][0] * v.x + m[1][1] * v.y + m[1][2] * v.z, - m[2][0] * v.x + m[2][1] * v.y + m[2][2] * v.z); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - T const SrcA00 = m1[0][0]; - T const SrcA01 = m1[0][1]; - T const SrcA02 = m1[0][2]; - T const SrcA10 = m1[1][0]; - T const SrcA11 = m1[1][1]; - T const SrcA12 = m1[1][2]; - T const SrcA20 = m1[2][0]; - T const SrcA21 = m1[2][1]; - T const SrcA22 = m1[2][2]; - - T const SrcB00 = m2[0][0]; - T const SrcB01 = m2[0][1]; - T const SrcB02 = m2[0][2]; - T const SrcB10 = m2[1][0]; - T const SrcB11 = m2[1][1]; - T const SrcB12 = m2[1][2]; - T const SrcB20 = m2[2][0]; - T const SrcB21 = m2[2][1]; - T const SrcB22 = m2[2][2]; - - mat<3, 3, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + 
m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<3, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2], - m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m, T scalar) - { - return mat<3, 3, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(T scalar, mat<3, 3, T, Q> const& m) - { - return mat<3, 3, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2]); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::col_type operator/(mat<3, 3, T, Q> const& m, typename mat<3, 3, T, Q>::row_type const& v) - { - return inverse(m) * v; - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 3, T, Q>::row_type operator/(typename mat<3, 3, T, Q>::col_type const& v, mat<3, 3, T, Q> const& m) - { - return v * inverse(m); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator/(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - mat<3, 3, T, Q> m1_copy(m1); - return m1_copy /= m2; - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<3, 3, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat3x4.hpp b/third_party/glm/detail/type_mat3x4.hpp deleted file mode 100755 index 6e40b90..0000000 --- a/third_party/glm/detail/type_mat3x4.hpp +++ /dev/null @@ -1,166 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat3x4.hpp - -#pragma once - -#include "type_vec3.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<3, 4, T, Q> - { - typedef vec<4, T, Q> col_type; - typedef vec<3, T, Q> row_type; - typedef mat<3, 4, T, Q> type; - typedef mat<4, 3, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[3]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 3; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() 
GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<3, 4, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1, - T x2, T y2, T z2, T w2); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2, - typename X3, typename Y3, typename Z3, typename W3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2, - X3 x3, Y3 y3, Z3 z3, W3 w3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<4, V1, Q> const& v1, - vec<4, V2, Q> const& v2, - vec<4, V3, Q> const& v3); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator=(mat<3, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator+=(mat<3, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator-=(mat<3, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<3, 4, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<3, 4, T, Q> & operator++(); - GLM_FUNC_DECL mat<3, 4, T, Q> & operator--(); - GLM_FUNC_DECL mat<3, 4, T, Q> operator++(int); - GLM_FUNC_DECL mat<3, 4, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<3, 4, T, Q>::col_type operator*(mat<3, 4, T, Q> const& m, typename mat<3, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<3, 4, T, Q>::row_type operator*(typename mat<3, 4, T, Q>::col_type const& v, mat<3, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL 
mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat3x4.inl" -#endif diff --git a/third_party/glm/detail/type_mat3x4.inl b/third_party/glm/detail/type_mat3x4.inl deleted file mode 100755 index 6ee416c..0000000 --- a/third_party/glm/detail/type_mat3x4.inl +++ /dev/null @@ -1,578 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0, 0); - this->value[1] = col_type(0, 1, 0, 0); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0, 0); - this->value[1] = col_type(0, s, 0, 0); - this->value[2] = col_type(0, 0, s, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat - ( - T x0, T y0, T z0, T w0, - T x1, T y1, T z1, T w1, - T x2, T y2, T z2, T w2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x0, y0, z0, w0), - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0, w0); - this->value[1] = col_type(x1, y1, z1, w1); - this->value[2] = col_type(x2, y2, z2, w2); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, typename Z0, typename W0, - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat - ( - X0 x0, Y0 y0, Z0 z0, W0 w0, - X1 x1, Y1 y1, Z1 z1, W1 w1, - X2 x2, Y2 y2, Z2 z2, W2 w2 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x0, y0, z0, w0), - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0, w0); - this->value[1] = 
col_type(x1, y1, z1, w1); - this->value[2] = col_type(x2, y2, z2, w2); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(vec<4, V1, Q> const& v0, vec<4, V2, Q> const& v1, vec<4, V3, Q> const& v2) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 4, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(m[2], 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(m[2], 1, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<3, 4, T, Q>::mat(mat<4, 
3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::col_type & mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<3, 4, T, Q>::col_type const& mat<3, 4, T, Q>::operator[](typename mat<3, 4, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator=(mat<3, 4, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator+=(mat<3, 4, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator-=(mat<3, 4, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> & mat<3, 4, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q>& mat<3, 4, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat<3, 4, T, Q>::operator++(int) - { - mat<3, 4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat<3, 4, T, Q>::operator--(int) - { - mat<3, 4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m) - { - return mat<3, 4, T, Q>( - -m[0], - -m[1], - -m[2]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator+(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0] + m2[0], - m1[1] + 
m2[1], - m1[2] + m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator-(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(T scalar, mat<3, 4, T, Q> const& m) - { - return mat<3, 4, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::col_type operator* - ( - mat<3, 4, T, Q> const& m, - typename mat<3, 4, T, Q>::row_type const& v - ) - { - return typename mat<3, 4, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z, - m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z, - m[0][3] * v.x + m[1][3] * v.y + m[2][3] * v.z); - } - - template - GLM_FUNC_QUALIFIER typename mat<3, 4, T, Q>::row_type operator* - ( - typename mat<3, 4, T, Q>::col_type const& v, - mat<3, 4, T, Q> const& m - ) - { - return typename mat<3, 4, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2] + v.w * m[0][3], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2] + v.w * m[1][3], - v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2] + v.w * m[2][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - const T SrcA00 = m1[0][0]; - const T SrcA01 = m1[0][1]; - const T SrcA02 = m1[0][2]; - const T SrcA03 = m1[0][3]; - const T SrcA10 = m1[1][0]; - const T SrcA11 = m1[1][1]; - const T SrcA12 = m1[1][2]; - const T SrcA13 = m1[1][3]; - const T SrcA20 = m1[2][0]; - const T SrcA21 = m1[2][1]; - const T SrcA22 = m1[2][2]; - const T SrcA23 = m1[2][3]; - - const T SrcB00 = m2[0][0]; - const T SrcB01 = m2[0][1]; - const T SrcB02 = m2[0][2]; - const T SrcB10 = m2[1][0]; - const T SrcB11 = m2[1][1]; - const T SrcB12 = m2[1][2]; - const T SrcB20 = m2[2][0]; - const T SrcB21 = m2[2][1]; - const T SrcB22 = m2[2][2]; - const T SrcB30 = m2[3][0]; - const T SrcB31 = m2[3][1]; - const T SrcB32 = m2[3][2]; - - mat<4, 4, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02; - Result[0][3] = SrcA03 * SrcB00 + SrcA13 * SrcB01 + SrcA23 * SrcB02; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12; - Result[1][3] = SrcA03 * SrcB10 + SrcA13 * SrcB11 + SrcA23 * SrcB12; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22; - Result[2][3] = SrcA03 * SrcB20 + SrcA13 * SrcB21 + SrcA23 * SrcB22; - Result[3][0] = SrcA00 * SrcB30 + SrcA10 * SrcB31 + SrcA20 * SrcB32; - Result[3][1] = SrcA01 * SrcB30 + SrcA11 * SrcB31 + SrcA21 * SrcB32; - Result[3][2] = SrcA02 * SrcB30 + SrcA12 * SrcB31 + SrcA22 * SrcB32; - Result[3][3] = SrcA03 * SrcB30 + SrcA13 * SrcB31 + SrcA23 * 
SrcB32; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<2, 3, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<3, 4, T, Q> const& m1, mat<3, 3, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2], - m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator/(mat<3, 4, T, Q> const& m, T scalar) - { - return mat<3, 4, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator/(T scalar, mat<3, 4, T, Q> const& m) - { - return mat<3, 4, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<3, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat4x2.hpp b/third_party/glm/detail/type_mat4x2.hpp deleted file mode 100755 index 8d34352..0000000 --- a/third_party/glm/detail/type_mat4x2.hpp +++ /dev/null @@ -1,171 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat4x2.hpp - -#pragma once - -#include "type_vec2.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<4, 2, T, Q> - { - typedef vec<2, T, Q> col_type; - typedef vec<4, T, Q> row_type; - typedef mat<4, 2, T, Q> type; - typedef mat<2, 4, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[4]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 2, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T scalar); - GLM_FUNC_DECL 
GLM_CONSTEXPR mat( - T x0, T y0, - T x1, T y1, - T x2, T y2, - T x3, T y3); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2, - col_type const& v3); - - // -- Conversions -- - - template< - typename X0, typename Y0, - typename X1, typename Y1, - typename X2, typename Y2, - typename X3, typename Y3> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X0 x0, Y0 y0, - X1 x1, Y1 y1, - X2 x2, Y2 y2, - X3 x3, Y3 y3); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<2, V1, Q> const& v1, - vec<2, V2, Q> const& v2, - vec<2, V3, Q> const& v3, - vec<2, V4, Q> const& v4); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator=(mat<4, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator+=(mat<4, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator-=(mat<4, 2, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<4, 2, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<4, 2, T, Q> & operator++ (); - GLM_FUNC_DECL mat<4, 2, T, Q> & operator-- (); - GLM_FUNC_DECL mat<4, 2, T, Q> operator++(int); - GLM_FUNC_DECL mat<4, 2, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template 
- GLM_FUNC_DECL mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar); - - template - GLM_FUNC_DECL mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat4x2.inl" -#endif diff --git a/third_party/glm/detail/type_mat4x2.inl b/third_party/glm/detail/type_mat4x2.inl deleted file mode 100755 index 419c80c..0000000 --- a/third_party/glm/detail/type_mat4x2.inl +++ /dev/null @@ -1,574 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0), col_type(0, 1), col_type(0, 0), col_type(0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0); - this->value[1] = col_type(0, 1); - this->value[2] = col_type(0, 0); - this->value[3] = col_type(0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(T s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0), col_type(0, s), col_type(0, 0), col_type(0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0); - this->value[1] = col_type(0, s); - this->value[2] = col_type(0, 0); - this->value[3] = col_type(0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat - ( - T x0, T y0, - T x1, T y1, - T x2, T y2, - T x3, T y3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); - this->value[3] = col_type(x3, y3); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; - this->value[3] = v3; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, - typename X1, typename Y1, - typename X2, typename Y2, - typename X3, typename Y3> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat - ( - X0 x0, Y0 y0, - X1 x1, Y1 y1, - X2 x2, Y2 y2, - X3 x3, Y3 y3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0), col_type(x1, y1), col_type(x2, y2), col_type(x3, y3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0); - this->value[1] = col_type(x1, y1); - this->value[2] = col_type(x2, y2); - this->value[3] = col_type(x3, y3); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(vec<2, V0, Q> const& 
v0, vec<2, V1, Q> const& v1, vec<2, V2, Q> const& v2, vec<2, V3, Q> const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v0); - this->value[1] = col_type(v1); - this->value[2] = col_type(v2); - this->value[3] = col_type(v3); -# endif - } - - // -- Conversion -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 2, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } 
- - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 2, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::col_type & mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 2, T, Q>::col_type const& mat<4, 2, T, Q>::operator[](typename mat<4, 2, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q>& mat<4, 2, T, Q>::operator=(mat<4, 2, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - this->value[3] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator+=(mat<4, 2, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - this->value[3] += m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - this->value[3] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator-=(mat<4, 2, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - this->value[3] -= m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - this->value[3] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - this->value[3] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - ++this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> & mat<4, 2, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - --this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> mat<4, 2, T, Q>::operator++(int) - { - mat<4, 2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> mat<4, 2, T, Q>::operator--(int) - { - mat<4, 2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m) - { - return mat<4, 2, T, Q>( - -m[0], - -m[1], - -m[2], - -m[3]); - } - - // -- Binary arithmetic operators -- - - template - 
GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] + scalar, - m[1] + scalar, - m[2] + scalar, - m[3] + scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator+(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2], - m1[3] + m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] - scalar, - m[1] - scalar, - m[2] - scalar, - m[3] - scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator-(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2], - m1[3] - m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar, - m[3] * scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(T scalar, mat<4, 2, T, Q> const& m) - { - return mat<4, 2, T, Q>( - m[0] * scalar, - m[1] * scalar, - m[2] * scalar, - m[3] * scalar); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::col_type operator*(mat<4, 2, T, Q> const& m, typename mat<4, 2, T, Q>::row_type const& v) - { - return typename mat<4, 2, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 2, T, Q>::row_type operator*(typename mat<4, 2, T, Q>::col_type const& v, mat<4, 2, T, Q> const& m) - { - return typename mat<4, 2, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1], - v.x * m[1][0] + v.y * m[1][1], - v.x * m[2][0] + v.y * m[2][1], - v.x * m[3][0] + v.y * m[3][1]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - T const SrcA00 = m1[0][0]; - T const SrcA01 = m1[0][1]; - T const SrcA10 = m1[1][0]; - T const SrcA11 = m1[1][1]; - T const SrcA20 = m1[2][0]; - T const SrcA21 = m1[2][1]; - T const SrcA30 = m1[3][0]; - T const SrcA31 = m1[3][1]; - - T const SrcB00 = m2[0][0]; - T const SrcB01 = m2[0][1]; - T const SrcB02 = m2[0][2]; - T const SrcB03 = m2[0][3]; - T const SrcB10 = m2[1][0]; - T const SrcB11 = m2[1][1]; - T const SrcB12 = m2[1][2]; - T const SrcB13 = m2[1][3]; - - mat<2, 2, T, Q> Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3]); - } - 
- template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator*(mat<4, 2, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 2, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator/(mat<4, 2, T, Q> const& m, T scalar) - { - return mat<4, 2, T, Q>( - m[0] / scalar, - m[1] / scalar, - m[2] / scalar, - m[3] / scalar); - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> operator/(T scalar, mat<4, 2, T, Q> const& m) - { - return mat<4, 2, T, Q>( - scalar / m[0], - scalar / m[1], - scalar / m[2], - scalar / m[3]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<4, 2, T, Q> const& m1, mat<4, 2, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat4x3.hpp b/third_party/glm/detail/type_mat4x3.hpp deleted file mode 100755 index 16e4270..0000000 --- a/third_party/glm/detail/type_mat4x3.hpp +++ /dev/null @@ -1,171 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat4x3.hpp - -#pragma once - -#include "type_vec3.hpp" -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<4, 3, T, Q> - { - typedef vec<3, T, Q> col_type; - typedef vec<4, T, Q> row_type; - typedef mat<4, 3, T, Q> type; - typedef mat<3, 4, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[4]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length() { return 4; } - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 3, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T const& x); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T const& x0, T const& y0, T const& z0, - T const& x1, T const& y1, T const& z1, - T const& x2, T const& y2, T const& z2, - T const& x3, T const& y3, T const& z3); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2, - col_type const& v3); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3, - typename X4, typename Y4, typename Z4> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 const& x1, Y1 const& y1, Z1 const& z1, - X2 const& x2, Y2 const& y2, Z2 const& z2, - X3 const& x3, Y3 const& y3, Z3 const& z3, - X4 const& x4, Y4 const& y4, Z4 const& z4); - - 
template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<3, V1, Q> const& v1, - vec<3, V2, Q> const& v2, - vec<3, V3, Q> const& v3, - vec<3, V4, Q> const& v4); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator=(mat<4, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator+=(mat<4, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator-=(mat<4, 3, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<4, 3, T, Q> & operator/=(U s); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<4, 3, T, Q>& operator++(); - GLM_FUNC_DECL mat<4, 3, T, Q>& operator--(); - GLM_FUNC_DECL mat<4, 3, T, Q> operator++(int); - GLM_FUNC_DECL mat<4, 3, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(T const& s, mat<4, 3, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 3, T, Q>::col_type operator*(mat<4, 3, T, Q> const& m, typename mat<4, 3, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 3, T, Q>::row_type operator*(typename mat<4, 3, T, Q>::col_type const& v, mat<4, 3, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 3, T, Q> operator/(T const& s, mat<4, 3, T, Q> const& m); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2); -}//namespace glm - -#ifndef 
GLM_EXTERNAL_TEMPLATE -#include "type_mat4x3.inl" -#endif //GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_mat4x3.inl b/third_party/glm/detail/type_mat4x3.inl deleted file mode 100755 index 11b1ee3..0000000 --- a/third_party/glm/detail/type_mat4x3.inl +++ /dev/null @@ -1,598 +0,0 @@ -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0), col_type(0, 1, 0), col_type(0, 0, 1), col_type(0, 0, 0)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0); - this->value[1] = col_type(0, 1, 0); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0, 0, 0); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(T const& s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0), col_type(0, s, 0), col_type(0, 0, s), col_type(0, 0, 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(s, 0, 0); - this->value[1] = col_type(0, s, 0); - this->value[2] = col_type(0, 0, s); - this->value[3] = col_type(0, 0, 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat - ( - T const& x0, T const& y0, T const& z0, - T const& x1, T const& y1, T const& z1, - T const& x2, T const& y2, T const& z2, - T const& x3, T const& y3, T const& z3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); - this->value[2] = col_type(x2, y2, z2); - this->value[3] = col_type(x3, y3, z3); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; - this->value[3] = v3; -# endif - } - - // -- Conversion constructors -- - - template - template< - typename X0, typename Y0, typename Z0, - typename X1, typename Y1, typename Z1, - typename X2, typename Y2, typename Z2, - typename X3, typename Y3, typename Z3> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat - ( - X0 const& x0, Y0 const& y0, Z0 const& z0, - X1 const& x1, Y1 const& y1, Z1 const& z1, - X2 const& x2, Y2 const& y2, Z2 const& z2, - X3 const& x3, Y3 const& y3, Z3 const& z3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x0, y0, z0), col_type(x1, y1, z1), col_type(x2, y2, z2), col_type(x3, y3, z3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0); - this->value[1] = col_type(x1, y1, z1); - this->value[2] = col_type(x2, y2, z2); - this->value[3] = col_type(x3, y3, z3); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(vec<3, V1, Q> const& v1, vec<3, V2, 
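The mat4x3 constructors removed here follow GLM's usual shape rule: the scalar constructor puts s on the diagonal of the leading 3x3 block and leaves the fourth column zero, so mat4x3(1) is the identity-like matrix for that shape. A small sketch of that initialization, assuming a plain column-major array layout:

    // Sketch of the initialization done by the removed mat<4, 3>(T s)
    // constructor: s on the 3x3 diagonal, zero fourth column.
    #include <array>

    using Mat4x3 = std::array<std::array<float, 3>, 4>; // [column][row]

    Mat4x3 diagonal4x3(float s)
    {
        Mat4x3 m{};      // all zeros
        m[0][0] = s;     // column 0: (s, 0, 0)
        m[1][1] = s;     // column 1: (0, s, 0)
        m[2][2] = s;     // column 2: (0, 0, s)
        return m;        // column 3 stays (0, 0, 0)
    }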
Q> const& v2, vec<3, V3, Q> const& v3, vec<3, V4, Q> const& v4) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); - this->value[2] = col_type(v3); - this->value[3] = col_type(v4); -# endif - } - - // -- Matrix conversions -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 3, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(0, 0, 1); - this->value[3] = col_type(0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 1), col_type(m[3], 0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - 
this->value[2] = col_type(m[2], 1); - this->value[3] = col_type(m[3], 0); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 3, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(0); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::col_type & mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 3, T, Q>::col_type const& mat<4, 3, T, Q>::operator[](typename mat<4, 3, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary updatable operators -- - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q>& mat<4, 3, T, Q>::operator=(mat<4, 3, U, Q> const& m) - { - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - this->value[3] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator+=(mat<4, 3, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - this->value[3] += m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - this->value[3] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator-=(mat<4, 3, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - this->value[3] -= m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - this->value[3] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - this->value[3] /= s; - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - ++this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> & mat<4, 3, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - --this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> mat<4, 3, T, Q>::operator++(int) - { - mat<4, 3, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> mat<4, 3, T, Q>::operator--(int) - { - mat<4, 3, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m) - { - return mat<4, 3, T, Q>( - -m[0], - -m[1], - 
-m[2], - -m[3]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] + s, - m[1] + s, - m[2] + s, - m[3] + s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator+(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2], - m1[3] + m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] - s, - m[1] - s, - m[2] - s, - m[3] - s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator-(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2], - m1[3] - m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(T const& s, mat<4, 3, T, Q> const& m) - { - return mat<4, 3, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::col_type operator* - ( - mat<4, 3, T, Q> const& m, - typename mat<4, 3, T, Q>::row_type const& v) - { - return typename mat<4, 3, T, Q>::col_type( - m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * v.w, - m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * v.w, - m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z + m[3][2] * v.w); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 3, T, Q>::row_type operator* - ( - typename mat<4, 3, T, Q>::col_type const& v, - mat<4, 3, T, Q> const& m) - { - return typename mat<4, 3, T, Q>::row_type( - v.x * m[0][0] + v.y * m[0][1] + v.z * m[0][2], - v.x * m[1][0] + v.y * m[1][1] + v.z * m[1][2], - v.x * m[2][0] + v.y * m[2][1] + v.z * m[2][2], - v.x * m[3][0] + v.y * m[3][1] + v.z * m[3][2]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - T const SrcA00 = m1[0][0]; - T const SrcA01 = m1[0][1]; - T const SrcA02 = m1[0][2]; - T const SrcA10 = m1[1][0]; - T const SrcA11 = m1[1][1]; - T const SrcA12 = m1[1][2]; - T const SrcA20 = m1[2][0]; - T const SrcA21 = m1[2][1]; - T const SrcA22 = m1[2][2]; - T const SrcA30 = m1[3][0]; - T const SrcA31 = m1[3][1]; - T const SrcA32 = m1[3][2]; - - T const SrcB00 = m2[0][0]; - T const SrcB01 = m2[0][1]; - T const SrcB02 = m2[0][2]; - T const SrcB03 = m2[0][3]; - T const SrcB10 = m2[1][0]; - T const SrcB11 = m2[1][1]; - T const SrcB12 = m2[1][2]; - T const SrcB13 = m2[1][3]; - T const SrcB20 = m2[2][0]; - T const SrcB21 = m2[2][1]; - T const SrcB22 = m2[2][2]; - T const SrcB23 = m2[2][3]; - - mat<3, 3, T, Q> 
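The removed operator*(mat4x3, vec4) just above is the standard column-major matrix-vector product: each component of the vec3 result is the dot product of one matrix row with v, or equivalently the sum of the four columns weighted by v's components. A standalone sketch of that computation (plain structs, not GLM types):

    // Sketch: vec3 result = mat4x3 * vec4 in column-major storage,
    // i.e. v.x*col0 + v.y*col1 + v.z*col2 + v.w*col3.
    struct Vec3 { float x, y, z; };
    struct Vec4 { float x, y, z, w; };

    Vec3 mul(const float m[4][3], const Vec4& v)  // m[column][row]
    {
        Vec3 r{};
        const float s[4] = { v.x, v.y, v.z, v.w };
        for (int c = 0; c < 4; ++c) {
            r.x += m[c][0] * s[c];
            r.y += m[c][1] * s[c];
            r.z += m[c][2] * s[c];
        }
        return r;
    }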
Result; - Result[0][0] = SrcA00 * SrcB00 + SrcA10 * SrcB01 + SrcA20 * SrcB02 + SrcA30 * SrcB03; - Result[0][1] = SrcA01 * SrcB00 + SrcA11 * SrcB01 + SrcA21 * SrcB02 + SrcA31 * SrcB03; - Result[0][2] = SrcA02 * SrcB00 + SrcA12 * SrcB01 + SrcA22 * SrcB02 + SrcA32 * SrcB03; - Result[1][0] = SrcA00 * SrcB10 + SrcA10 * SrcB11 + SrcA20 * SrcB12 + SrcA30 * SrcB13; - Result[1][1] = SrcA01 * SrcB10 + SrcA11 * SrcB11 + SrcA21 * SrcB12 + SrcA31 * SrcB13; - Result[1][2] = SrcA02 * SrcB10 + SrcA12 * SrcB11 + SrcA22 * SrcB12 + SrcA32 * SrcB13; - Result[2][0] = SrcA00 * SrcB20 + SrcA10 * SrcB21 + SrcA20 * SrcB22 + SrcA30 * SrcB23; - Result[2][1] = SrcA01 * SrcB20 + SrcA11 * SrcB21 + SrcA21 * SrcB22 + SrcA31 * SrcB23; - Result[2][2] = SrcA02 * SrcB20 + SrcA12 * SrcB21 + SrcA22 * SrcB22 + SrcA32 * SrcB23; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator*(mat<4, 3, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 3, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], - m1[0][0] * m2[3][0] + m1[1][0] * m2[3][1] + m1[2][0] * m2[3][2] + m1[3][0] * m2[3][3], - m1[0][1] * m2[3][0] + m1[1][1] * m2[3][1] + m1[2][1] * m2[3][2] + m1[3][1] * m2[3][3], - m1[0][2] * m2[3][0] + m1[1][2] * m2[3][1] + m1[2][2] * m2[3][2] + m1[3][2] * m2[3][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator/(mat<4, 3, T, Q> const& m, T const& s) - { - return mat<4, 3, T, Q>( - m[0] / s, - m[1] / s, - m[2] / s, - m[3] / s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> operator/(T const& s, mat<4, 3, T, Q> const& m) - { - return mat<4, 3, T, Q>( - s / m[0], - s / m[1], - s / m[2], - s / m[3]); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<4, 3, T, Q> const& m1, mat<4, 3, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); - } -} //namespace glm diff --git a/third_party/glm/detail/type_mat4x4.hpp b/third_party/glm/detail/type_mat4x4.hpp deleted file mode 100755 index 3517f9f..0000000 --- a/third_party/glm/detail/type_mat4x4.hpp +++ /dev/null @@ -1,189 +0,0 @@ -/// @ref core -/// @file glm/detail/type_mat4x4.hpp - -#pragma once - -#include "type_vec4.hpp" -#include -#include - -namespace glm -{ - template - struct mat<4, 4, T, Q> - { - typedef vec<4, T, Q> col_type; - typedef vec<4, T, Q> row_type; - typedef mat<4, 4, T, Q> type; - typedef mat<4, 4, T, Q> transpose_type; - typedef T value_type; - - private: - col_type value[4]; - - public: - // -- Accesses -- - - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR 
length_type length(){return 4;} - - GLM_FUNC_DECL col_type & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR col_type const& operator[](length_type i) const; - - // -- Constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR mat() GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR mat(mat<4, 4, T, P> const& m); - - GLM_FUNC_DECL explicit GLM_CONSTEXPR mat(T const& x); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - T const& x0, T const& y0, T const& z0, T const& w0, - T const& x1, T const& y1, T const& z1, T const& w1, - T const& x2, T const& y2, T const& z2, T const& w2, - T const& x3, T const& y3, T const& z3, T const& w3); - GLM_FUNC_DECL GLM_CONSTEXPR mat( - col_type const& v0, - col_type const& v1, - col_type const& v2, - col_type const& v3); - - // -- Conversions -- - - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2, - typename X3, typename Y3, typename Z3, typename W3, - typename X4, typename Y4, typename Z4, typename W4> - GLM_FUNC_DECL GLM_CONSTEXPR mat( - X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1, - X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2, - X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3, - X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4); - - template - GLM_FUNC_DECL GLM_CONSTEXPR mat( - vec<4, V1, Q> const& v1, - vec<4, V2, Q> const& v2, - vec<4, V3, Q> const& v3, - vec<4, V4, Q> const& v4); - - // -- Matrix conversions -- - - template - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 4, U, P> const& m); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 3, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<2, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 2, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<3, 4, T, Q> const& x); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR mat(mat<4, 3, T, Q> const& x); - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator+=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator+=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator-=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator-=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator*=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator*=(mat<4, 4, U, Q> const& m); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator/=(U s); - template - GLM_FUNC_DECL mat<4, 4, T, Q> & operator/=(mat<4, 4, U, Q> const& m); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL mat<4, 4, T, Q> & operator++(); - GLM_FUNC_DECL mat<4, 4, T, Q> & operator--(); - GLM_FUNC_DECL mat<4, 4, T, Q> operator++(int); - GLM_FUNC_DECL mat<4, 4, T, Q> operator--(int); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m); - - // -- Binary operators -- - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator+(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> 
operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::col_type operator*(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::row_type operator*(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T const& s); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator/(T const& s, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v); - - template - GLM_FUNC_DECL typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m); - - template - GLM_FUNC_DECL mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); - - template - GLM_FUNC_DECL bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_mat4x4.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_mat4x4.inl b/third_party/glm/detail/type_mat4x4.inl deleted file mode 100755 index e38b87f..0000000 --- a/third_party/glm/detail/type_mat4x4.inl +++ /dev/null @@ -1,706 +0,0 @@ -#include "../matrix.hpp" - -namespace glm -{ - // -- Constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat() -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALIZER_LIST - : value{col_type(1, 0, 0, 0), col_type(0, 1, 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if GLM_CONFIG_CTOR_INIT == GLM_CTOR_INITIALISATION - this->value[0] = col_type(1, 0, 0, 0); - this->value[1] = col_type(0, 1, 0, 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, T, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(T const& s) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(s, 0, 0, 0), col_type(0, s, 0, 0), col_type(0, 0, s, 0), col_type(0, 0, 0, s)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - 
this->value[0] = col_type(s, 0, 0, 0); - this->value[1] = col_type(0, s, 0, 0); - this->value[2] = col_type(0, 0, s, 0); - this->value[3] = col_type(0, 0, 0, s); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat - ( - T const& x0, T const& y0, T const& z0, T const& w0, - T const& x1, T const& y1, T const& z1, T const& w1, - T const& x2, T const& y2, T const& z2, T const& w2, - T const& x3, T const& y3, T const& z3, T const& w3 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{ - col_type(x0, y0, z0, w0), - col_type(x1, y1, z1, w1), - col_type(x2, y2, z2, w2), - col_type(x3, y3, z3, w3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x0, y0, z0, w0); - this->value[1] = col_type(x1, y1, z1, w1); - this->value[2] = col_type(x2, y2, z2, w2); - this->value[3] = col_type(x3, y3, z3, w3); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(col_type const& v0, col_type const& v1, col_type const& v2, col_type const& v3) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v0), col_type(v1), col_type(v2), col_type(v3)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = v0; - this->value[1] = v1; - this->value[2] = v2; - this->value[3] = v3; -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 4, U, P> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(m[3])} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0]); - this->value[1] = col_type(m[1]); - this->value[2] = col_type(m[2]); - this->value[3] = col_type(m[3]); -# endif - } - - // -- Conversions -- - - template - template< - typename X1, typename Y1, typename Z1, typename W1, - typename X2, typename Y2, typename Z2, typename W2, - typename X3, typename Y3, typename Z3, typename W3, - typename X4, typename Y4, typename Z4, typename W4> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat - ( - X1 const& x1, Y1 const& y1, Z1 const& z1, W1 const& w1, - X2 const& x2, Y2 const& y2, Z2 const& z2, W2 const& w2, - X3 const& x3, Y3 const& y3, Z3 const& z3, W3 const& w3, - X4 const& x4, Y4 const& y4, Z4 const& z4, W4 const& w4 - ) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(x1, y1, z1, w1), col_type(x2, y2, z2, w2), col_type(x3, y3, z3, w3), col_type(x4, y4, z4, w4)} -# endif - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 4th parameter type invalid."); - - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 5th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || 
GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 6th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 7th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 8th parameter type invalid."); - - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 9th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 10th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 11th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 12th parameter type invalid."); - - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 13th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 14th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 15th parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 16th parameter type invalid."); - -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(x1, y1, z1, w1); - this->value[1] = col_type(x2, y2, z2, w2); - this->value[2] = col_type(x3, y3, z3, w3); - this->value[3] = col_type(x4, y4, z4, w4); -# endif - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(vec<4, V1, Q> const& v1, vec<4, V2, Q> const& v2, vec<4, V3, Q> const& v3, vec<4, V4, Q> const& v4) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(v1), col_type(v2), col_type(v3), col_type(v4)} -# endif - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 1st parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 2nd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer types, 3rd parameter type invalid."); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559 || std::numeric_limits::is_integer || GLM_CONFIG_UNRESTRICTED_GENTYPE, "*mat4x4 constructor only takes float and integer 
types, 4th parameter type invalid."); - -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(v1); - this->value[1] = col_type(v2); - this->value[2] = col_type(v3); - this->value[3] = col_type(v4); -# endif - } - - // -- Matrix conversions -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(m[2], 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(m[2], 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<2, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 2, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0, 0), col_type(m[1], 0, 0), col_type(0, 0, 1, 0), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0, 0); - this->value[1] = col_type(m[1], 0, 0); - this->value[2] = col_type(0, 0, 1, 0); - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<3, 4, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0]), col_type(m[1]), col_type(m[2]), col_type(0, 0, 0, 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = col_type(0, 0, 0, 1); -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR mat<4, 4, T, Q>::mat(mat<4, 3, T, Q> const& m) -# if GLM_HAS_INITIALIZER_LISTS - : value{col_type(m[0], 0), col_type(m[1], 0), col_type(m[2], 0), col_type(m[3], 1)} -# endif - { -# if !GLM_HAS_INITIALIZER_LISTS - this->value[0] = col_type(m[0], 0); - 
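All of the mat4x4 conversion constructors removed in this hunk pad the same way: the overlapping block is copied, missing row entries are filled with 0, and missing columns are taken from the identity, so mat4(mat3 m) keeps m in the upper-left 3x3 block and sets the last column to (0, 0, 0, 1). A sketch of that promotion with plain column-major arrays:

    // Sketch of the identity-padding promotion done by the removed
    // mat<4, 4>(mat<3, 3>) conversion constructor.
    #include <array>

    using Mat3 = std::array<std::array<float, 3>, 3>; // [column][row]
    using Mat4 = std::array<std::array<float, 4>, 4>; // [column][row]

    Mat4 promote(const Mat3& m)
    {
        Mat4 r{};                        // start from all zeros
        for (int c = 0; c < 3; ++c)
            for (int row = 0; row < 3; ++row)
                r[c][row] = m[c][row];   // copy the 3x3 block
        r[3][3] = 1.0f;                  // last column becomes (0, 0, 0, 1)
        return r;
    }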
this->value[1] = col_type(m[1], 0); - this->value[2] = col_type(m[2], 0); - this->value[3] = col_type(m[3], 1); -# endif - } - - // -- Accesses -- - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type & mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) - { - assert(i < this->length()); - return this->value[i]; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR typename mat<4, 4, T, Q>::col_type const& mat<4, 4, T, Q>::operator[](typename mat<4, 4, T, Q>::length_type i) const - { - assert(i < this->length()); - return this->value[i]; - } - - // -- Unary arithmetic operators -- - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator=(mat<4, 4, U, Q> const& m) - { - //memcpy could be faster - //memcpy(&this->value, &m.value, 16 * sizeof(valType)); - this->value[0] = m[0]; - this->value[1] = m[1]; - this->value[2] = m[2]; - this->value[3] = m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(U s) - { - this->value[0] += s; - this->value[1] += s; - this->value[2] += s; - this->value[3] += s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q>& mat<4, 4, T, Q>::operator+=(mat<4, 4, U, Q> const& m) - { - this->value[0] += m[0]; - this->value[1] += m[1]; - this->value[2] += m[2]; - this->value[3] += m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(U s) - { - this->value[0] -= s; - this->value[1] -= s; - this->value[2] -= s; - this->value[3] -= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator-=(mat<4, 4, U, Q> const& m) - { - this->value[0] -= m[0]; - this->value[1] -= m[1]; - this->value[2] -= m[2]; - this->value[3] -= m[3]; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(U s) - { - this->value[0] *= s; - this->value[1] *= s; - this->value[2] *= s; - this->value[3] *= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator*=(mat<4, 4, U, Q> const& m) - { - return (*this = *this * m); - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(U s) - { - this->value[0] /= s; - this->value[1] /= s; - this->value[2] /= s; - this->value[3] /= s; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator/=(mat<4, 4, U, Q> const& m) - { - return *this *= inverse(m); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator++() - { - ++this->value[0]; - ++this->value[1]; - ++this->value[2]; - ++this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> & mat<4, 4, T, Q>::operator--() - { - --this->value[0]; - --this->value[1]; - --this->value[2]; - --this->value[3]; - return *this; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat<4, 4, T, Q>::operator++(int) - { - mat<4, 4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat<4, 4, T, Q>::operator--(int) - { - mat<4, 4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary constant operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m) - { - return m; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m) - { - return 
mat<4, 4, T, Q>( - -m[0], - -m[1], - -m[2], - -m[3]); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m, T const& s) - { - return mat<4, 4, T, Q>( - m[0] + s, - m[1] + s, - m[2] + s, - m[3] + s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(T const& s, mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - m[0] + s, - m[1] + s, - m[2] + s, - m[3] + s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator+(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 4, T, Q>( - m1[0] + m2[0], - m1[1] + m2[1], - m1[2] + m2[2], - m1[3] + m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m, T const& s) - { - return mat<4, 4, T, Q>( - m[0] - s, - m[1] - s, - m[2] - s, - m[3] - s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(T const& s, mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - s - m[0], - s - m[1], - s - m[2], - s - m[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator-(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return mat<4, 4, T, Q>( - m1[0] - m2[0], - m1[1] - m2[1], - m1[2] - m2[2], - m1[3] - m2[3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m, T const & s) - { - return mat<4, 4, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(T const& s, mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - m[0] * s, - m[1] * s, - m[2] * s, - m[3] * s); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type operator* - ( - mat<4, 4, T, Q> const& m, - typename mat<4, 4, T, Q>::row_type const& v - ) - { -/* - __m128 v0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 v1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(1, 1, 1, 1)); - __m128 v2 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(2, 2, 2, 2)); - __m128 v3 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(m[0].data, v0); - __m128 m1 = _mm_mul_ps(m[1].data, v1); - __m128 a0 = _mm_add_ps(m0, m1); - - __m128 m2 = _mm_mul_ps(m[2].data, v2); - __m128 m3 = _mm_mul_ps(m[3].data, v3); - __m128 a1 = _mm_add_ps(m2, m3); - - __m128 a2 = _mm_add_ps(a0, a1); - - return typename mat<4, 4, T, Q>::col_type(a2); -*/ - - typename mat<4, 4, T, Q>::col_type const Mov0(v[0]); - typename mat<4, 4, T, Q>::col_type const Mov1(v[1]); - typename mat<4, 4, T, Q>::col_type const Mul0 = m[0] * Mov0; - typename mat<4, 4, T, Q>::col_type const Mul1 = m[1] * Mov1; - typename mat<4, 4, T, Q>::col_type const Add0 = Mul0 + Mul1; - typename mat<4, 4, T, Q>::col_type const Mov2(v[2]); - typename mat<4, 4, T, Q>::col_type const Mov3(v[3]); - typename mat<4, 4, T, Q>::col_type const Mul2 = m[2] * Mov2; - typename mat<4, 4, T, Q>::col_type const Mul3 = m[3] * Mov3; - typename mat<4, 4, T, Q>::col_type const Add1 = Mul2 + Mul3; - typename mat<4, 4, T, Q>::col_type const Add2 = Add0 + Add1; - return Add2; - -/* - return typename mat<4, 4, T, Q>::col_type( - m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0] * v[3], - m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1] * v[3], - m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2] * v[3], - m[0][3] * v[0] + m[1][3] * v[1] + m[2][3] * v[2] + m[3][3] * v[3]); -*/ - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::row_type operator* - ( - typename mat<4, 4, T, Q>::col_type const& v, - mat<4, 
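The removed operator*(mat4x4, vec4) above avoids spelling out sixteen products by splatting each vector component and summing scaled columns: m * v == v[0]*m[0] + v[1]*m[1] + v[2]*m[2] + v[3]*m[3] (the Mov/Mul/Add sequence in the deleted code). A short sketch of that column-combination form:

    // Sketch of the column-combination form used by the removed code:
    // result = v[0]*col0 + v[1]*col1 + v[2]*col2 + v[3]*col3.
    #include <array>

    using Vec4 = std::array<float, 4>;
    using Mat4 = std::array<Vec4, 4>;   // [column][row]

    Vec4 mul(const Mat4& m, const Vec4& v)
    {
        Vec4 r{};
        for (int c = 0; c < 4; ++c)
            for (int row = 0; row < 4; ++row)
                r[row] += m[c][row] * v[c];
        return r;
    }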
4, T, Q> const& m - ) - { - return typename mat<4, 4, T, Q>::row_type( - m[0][0] * v[0] + m[0][1] * v[1] + m[0][2] * v[2] + m[0][3] * v[3], - m[1][0] * v[0] + m[1][1] * v[1] + m[1][2] * v[2] + m[1][3] * v[3], - m[2][0] * v[0] + m[2][1] * v[1] + m[2][2] * v[2] + m[2][3] * v[3], - m[3][0] * v[0] + m[3][1] * v[1] + m[3][2] * v[2] + m[3][3] * v[3]); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<2, 4, T, Q> const& m2) - { - return mat<2, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<3, 4, T, Q> const& m2) - { - return mat<3, 4, T, Q>( - m1[0][0] * m2[0][0] + m1[1][0] * m2[0][1] + m1[2][0] * m2[0][2] + m1[3][0] * m2[0][3], - m1[0][1] * m2[0][0] + m1[1][1] * m2[0][1] + m1[2][1] * m2[0][2] + m1[3][1] * m2[0][3], - m1[0][2] * m2[0][0] + m1[1][2] * m2[0][1] + m1[2][2] * m2[0][2] + m1[3][2] * m2[0][3], - m1[0][3] * m2[0][0] + m1[1][3] * m2[0][1] + m1[2][3] * m2[0][2] + m1[3][3] * m2[0][3], - m1[0][0] * m2[1][0] + m1[1][0] * m2[1][1] + m1[2][0] * m2[1][2] + m1[3][0] * m2[1][3], - m1[0][1] * m2[1][0] + m1[1][1] * m2[1][1] + m1[2][1] * m2[1][2] + m1[3][1] * m2[1][3], - m1[0][2] * m2[1][0] + m1[1][2] * m2[1][1] + m1[2][2] * m2[1][2] + m1[3][2] * m2[1][3], - m1[0][3] * m2[1][0] + m1[1][3] * m2[1][1] + m1[2][3] * m2[1][2] + m1[3][3] * m2[1][3], - m1[0][0] * m2[2][0] + m1[1][0] * m2[2][1] + m1[2][0] * m2[2][2] + m1[3][0] * m2[2][3], - m1[0][1] * m2[2][0] + m1[1][1] * m2[2][1] + m1[2][1] * m2[2][2] + m1[3][1] * m2[2][3], - m1[0][2] * m2[2][0] + m1[1][2] * m2[2][1] + m1[2][2] * m2[2][2] + m1[3][2] * m2[2][3], - m1[0][3] * m2[2][0] + m1[1][3] * m2[2][1] + m1[2][3] * m2[2][2] + m1[3][3] * m2[2][3]); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator*(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - typename mat<4, 4, T, Q>::col_type const SrcA0 = m1[0]; - typename mat<4, 4, T, Q>::col_type const SrcA1 = m1[1]; - typename mat<4, 4, T, Q>::col_type const SrcA2 = m1[2]; - typename mat<4, 4, T, Q>::col_type const SrcA3 = m1[3]; - - typename mat<4, 4, T, Q>::col_type const SrcB0 = m2[0]; - typename mat<4, 4, T, Q>::col_type const SrcB1 = m2[1]; - typename mat<4, 4, T, Q>::col_type const SrcB2 = m2[2]; - typename mat<4, 4, T, Q>::col_type const SrcB3 = m2[3]; - - mat<4, 4, T, Q> Result; - Result[0] = SrcA0 * SrcB0[0] + SrcA1 * SrcB0[1] + SrcA2 * SrcB0[2] + SrcA3 * SrcB0[3]; - Result[1] = SrcA0 * SrcB1[0] + SrcA1 * SrcB1[1] + SrcA2 * SrcB1[2] + SrcA3 * SrcB1[3]; - Result[2] = SrcA0 * SrcB2[0] + SrcA1 * SrcB2[1] + SrcA2 * SrcB2[2] + SrcA3 * SrcB2[3]; - Result[3] = SrcA0 * SrcB3[0] + SrcA1 * SrcB3[1] + SrcA2 * SrcB3[2] + SrcA3 * SrcB3[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m, T const& s) - { - return mat<4, 4, T, Q>( - m[0] 
/ s, - m[1] / s, - m[2] / s, - m[3] / s); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(T const& s, mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - s / m[0], - s / m[1], - s / m[2], - s / m[3]); - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::col_type operator/(mat<4, 4, T, Q> const& m, typename mat<4, 4, T, Q>::row_type const& v) - { - return inverse(m) * v; - } - - template - GLM_FUNC_QUALIFIER typename mat<4, 4, T, Q>::row_type operator/(typename mat<4, 4, T, Q>::col_type const& v, mat<4, 4, T, Q> const& m) - { - return v * inverse(m); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> operator/(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - mat<4, 4, T, Q> m1_copy(m1); - return m1_copy /= m2; - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return (m1[0] == m2[0]) && (m1[1] == m2[1]) && (m1[2] == m2[2]) && (m1[3] == m2[3]); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2) - { - return (m1[0] != m2[0]) || (m1[1] != m2[1]) || (m1[2] != m2[2]) || (m1[3] != m2[3]); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "type_mat4x4_simd.inl" -#endif diff --git a/third_party/glm/detail/type_mat4x4_simd.inl b/third_party/glm/detail/type_mat4x4_simd.inl deleted file mode 100755 index fb3a16f..0000000 --- a/third_party/glm/detail/type_mat4x4_simd.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref core - -namespace glm -{ - -}//namespace glm diff --git a/third_party/glm/detail/type_quat.hpp b/third_party/glm/detail/type_quat.hpp deleted file mode 100755 index 0e60bc3..0000000 --- a/third_party/glm/detail/type_quat.hpp +++ /dev/null @@ -1,186 +0,0 @@ -/// @ref core -/// @file glm/detail/type_quat.hpp - -#pragma once - -// Dependency: -#include "../detail/type_mat3x3.hpp" -#include "../detail/type_mat4x4.hpp" -#include "../detail/type_vec3.hpp" -#include "../detail/type_vec4.hpp" -#include "../ext/vector_relational.hpp" -#include "../ext/quaternion_relational.hpp" -#include "../gtc/constants.hpp" -#include "../gtc/matrix_transform.hpp" - -namespace glm -{ - template - struct qua - { - // -- Implementation detail -- - - typedef qua type; - typedef T value_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# endif -# endif - -# if GLM_LANG & GLM_LANG_CXXMS_FLAG - union - { -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - struct { T w, x, y, z; }; -# else - struct { T x, y, z, w; }; -# endif - - typename detail::storage<4, T, detail::is_aligned::value>::type data; - }; -# else -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - T w, x, y, z; -# else - T x, y, z, w; -# endif -# endif - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - typedef length_t length_type; - - /// Return the count of components of a 
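In the mat4x4 operators deleted above, "division" is defined through the inverse: m / v is inverse(m) * v, v / m is v * inverse(m), and m1 / m2 is m1 * inverse(m2) via operator/=. A tiny illustration of that identity using the public GLM API (assuming <glm/glm.hpp> is available):

    // Illustration only: semantics of the removed mat4x4 operator/.
    #include <glm/glm.hpp>

    glm::mat4 divide(const glm::mat4& m1, const glm::mat4& m2)
    {
        // operator/(mat4, mat4) in the removed code expands to this:
        return m1 * glm::inverse(m2);   // same result as m1 / m2
    }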
quaternion - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} - - GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR qua() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR qua(qua const& q) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR qua(qua const& q); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR qua(T s, vec<3, T, Q> const& v); - GLM_FUNC_DECL GLM_CONSTEXPR qua(T w, T x, T y, T z); - - // -- Conversion constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(qua const& q); - - /// Explicit conversion operators -# if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS - GLM_FUNC_DECL explicit operator mat<3, 3, T, Q>() const; - GLM_FUNC_DECL explicit operator mat<4, 4, T, Q>() const; -# endif - - /// Create a quaternion from two normalized axis - /// - /// @param u A first normalized axis - /// @param v A second normalized axis - /// @see gtc_quaternion - /// @see http://lolengine.net/blog/2013/09/18/beautiful-maths-quaternion-from-vectors - GLM_FUNC_DECL qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v); - - /// Build a quaternion from euler angles (pitch, yaw, roll), in radians. - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT qua(vec<3, T, Q> const& eulerAngles); - GLM_FUNC_DECL GLM_EXPLICIT qua(mat<3, 3, T, Q> const& q); - GLM_FUNC_DECL GLM_EXPLICIT qua(mat<4, 4, T, Q> const& q); - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator=(qua const& q) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator+=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator-=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator*=(qua const& q); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator*=(U s); - template - GLM_FUNC_DECL GLM_CONSTEXPR qua& operator/=(U s); - }; - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator+(qua const& q, qua const& p); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator-(qua const& q, qua const& p); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, qua const& p); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(qua const& q, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(qua const& q, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(qua const& q, T const& s); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator*(T const& s, qua const& q); - - template - GLM_FUNC_DECL GLM_CONSTEXPR qua operator/(qua const& q, T const& s); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(qua const& q1, qua const& q2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(qua const& q1, qua const& q2); -} //namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_quat.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_quat.inl 
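Among the quaternion constructors declared in the removed header is one that builds a rotation from Euler angles (pitch, yaw, roll, in radians); its definition, further down in type_quat.inl, uses half-angle sines and cosines. A self-contained sketch of that formula:

    // Sketch of the Euler-angle (pitch, yaw, roll, radians) to quaternion
    // conversion implemented by the removed constructor.
    #include <cmath>

    struct Quat { float w, x, y, z; };

    Quat fromEuler(float pitch, float yaw, float roll)
    {
        const float cx = std::cos(pitch * 0.5f), sx = std::sin(pitch * 0.5f);
        const float cy = std::cos(yaw   * 0.5f), sy = std::sin(yaw   * 0.5f);
        const float cz = std::cos(roll  * 0.5f), sz = std::sin(roll  * 0.5f);

        Quat q;
        q.w = cx * cy * cz + sx * sy * sz;
        q.x = sx * cy * cz - cx * sy * sz;
        q.y = cx * sy * cz + sx * cy * sz;
        q.z = cx * cy * sz - sx * sy * cz;
        return q;
    }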
b/third_party/glm/detail/type_quat.inl deleted file mode 100755 index 67b9310..0000000 --- a/third_party/glm/detail/type_quat.inl +++ /dev/null @@ -1,408 +0,0 @@ -#include "../trigonometric.hpp" -#include "../exponential.hpp" -#include "../ext/quaternion_geometric.hpp" -#include - -namespace glm{ -namespace detail -{ - template - struct genTypeTrait > - { - static const genTypeEnum GENTYPE = GENTYPE_QUAT; - }; - - template - struct compute_dot, T, Aligned> - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static T call(qua const& a, qua const& b) - { - vec<4, T, Q> tmp(a.w * b.w, a.x * b.x, a.y * b.y, a.z * b.z); - return (tmp.x + tmp.y) + (tmp.z + tmp.w); - } - }; - - template - struct compute_quat_add - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) - { - return qua(q.w + p.w, q.x + p.x, q.y + p.y, q.z + p.z); - } - }; - - template - struct compute_quat_sub - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, qua const& p) - { - return qua(q.w - p.w, q.x - p.x, q.y - p.y, q.z - p.z); - } - }; - - template - struct compute_quat_mul_scalar - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) - { - return qua(q.w * s, q.x * s, q.y * s, q.z * s); - } - }; - - template - struct compute_quat_div_scalar - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static qua call(qua const& q, T s) - { - return qua(q.w / s, q.x / s, q.y / s, q.z / s); - } - }; - - template - struct compute_quat_mul_vec4 - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(qua const& q, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); - } - }; -}//namespace detail - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & qua::operator[](typename qua::length_type i) - { - assert(i >= 0 && i < this->length()); -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - return (&w)[i]; -# else - return (&x)[i]; -# endif - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& qua::operator[](typename qua::length_type i) const - { - assert(i >= 0 && i < this->length()); -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - return (&w)[i]; -# else - return (&x)[i]; -# endif - } - - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(1), x(0), y(0), z(0) -# else - : x(0), y(0), z(0), w(1) -# endif -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(q.w), x(q.x), y(q.y), z(q.z) -# else - : x(q.x), y(q.y), z(q.z), w(q.w) -# endif - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(q.w), x(q.x), y(q.y), z(q.z) -# else - : x(q.x), y(q.y), z(q.z), w(q.w) -# endif - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T s, vec<3, T, Q> const& v) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(s), x(v.x), y(v.y), z(v.z) -# else - : x(v.x), y(v.y), z(v.z), w(s) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T _w, T _x, T _y, T _z) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(_w), x(_x), y(_y), z(_z) -# else - : x(_x), y(_y), z(_z), w(_w) -# endif - {} - - // -- Conversion constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(qua const& q) -# ifdef GLM_FORCE_QUAT_DATA_WXYZ - : w(static_cast(q.w)), x(static_cast(q.x)), y(static_cast(q.y)), 
z(static_cast<T>(q.z))
-#	else
-		: x(static_cast<T>(q.x)), y(static_cast<T>(q.y)), z(static_cast<T>(q.z)), w(static_cast<T>(q.w))
-#	endif
-	{}
-
-	//template<typename valType>
-	//GLM_FUNC_QUALIFIER qua<valType>::qua
-	//(
-	//	valType const& pitch,
-	//	valType const& yaw,
-	//	valType const& roll
-	//)
-	//{
-	//	vec<3, valType> eulerAngle(pitch * valType(0.5), yaw * valType(0.5), roll * valType(0.5));
-	//	vec<3, valType> c = glm::cos(eulerAngle * valType(0.5));
-	//	vec<3, valType> s = glm::sin(eulerAngle * valType(0.5));
-	//
-	//	this->w = c.x * c.y * c.z + s.x * s.y * s.z;
-	//	this->x = s.x * c.y * c.z - c.x * s.y * s.z;
-	//	this->y = c.x * s.y * c.z + s.x * c.y * s.z;
-	//	this->z = c.x * c.y * s.z - s.x * s.y * c.z;
-	//}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::qua(vec<3, T, Q> const& u, vec<3, T, Q> const& v)
-	{
-		T norm_u_norm_v = sqrt(dot(u, u) * dot(v, v));
-		T real_part = norm_u_norm_v + dot(u, v);
-		vec<3, T, Q> t;
-
-		if(real_part < static_cast<T>(1.e-6f) * norm_u_norm_v)
-		{
-			// If u and v are exactly opposite, rotate 180 degrees
-			// around an arbitrary orthogonal axis. Axis normalisation
-			// can happen later, when we normalise the quaternion.
-			real_part = static_cast<T>(0);
-			t = abs(u.x) > abs(u.z) ? vec<3, T, Q>(-u.y, u.x, static_cast<T>(0)) : vec<3, T, Q>(static_cast<T>(0), -u.z, u.y);
-		}
-		else
-		{
-			// Otherwise, build quaternion the standard way.
-			t = cross(u, v);
-		}
-
-		*this = normalize(qua<T, Q>(real_part, t.x, t.y, t.z));
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q>::qua(vec<3, T, Q> const& eulerAngle)
-	{
-		vec<3, T, Q> c = glm::cos(eulerAngle * T(0.5));
-		vec<3, T, Q> s = glm::sin(eulerAngle * T(0.5));
-
-		this->w = c.x * c.y * c.z + s.x * s.y * s.z;
-		this->x = s.x * c.y * c.z - c.x * s.y * s.z;
-		this->y = c.x * s.y * c.z + s.x * c.y * s.z;
-		this->z = c.x * c.y * s.z - s.x * s.y * c.z;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::qua(mat<3, 3, T, Q> const& m)
-	{
-		*this = quat_cast(m);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::qua(mat<4, 4, T, Q> const& m)
-	{
-		*this = quat_cast(m);
-	}
-
-#	if GLM_HAS_EXPLICIT_CONVERSION_OPERATORS
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::operator mat<3, 3, T, Q>() const
-	{
-		return mat3_cast(*this);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER qua<T, Q>::operator mat<4, 4, T, Q>() const
-	{
-		return mat4_cast(*this);
-	}
-#	endif//GLM_HAS_EXPLICIT_CONVERSION_OPERATORS
-
-	// -- Unary arithmetic operators --
-
-#	if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator=(qua<T, Q> const& q)
-	{
-		this->w = q.w;
-		this->x = q.x;
-		this->y = q.y;
-		this->z = q.z;
-		return *this;
-	}
-#	endif
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator=(qua<U, Q> const& q)
-	{
-		this->w = static_cast<T>(q.w);
-		this->x = static_cast<T>(q.x);
-		this->y = static_cast<T>(q.y);
-		this->z = static_cast<T>(q.z);
-		return *this;
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator+=(qua<U, Q> const& q)
-	{
-		return (*this = detail::compute_quat_add<T, Q, detail::is_aligned<Q>::value>::call(*this, qua<T, Q>(q)));
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator-=(qua<U, Q> const& q)
-	{
-		return (*this = detail::compute_quat_sub<T, Q, detail::is_aligned<Q>::value>::call(*this, qua<T, Q>(q)));
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator*=(qua<U, Q> const& r)
-	{
-		qua<T, Q> const p(*this);
-		qua<T, Q> const q(r);
-
-		this->w = p.w * q.w - p.x * q.x - p.y * q.y - p.z * q.z;
-		this->x = p.w * q.x + p.x * q.w + p.y * q.z - p.z * q.y;
-		this->y = p.w * q.y + p.y * q.w + p.z * q.x - p.x * q.z;
-		this->z = p.w * q.z + p.z * q.w + p.x * q.y - p.y * q.x;
-		return *this;
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator*=(U s)
-	{
-		return (*this = detail::compute_quat_mul_scalar<T, Q, detail::is_aligned<Q>::value>::call(*this, static_cast<T>(s)));
-	}
-
-	template<typename T, qualifier Q>
-	template<typename U>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> & qua<T, Q>::operator/=(U s)
-	{
-		return (*this = detail::compute_quat_div_scalar<T, Q, detail::is_aligned<Q>::value>::call(*this, static_cast<T>(s)));
-	}
-
-	// -- Unary bit operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q)
-	{
-		return q;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q)
-	{
-		return qua<T, Q>(-q.w, -q.x, -q.y, -q.z);
-	}
-
-	// -- Binary operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator+(qua<T, Q> const& q, qua<T, Q> const& p)
-	{
-		return qua<T, Q>(q) += p;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator-(qua<T, Q> const& q, qua<T, Q> const& p)
-	{
-		return qua<T, Q>(q) -= p;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, qua<T, Q> const& p)
-	{
-		return qua<T, Q>(q) *= p;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(qua<T, Q> const& q, vec<3, T, Q> const& v)
-	{
-		vec<3, T, Q> const QuatVector(q.x, q.y, q.z);
-		vec<3, T, Q> const uv(glm::cross(QuatVector, v));
-		vec<3, T, Q> const uuv(glm::cross(QuatVector, uv));
-
-		return v + ((uv * q.w) + uuv) * static_cast<T>(2);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, qua<T, Q> const& q)
-	{
-		return glm::inverse(q) * v;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(qua<T, Q> const& q, vec<4, T, Q> const& v)
-	{
-		return detail::compute_quat_mul_vec4<T, Q, detail::is_aligned<Q>::value>::call(q, v);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, qua<T, Q> const& q)
-	{
-		return glm::inverse(q) * v;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(qua<T, Q> const& q, T const& s)
-	{
-		return qua<T, Q>(
-			q.w * s, q.x * s, q.y * s, q.z * s);
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator*(T const& s, qua<T, Q> const& q)
-	{
-		return q * s;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua<T, Q> operator/(qua<T, Q> const& q, T const& s)
-	{
-		return qua<T, Q>(
-			q.w / s, q.x / s, q.y / s, q.z / s);
-	}
-
-	// -- Boolean operators --
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(qua<T, Q> const& q1, qua<T, Q> const& q2)
-	{
-		return q1.x == q2.x && q1.y == q2.y && q1.z == q2.z && q1.w == q2.w;
-	}
-
-	template<typename T, qualifier Q>
-	GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(qua<T, Q> const& q1, qua<T, Q> const& q2)
-	{
-		return q1.x != q2.x || q1.y != q2.y || q1.z != q2.z || q1.w != q2.w;
-	}
-}//namespace glm
-
-#if GLM_CONFIG_SIMD == GLM_ENABLE
-#	include "type_quat_simd.inl"
-#endif
-
diff --git a/third_party/glm/detail/type_quat_simd.inl b/third_party/glm/detail/type_quat_simd.inl
deleted file mode 100755
index 3333e59..0000000
--- a/third_party/glm/detail/type_quat_simd.inl
+++ /dev/null
@@ -1,188 +0,0 @@
-/// @ref core
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-namespace glm{
-namespace detail
-{
-/*
-	template<qualifier Q>
-	struct compute_quat_mul<float, Q, true>
-	{
-		static qua<float, Q> call(qua<float, Q> const& q1, qua<float, Q> const& q2)
-		{
-			// SSE2 STATS: 11 shuffle, 8 mul, 8 add
-			// SSE4 STATS: 3 shuffle, 4 mul, 4 dpps
-
-			__m128 const mul0 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(0, 1, 2, 3)));
-			__m128 const mul1 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(1, 0, 3, 2)));
-			__m128 const mul2 = _mm_mul_ps(q1.Data, _mm_shuffle_ps(q2.Data, q2.Data, _MM_SHUFFLE(2, 3, 0, 1)));
-			__m128 const mul3 =
_mm_mul_ps(q1.Data, q2.Data); - -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - __m128 const add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff); - __m128 const add1 = _mm_dp_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f), 0xff); - __m128 const add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff); - __m128 const add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff); -# else - __m128 const mul4 = _mm_mul_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f)); - __m128 const add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul4, mul4)); - __m128 const add4 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1)); - - __m128 const mul5 = _mm_mul_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f)); - __m128 const add1 = _mm_add_ps(mul1, _mm_movehl_ps(mul5, mul5)); - __m128 const add5 = _mm_add_ss(add1, _mm_shuffle_ps(add1, add1, 1)); - - __m128 const mul6 = _mm_mul_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f)); - __m128 const add2 = _mm_add_ps(mul6, _mm_movehl_ps(mul6, mul6)); - __m128 const add6 = _mm_add_ss(add2, _mm_shuffle_ps(add2, add2, 1)); - - __m128 const mul7 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f)); - __m128 const add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul7, mul7)); - __m128 const add7 = _mm_add_ss(add3, _mm_shuffle_ps(add3, add3, 1)); - #endif - - // This SIMD code is a politically correct way of doing this, but in every test I've tried it has been slower than - // the final code below. I'll keep this here for reference - maybe somebody else can do something better... - // - //__m128 xxyy = _mm_shuffle_ps(add4, add5, _MM_SHUFFLE(0, 0, 0, 0)); - //__m128 zzww = _mm_shuffle_ps(add6, add7, _MM_SHUFFLE(0, 0, 0, 0)); - // - //return _mm_shuffle_ps(xxyy, zzww, _MM_SHUFFLE(2, 0, 2, 0)); - - qua Result; - _mm_store_ss(&Result.x, add4); - _mm_store_ss(&Result.y, add5); - _mm_store_ss(&Result.z, add6); - _mm_store_ss(&Result.w, add7); - return Result; - } - }; -*/ - - template - struct compute_quat_add - { - static qua call(qua const& q, qua const& p) - { - qua Result; - Result.data = _mm_add_ps(q.data, p.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_add - { - static qua call(qua const& a, qua const& b) - { - qua Result; - Result.data = _mm256_add_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_quat_sub - { - static qua call(qua const& q, qua const& p) - { - vec<4, float, Q> Result; - Result.data = _mm_sub_ps(q.data, p.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_sub - { - static qua call(qua const& a, qua const& b) - { - qua Result; - Result.data = _mm256_sub_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_quat_mul_scalar - { - static qua call(qua const& q, float s) - { - vec<4, float, Q> Result; - Result.data = _mm_mul_ps(q.data, _mm_set_ps1(s)); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_mul_scalar - { - static qua call(qua const& q, double s) - { - qua Result; - Result.data = _mm256_mul_pd(q.data, _mm_set_ps1(s)); - return Result; - } - }; -# endif - - template - struct compute_quat_div_scalar - { - static qua call(qua const& q, float s) - { - vec<4, float, Q> Result; - Result.data = _mm_div_ps(q.data, _mm_set_ps1(s)); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_quat_div_scalar - { - static qua call(qua const& q, double s) - { - qua Result; - Result.data = _mm256_div_pd(q.data, _mm_set_ps1(s)); - 
return Result; - } - }; -# endif - - template - struct compute_quat_mul_vec4 - { - static vec<4, float, Q> call(qua const& q, vec<4, float, Q> const& v) - { - __m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 3, 3, 3)); - __m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 0, 2, 1)); - __m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 1, 0, 2)); - __m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1)); - __m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2)); - - __m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0)); - __m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1)); - __m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2)); - __m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0)); - - __m128 const two = _mm_set1_ps(2.0f); - uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two)); - uuv = _mm_mul_ps(uuv, two); - - vec<4, float, Q> Result; - Result.data = _mm_add_ps(v.Data, _mm_add_ps(uv, uuv)); - return Result; - } - }; -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT - diff --git a/third_party/glm/detail/type_vec1.hpp b/third_party/glm/detail/type_vec1.hpp deleted file mode 100755 index 51163f1..0000000 --- a/third_party/glm/detail/type_vec1.hpp +++ /dev/null @@ -1,308 +0,0 @@ -/// @ref core -/// @file glm/detail/type_vec1.hpp - -#pragma once - -#include "qualifier.hpp" -#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# include "_swizzle.hpp" -#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# include "_swizzle_func.hpp" -#endif -#include - -namespace glm -{ - template - struct vec<1, T, Q> - { - // -- Implementation detail -- - - typedef T value_type; - typedef vec<1, T, Q> type; - typedef vec<1, bool, Q> bool_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# endif -# endif - -# if GLM_CONFIG_XYZW_ONLY - T x; -# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE - union - { - T x; - T r; - T s; - - typename detail::storage<1, T, detail::is_aligned::value>::type data; -/* -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - _GLM_SWIZZLE1_2_MEMBERS(T, Q, x) - _GLM_SWIZZLE1_2_MEMBERS(T, Q, r) - _GLM_SWIZZLE1_2_MEMBERS(T, Q, s) - _GLM_SWIZZLE1_3_MEMBERS(T, Q, x) - _GLM_SWIZZLE1_3_MEMBERS(T, Q, r) - _GLM_SWIZZLE1_3_MEMBERS(T, Q, s) - _GLM_SWIZZLE1_4_MEMBERS(T, Q, x) - _GLM_SWIZZLE1_4_MEMBERS(T, Q, r) - _GLM_SWIZZLE1_4_MEMBERS(T, Q, s) -# endif -*/ - }; -# else - union {T x, r, s;}; -/* -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION - GLM_SWIZZLE_GEN_VEC_FROM_VEC1(T, Q) -# endif -*/ -# endif - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - /// Return the count of components of the vector - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 1;} - - 
GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, T, P> const& v); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); - - // -- Conversion vector constructors -- - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<1, U, P> const& v); - - // -- Swizzle constructors -- -/* -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<1, T, Q, E0, -1,-2,-3> const& that) - { - *this = that(); - } -# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -*/ - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec const& v) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator+=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator-=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator*=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator/=(vec<1, U, Q> const& v); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator++(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator--(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator++(int); - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator--(int); - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator%=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator&=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator|=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator^=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator<<=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR 
vec<1, T, Q> & operator<<=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> & operator>>=(vec<1, U, Q> const& v); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR 
vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_vec1.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_vec1.inl b/third_party/glm/detail/type_vec1.inl deleted file mode 100755 index d0f49fd..0000000 --- a/third_party/glm/detail/type_vec1.inl +++ /dev/null @@ -1,551 +0,0 @@ -/// @ref core - -#include "./compute_vector_relational.hpp" - -namespace glm -{ - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE - : x(0) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, Q> const& v) - : x(v.x) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, T, P> const& v) - : x(v.x) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(T scalar) - : x(scalar) - {} - - // -- Conversion vector constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<1, U, P> const& v) - : x(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<2, U, P> const& v) - : x(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<3, U, P> const& v) - : x(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q>::vec(vec<4, U, P> const& v) - : x(static_cast(v.x)) - {} - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) - { - return x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<1, T, Q>::operator[](typename vec<1, T, Q>::length_type) const - { - return x; - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, T, Q> const& v) - { - this->x = v.x; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator=(vec<1, U, Q> const& v) - { - this->x = static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(U scalar) - { - this->x += static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator+=(vec<1, U, Q> const& v) - { - this->x += static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator-=(U scalar) - { - this->x -= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, 
Q>::operator-=(vec<1, U, Q> const& v) - { - this->x -= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(U scalar) - { - this->x *= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator*=(vec<1, U, Q> const& v) - { - this->x *= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(U scalar) - { - this->x /= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator/=(vec<1, U, Q> const& v) - { - this->x /= static_cast(v.x); - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator++() - { - ++this->x; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator--() - { - --this->x; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator++(int) - { - vec<1, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> vec<1, T, Q>::operator--(int) - { - vec<1, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary bit operators -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(U scalar) - { - this->x %= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator%=(vec<1, U, Q> const& v) - { - this->x %= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(U scalar) - { - this->x &= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator&=(vec<1, U, Q> const& v) - { - this->x &= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(U scalar) - { - this->x |= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator|=(vec<1, U, Q> const& v) - { - this->x |= U(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(U scalar) - { - this->x ^= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator^=(vec<1, U, Q> const& v) - { - this->x ^= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(U scalar) - { - this->x <<= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator<<=(vec<1, U, Q> const& v) - { - this->x <<= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(U scalar) - { - this->x >>= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> & vec<1, T, Q>::operator>>=(vec<1, U, Q> const& v) - { - this->x >>= static_cast(v.x); - return *this; - } - - // -- Unary constant operators -- - - template - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - -v.x); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x + scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar + v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator+(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x + v2.x); - } - - //operator- - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x - scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar - v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator-(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x - v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x * scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar * v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator*(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x * v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x / scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar / v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator/(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x / v2.x); - } - - // -- Binary bit operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x % scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar % v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator%(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x % v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x & scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar & v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator&(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x & v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x | scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar | v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator|(vec<1, T, Q> const& v1, vec<1, T, Q> 
const& v2) - { - return vec<1, T, Q>( - v1.x | v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x ^ scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar ^ v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator^(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x ^ v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - static_cast(v.x << scalar)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar << v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator<<(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x << v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v, T scalar) - { - return vec<1, T, Q>( - v.x >> scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(T scalar, vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - scalar >> v.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator>>(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<1, T, Q>( - v1.x >> v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, T, Q> operator~(vec<1, T, Q> const& v) - { - return vec<1, T, Q>( - ~v.x); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return detail::compute_equal::is_iec559>::call(v1.x, v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<1, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return !(v1 == v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator&&(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2) - { - return vec<1, bool, Q>(v1.x && v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<1, bool, Q> operator||(vec<1, bool, Q> const& v1, vec<1, bool, Q> const& v2) - { - return vec<1, bool, Q>(v1.x || v2.x); - } -}//namespace glm diff --git a/third_party/glm/detail/type_vec2.hpp b/third_party/glm/detail/type_vec2.hpp deleted file mode 100755 index 52ef408..0000000 --- a/third_party/glm/detail/type_vec2.hpp +++ /dev/null @@ -1,399 +0,0 @@ -/// @ref core -/// @file glm/detail/type_vec2.hpp - -#pragma once - -#include "qualifier.hpp" -#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# include "_swizzle.hpp" -#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# include "_swizzle_func.hpp" -#endif -#include - -namespace glm -{ - template - struct vec<2, T, Q> - { - // -- Implementation detail -- - - typedef T value_type; - typedef vec<2, T, Q> type; - typedef vec<2, bool, Q> bool_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# endif -# endif - -# 
if GLM_CONFIG_XYZW_ONLY - T x, y; -# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE - union - { - struct{ T x, y; }; - struct{ T r, g; }; - struct{ T s, t; }; - - typename detail::storage<2, T, detail::is_aligned::value>::type data; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - GLM_SWIZZLE2_2_MEMBERS(T, Q, x, y) - GLM_SWIZZLE2_2_MEMBERS(T, Q, r, g) - GLM_SWIZZLE2_2_MEMBERS(T, Q, s, t) - GLM_SWIZZLE2_3_MEMBERS(T, Q, x, y) - GLM_SWIZZLE2_3_MEMBERS(T, Q, r, g) - GLM_SWIZZLE2_3_MEMBERS(T, Q, s, t) - GLM_SWIZZLE2_4_MEMBERS(T, Q, x, y) - GLM_SWIZZLE2_4_MEMBERS(T, Q, r, g) - GLM_SWIZZLE2_4_MEMBERS(T, Q, s, t) -# endif - }; -# else - union {T x, r, s;}; - union {T y, g, t;}; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION - GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) -# endif//GLM_CONFIG_SWIZZLE -# endif - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - /// Return the count of components of the vector - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;} - - GLM_FUNC_DECL GLM_CONSTEXPR T& operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, T, P> const& v); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y); - - // -- Conversion constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, B y); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, B y); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A x, vec<1, B, Q> const& y); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, Q> const& x, vec<1, B, Q> const& y); - - // -- Conversion vector constructors -- - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<2, U, P> const& v); - - // -- Swizzle constructors -- -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1,-1,-2> const& that) - { - *this = that(); - } -# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec const& v) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<1, U, Q> 
const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator+=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator-=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator*=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator/=(vec<2, U, Q> const& v); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator++(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator--(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator++(int); - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator--(int); - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator%=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator&=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator|=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator^=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator<<=(vec<2, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> & operator>>=(vec<2, U, Q> const& v); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& 
v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> 
operator^(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_vec2.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_vec2.inl b/third_party/glm/detail/type_vec2.inl deleted file mode 100755 index 8e65d6b..0000000 --- a/third_party/glm/detail/type_vec2.inl +++ /dev/null @@ -1,913 +0,0 @@ -/// @ref core - -#include "./compute_vector_relational.hpp" - -namespace glm -{ - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE - : x(0), y(0) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, Q> const& v) - : x(v.x), y(v.y) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, T, P> const& v) - : x(v.x), y(v.y) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T scalar) - : x(scalar), y(scalar) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(T _x, T _y) - : x(_x), y(_y) - {} - - // -- Conversion scalar constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, B _y) - : x(static_cast(_x)) - , y(static_cast(_y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, B _y) - : 
x(static_cast(_x.x)) - , y(static_cast(_y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(A _x, vec<1, B, Q> const& _y) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<1, A, Q> const& _x, vec<1, B, Q> const& _y) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - {} - - // -- Conversion vector constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<2, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<3, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q>::vec(vec<4, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - {} - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - } - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<2, T, Q>::operator[](typename vec<2, T, Q>::length_type i) const - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - } - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, T, Q> const& v) - { - this->x = v.x; - this->y = v.y; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator=(vec<2, U, Q> const& v) - { - this->x = static_cast(v.x); - this->y = static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(U scalar) - { - this->x += static_cast(scalar); - this->y += static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<1, U, Q> const& v) - { - this->x += static_cast(v.x); - this->y += static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator+=(vec<2, U, Q> const& v) - { - this->x += static_cast(v.x); - this->y += static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(U scalar) - { - this->x -= static_cast(scalar); - this->y -= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<1, U, Q> const& v) - { - this->x -= static_cast(v.x); - this->y -= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator-=(vec<2, U, Q> const& v) - { - this->x -= static_cast(v.x); - this->y -= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(U scalar) - { - this->x *= static_cast(scalar); - this->y *= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<1, U, Q> const& v) - { - this->x *= static_cast(v.x); - this->y *= static_cast(v.x); - return *this; - } - - template - template - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator*=(vec<2, U, Q> const& v) - { - this->x *= static_cast(v.x); - this->y *= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(U scalar) - { - this->x /= static_cast(scalar); - this->y /= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<1, U, Q> const& v) - { - this->x /= static_cast(v.x); - this->y /= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator/=(vec<2, U, Q> const& v) - { - this->x /= static_cast(v.x); - this->y /= static_cast(v.y); - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator++() - { - ++this->x; - ++this->y; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator--() - { - --this->x; - --this->y; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator++(int) - { - vec<2, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> vec<2, T, Q>::operator--(int) - { - vec<2, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary bit operators -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(U scalar) - { - this->x %= static_cast(scalar); - this->y %= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<1, U, Q> const& v) - { - this->x %= static_cast(v.x); - this->y %= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator%=(vec<2, U, Q> const& v) - { - this->x %= static_cast(v.x); - this->y %= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(U scalar) - { - this->x &= static_cast(scalar); - this->y &= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<1, U, Q> const& v) - { - this->x &= static_cast(v.x); - this->y &= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator&=(vec<2, U, Q> const& v) - { - this->x &= static_cast(v.x); - this->y &= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(U scalar) - { - this->x |= static_cast(scalar); - this->y |= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<1, U, Q> const& v) - { - this->x |= static_cast(v.x); - this->y |= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator|=(vec<2, U, Q> const& v) - { - this->x |= static_cast(v.x); - this->y |= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(U scalar) - { - this->x ^= static_cast(scalar); - this->y ^= static_cast(scalar); - return *this; - } - - template - template - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<1, U, Q> const& v) - { - this->x ^= static_cast(v.x); - this->y ^= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator^=(vec<2, U, Q> const& v) - { - this->x ^= static_cast(v.x); - this->y ^= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(U scalar) - { - this->x <<= static_cast(scalar); - this->y <<= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<1, U, Q> const& v) - { - this->x <<= static_cast(v.x); - this->y <<= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator<<=(vec<2, U, Q> const& v) - { - this->x <<= static_cast(v.x); - this->y <<= static_cast(v.y); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(U scalar) - { - this->x >>= static_cast(scalar); - this->y >>= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<1, U, Q> const& v) - { - this->x >>= static_cast(v.x); - this->y >>= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> & vec<2, T, Q>::operator>>=(vec<2, U, Q> const& v) - { - this->x >>= static_cast(v.x); - this->y >>= static_cast(v.y); - return *this; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - -v.x, - -v.y); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x + scalar, - v.y + scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x + v2.x, - v1.y + v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar + v.x, - scalar + v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x + v2.x, - v1.x + v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator+(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x + v2.x, - v1.y + v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x - scalar, - v.y - scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x - v2.x, - v1.y - v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar - v.x, - scalar - v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x - v2.x, - v1.x - v2.y); - 
} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator-(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x - v2.x, - v1.y - v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x * scalar, - v.y * scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x * v2.x, - v1.y * v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar * v.x, - scalar * v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x * v2.x, - v1.x * v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator*(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x * v2.x, - v1.y * v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x / scalar, - v.y / scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x / v2.x, - v1.y / v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar / v.x, - scalar / v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x / v2.x, - v1.x / v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator/(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x / v2.x, - v1.y / v2.y); - } - - // -- Binary bit operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x % scalar, - v.y % scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x % v2.x, - v1.y % v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar % v.x, - scalar % v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x % v2.x, - v1.x % v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator%(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x % v2.x, - v1.y % v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x & scalar, - v.y & scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x & v2.x, - v1.y & v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar & v.x, - scalar & v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x & v2.x, - v1.x & v2.y); - } - 
- template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator&(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x & v2.x, - v1.y & v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x | scalar, - v.y | scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x | v2.x, - v1.y | v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar | v.x, - scalar | v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x | v2.x, - v1.x | v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator|(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x | v2.x, - v1.y | v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x ^ scalar, - v.y ^ scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x ^ v2.x, - v1.y ^ v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar ^ v.x, - scalar ^ v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x ^ v2.x, - v1.x ^ v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator^(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x ^ v2.x, - v1.y ^ v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x << scalar, - v.y << scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x << v2.x, - v1.y << v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar << v.x, - scalar << v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x << v2.x, - v1.x << v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator<<(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x << v2.x, - v1.y << v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v, T scalar) - { - return vec<2, T, Q>( - v.x >> scalar, - v.y >> scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x >> v2.x, - v1.y >> v2.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(T scalar, vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - scalar >> v.x, - scalar >> v.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<1, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x >> v2.x, - v1.x >> v2.y); - } - - template 
- GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator>>(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return vec<2, T, Q>( - v1.x >> v2.x, - v1.y >> v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, T, Q> operator~(vec<2, T, Q> const& v) - { - return vec<2, T, Q>( - ~v.x, - ~v.y); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return - detail::compute_equal::is_iec559>::call(v1.x, v2.x) && - detail::compute_equal::is_iec559>::call(v1.y, v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<2, T, Q> const& v1, vec<2, T, Q> const& v2) - { - return !(v1 == v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator&&(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) - { - return vec<2, bool, Q>(v1.x && v2.x, v1.y && v2.y); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<2, bool, Q> operator||(vec<2, bool, Q> const& v1, vec<2, bool, Q> const& v2) - { - return vec<2, bool, Q>(v1.x || v2.x, v1.y || v2.y); - } -}//namespace glm diff --git a/third_party/glm/detail/type_vec3.hpp b/third_party/glm/detail/type_vec3.hpp deleted file mode 100755 index d83cde6..0000000 --- a/third_party/glm/detail/type_vec3.hpp +++ /dev/null @@ -1,432 +0,0 @@ -/// @ref core -/// @file glm/detail/type_vec3.hpp - -#pragma once - -#include "qualifier.hpp" -#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# include "_swizzle.hpp" -#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# include "_swizzle_func.hpp" -#endif -#include - -namespace glm -{ - template - struct vec<3, T, Q> - { - // -- Implementation detail -- - - typedef T value_type; - typedef vec<3, T, Q> type; - typedef vec<3, bool, Q> bool_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE -# pragma warning(disable: 4324) // structure was padded due to alignment specifier -# endif -# endif -# endif - -# if GLM_CONFIG_XYZW_ONLY - T x, y, z; -# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE - union - { - struct{ T x, y, z; }; - struct{ T r, g, b; }; - struct{ T s, t, p; }; - - typename detail::storage<3, T, detail::is_aligned::value>::type data; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - GLM_SWIZZLE3_2_MEMBERS(T, Q, x, y, z) - GLM_SWIZZLE3_2_MEMBERS(T, Q, r, g, b) - GLM_SWIZZLE3_2_MEMBERS(T, Q, s, t, p) - GLM_SWIZZLE3_3_MEMBERS(T, Q, x, y, z) - GLM_SWIZZLE3_3_MEMBERS(T, Q, r, g, b) - GLM_SWIZZLE3_3_MEMBERS(T, Q, s, t, p) - GLM_SWIZZLE3_4_MEMBERS(T, Q, x, y, z) - GLM_SWIZZLE3_4_MEMBERS(T, Q, r, g, b) - GLM_SWIZZLE3_4_MEMBERS(T, Q, s, t, p) -# endif - }; -# else - union { T x, r, s; }; - union { T y, g, t; }; - union { T z, b, p; }; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION - GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) -# endif//GLM_CONFIG_SWIZZLE -# endif//GLM_LANG - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif 
GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - /// Return the count of components of the vector - typedef length_t length_type; - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 3;} - - GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec const& v) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, T, P> const& v); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR vec(T a, T b, T c); - - // -- Conversion scalar constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X x, Y y, Z z); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z); - - // -- Conversion vector constructors -- - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<3, U, P> const& v); - - // -- Swizzle constructors -- -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& that) - { - *this = that(); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& scalar) - { - *this = vec(v(), scalar); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& scalar, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v) - { - *this = vec(scalar, v()); - } -# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - - // -- Unary arithmetic 
operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q>& operator=(vec<3, T, Q> const& v) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator+=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator-=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator*=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator/=(vec<3, U, Q> const& v); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator++(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator--(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator++(int); - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator--(int); - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator%=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator&=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator|=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator^=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator<<=(vec<3, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> & operator>>=(vec<3, U, Q> const& v); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& 
v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL 
GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<1, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_vec3.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_vec3.inl b/third_party/glm/detail/type_vec3.inl deleted file mode 100755 index 6532c9e..0000000 --- a/third_party/glm/detail/type_vec3.inl +++ /dev/null @@ -1,1068 +0,0 @@ -/// @ref core - -#include "compute_vector_relational.hpp" - -namespace glm -{ - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE - : x(0), y(0), z(0) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, Q> const& v) - : x(v.x), y(v.y), z(v.z) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, T, P> const& v) - : x(v.x), y(v.y), z(v.z) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T scalar) - : x(scalar), y(scalar), z(scalar) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(T _x, T _y, T _z) - : x(_x), y(_y), z(_z) - {} - - // -- Conversion scalar constructors -- - - template 
- template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.x)) - , z(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, Z _z) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - {} - - // -- Conversion vector constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, B _z) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(A _x, vec<2, B, P> const& _yz) - : x(static_cast(_x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz) - : x(static_cast(_x.x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<3, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - , z(static_cast(v.z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>::vec(vec<4, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - , z(static_cast(v.z)) - {} - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T & vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i) - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - case 2: - return z; - } - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<3, T, Q>::operator[](typename vec<3, T, Q>::length_type i) const - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - case 2: - return z; - } - } - - // -- Unary arithmetic operators -- - -# 
if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, T, Q> const& v) - { - this->x = v.x; - this->y = v.y; - this->z = v.z; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q>& vec<3, T, Q>::operator=(vec<3, U, Q> const& v) - { - this->x = static_cast(v.x); - this->y = static_cast(v.y); - this->z = static_cast(v.z); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(U scalar) - { - this->x += static_cast(scalar); - this->y += static_cast(scalar); - this->z += static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<1, U, Q> const& v) - { - this->x += static_cast(v.x); - this->y += static_cast(v.x); - this->z += static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator+=(vec<3, U, Q> const& v) - { - this->x += static_cast(v.x); - this->y += static_cast(v.y); - this->z += static_cast(v.z); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(U scalar) - { - this->x -= static_cast(scalar); - this->y -= static_cast(scalar); - this->z -= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<1, U, Q> const& v) - { - this->x -= static_cast(v.x); - this->y -= static_cast(v.x); - this->z -= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator-=(vec<3, U, Q> const& v) - { - this->x -= static_cast(v.x); - this->y -= static_cast(v.y); - this->z -= static_cast(v.z); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(U scalar) - { - this->x *= static_cast(scalar); - this->y *= static_cast(scalar); - this->z *= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<1, U, Q> const& v) - { - this->x *= static_cast(v.x); - this->y *= static_cast(v.x); - this->z *= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator*=(vec<3, U, Q> const& v) - { - this->x *= static_cast(v.x); - this->y *= static_cast(v.y); - this->z *= static_cast(v.z); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(U v) - { - this->x /= static_cast(v); - this->y /= static_cast(v); - this->z /= static_cast(v); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<1, U, Q> const& v) - { - this->x /= static_cast(v.x); - this->y /= static_cast(v.x); - this->z /= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator/=(vec<3, U, Q> const& v) - { - this->x /= static_cast(v.x); - this->y /= static_cast(v.y); - this->z /= static_cast(v.z); - return *this; - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator++() - { - ++this->x; - ++this->y; - ++this->z; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
vec<3, T, Q> & vec<3, T, Q>::operator--() - { - --this->x; - --this->y; - --this->z; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator++(int) - { - vec<3, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> vec<3, T, Q>::operator--(int) - { - vec<3, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary bit operators -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(U scalar) - { - this->x %= scalar; - this->y %= scalar; - this->z %= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<1, U, Q> const& v) - { - this->x %= v.x; - this->y %= v.x; - this->z %= v.x; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator%=(vec<3, U, Q> const& v) - { - this->x %= v.x; - this->y %= v.y; - this->z %= v.z; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(U scalar) - { - this->x &= scalar; - this->y &= scalar; - this->z &= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<1, U, Q> const& v) - { - this->x &= v.x; - this->y &= v.x; - this->z &= v.x; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator&=(vec<3, U, Q> const& v) - { - this->x &= v.x; - this->y &= v.y; - this->z &= v.z; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(U scalar) - { - this->x |= scalar; - this->y |= scalar; - this->z |= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<1, U, Q> const& v) - { - this->x |= v.x; - this->y |= v.x; - this->z |= v.x; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator|=(vec<3, U, Q> const& v) - { - this->x |= v.x; - this->y |= v.y; - this->z |= v.z; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(U scalar) - { - this->x ^= scalar; - this->y ^= scalar; - this->z ^= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<1, U, Q> const& v) - { - this->x ^= v.x; - this->y ^= v.x; - this->z ^= v.x; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator^=(vec<3, U, Q> const& v) - { - this->x ^= v.x; - this->y ^= v.y; - this->z ^= v.z; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(U scalar) - { - this->x <<= scalar; - this->y <<= scalar; - this->z <<= scalar; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<1, U, Q> const& v) - { - this->x <<= static_cast(v.x); - this->y <<= static_cast(v.x); - this->z <<= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator<<=(vec<3, U, Q> const& v) - { - this->x <<= static_cast(v.x); - this->y <<= static_cast(v.y); - this->z <<= static_cast(v.z); - return *this; - } - - template - template - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(U scalar) - { - this->x >>= static_cast(scalar); - this->y >>= static_cast(scalar); - this->z >>= static_cast(scalar); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<1, U, Q> const& v) - { - this->x >>= static_cast(v.x); - this->y >>= static_cast(v.x); - this->z >>= static_cast(v.x); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> & vec<3, T, Q>::operator>>=(vec<3, U, Q> const& v) - { - this->x >>= static_cast(v.x); - this->y >>= static_cast(v.y); - this->z >>= static_cast(v.z); - return *this; - } - - // -- Unary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - -v.x, - -v.y, - -v.z); - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x + scalar, - v.y + scalar, - v.z + scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x + scalar.x, - v.y + scalar.x, - v.z + scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar + v.x, - scalar + v.y, - scalar + v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x + v.x, - scalar.x + v.y, - scalar.x + v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator+(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x + v2.x, - v1.y + v2.y, - v1.z + v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x - scalar, - v.y - scalar, - v.z - scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x - scalar.x, - v.y - scalar.x, - v.z - scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar - v.x, - scalar - v.y, - scalar - v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x - v.x, - scalar.x - v.y, - scalar.x - v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator-(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x - v2.x, - v1.y - v2.y, - v1.z - v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x * scalar, - v.y * scalar, - v.z * scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x * scalar.x, - v.y * scalar.x, - v.z * scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar * v.x, - scalar * 
v.y, - scalar * v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x * v.x, - scalar.x * v.y, - scalar.x * v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator*(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x * v2.x, - v1.y * v2.y, - v1.z * v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x / scalar, - v.y / scalar, - v.z / scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x / scalar.x, - v.y / scalar.x, - v.z / scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar / v.x, - scalar / v.y, - scalar / v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x / v.x, - scalar.x / v.y, - scalar.x / v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator/(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x / v2.x, - v1.y / v2.y, - v1.z / v2.z); - } - - // -- Binary bit operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x % scalar, - v.y % scalar, - v.z % scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x % scalar.x, - v.y % scalar.x, - v.z % scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar % v.x, - scalar % v.y, - scalar % v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x % v.x, - scalar.x % v.y, - scalar.x % v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator%(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x % v2.x, - v1.y % v2.y, - v1.z % v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x & scalar, - v.y & scalar, - v.z & scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x & scalar.x, - v.y & scalar.x, - v.z & scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar & v.x, - scalar & v.y, - scalar & v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x & v.x, - scalar.x & v.y, - scalar.x & v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator&(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x & v2.x, - v1.y & v2.y, - v1.z & v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x | scalar, - 
v.y | scalar, - v.z | scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x | scalar.x, - v.y | scalar.x, - v.z | scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar | v.x, - scalar | v.y, - scalar | v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x | v.x, - scalar.x | v.y, - scalar.x | v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator|(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x | v2.x, - v1.y | v2.y, - v1.z | v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x ^ scalar, - v.y ^ scalar, - v.z ^ scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x ^ scalar.x, - v.y ^ scalar.x, - v.z ^ scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar ^ v.x, - scalar ^ v.y, - scalar ^ v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x ^ v.x, - scalar.x ^ v.y, - scalar.x ^ v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator^(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x ^ v2.x, - v1.y ^ v2.y, - v1.z ^ v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x << scalar, - v.y << scalar, - v.z << scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x << scalar.x, - v.y << scalar.x, - v.z << scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar << v.x, - scalar << v.y, - scalar << v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar.x << v.x, - scalar.x << v.y, - scalar.x << v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator<<(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x << v2.x, - v1.y << v2.y, - v1.z << v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, T scalar) - { - return vec<3, T, Q>( - v.x >> scalar, - v.y >> scalar, - v.z >> scalar); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<3, T, Q>( - v.x >> scalar.x, - v.y >> scalar.x, - v.z >> scalar.x); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(T scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - scalar >> v.x, - scalar >> v.y, - scalar >> v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<1, T, Q> const& scalar, vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - 
scalar.x >> v.x, - scalar.x >> v.y, - scalar.x >> v.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator>>(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return vec<3, T, Q>( - v1.x >> v2.x, - v1.y >> v2.y, - v1.z >> v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, T, Q> operator~(vec<3, T, Q> const& v) - { - return vec<3, T, Q>( - ~v.x, - ~v.y, - ~v.z); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return - detail::compute_equal::is_iec559>::call(v1.x, v2.x) && - detail::compute_equal::is_iec559>::call(v1.y, v2.y) && - detail::compute_equal::is_iec559>::call(v1.z, v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<3, T, Q> const& v1, vec<3, T, Q> const& v2) - { - return !(v1 == v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator&&(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2) - { - return vec<3, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<3, bool, Q> operator||(vec<3, bool, Q> const& v1, vec<3, bool, Q> const& v2) - { - return vec<3, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z); - } -}//namespace glm diff --git a/third_party/glm/detail/type_vec4.hpp b/third_party/glm/detail/type_vec4.hpp deleted file mode 100755 index 4a36434..0000000 --- a/third_party/glm/detail/type_vec4.hpp +++ /dev/null @@ -1,505 +0,0 @@ -/// @ref core -/// @file glm/detail/type_vec4.hpp - -#pragma once - -#include "qualifier.hpp" -#if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR -# include "_swizzle.hpp" -#elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION -# include "_swizzle_func.hpp" -#endif -#include - -namespace glm -{ - template - struct vec<4, T, Q> - { - // -- Implementation detail -- - - typedef T value_type; - typedef vec<4, T, Q> type; - typedef vec<4, bool, Q> bool_type; - - // -- Data -- - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wpedantic" -# elif GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-anonymous-struct" -# pragma clang diagnostic ignored "-Wnested-anon-types" -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union -# endif -# endif - -# if GLM_CONFIG_XYZW_ONLY - T x, y, z, w; -# elif GLM_CONFIG_ANONYMOUS_STRUCT == GLM_ENABLE - union - { - struct { T x, y, z, w; }; - struct { T r, g, b, a; }; - struct { T s, t, p, q; }; - - typename detail::storage<4, T, detail::is_aligned::value>::type data; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - GLM_SWIZZLE4_2_MEMBERS(T, Q, x, y, z, w) - GLM_SWIZZLE4_2_MEMBERS(T, Q, r, g, b, a) - GLM_SWIZZLE4_2_MEMBERS(T, Q, s, t, p, q) - GLM_SWIZZLE4_3_MEMBERS(T, Q, x, y, z, w) - GLM_SWIZZLE4_3_MEMBERS(T, Q, r, g, b, a) - GLM_SWIZZLE4_3_MEMBERS(T, Q, s, t, p, q) - GLM_SWIZZLE4_4_MEMBERS(T, Q, x, y, z, w) - GLM_SWIZZLE4_4_MEMBERS(T, Q, r, g, b, a) - GLM_SWIZZLE4_4_MEMBERS(T, Q, s, t, p, q) -# endif - }; -# else - union { T x, r, s; }; - union { T y, g, t; }; - union { T z, b, p; }; - union { T w, a, q; }; - -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION - GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q) -# endif -# endif - -# if GLM_SILENT_WARNINGS == GLM_ENABLE -# if GLM_COMPILER & GLM_COMPILER_CLANG -# pragma clang diagnostic pop -# elif 
GLM_COMPILER & GLM_COMPILER_GCC -# pragma GCC diagnostic pop -# elif GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif -# endif - - // -- Component accesses -- - - typedef length_t length_type; - - /// Return the count of components of the vector - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 4;} - - GLM_FUNC_DECL GLM_CONSTEXPR T & operator[](length_type i); - GLM_FUNC_DECL GLM_CONSTEXPR T const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, Q> const& v) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<4, T, P> const& v); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(T scalar); - GLM_FUNC_DECL GLM_CONSTEXPR vec(T x, T y, T z, T w); - - // -- Conversion scalar constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR explicit vec(vec<1, U, P> const& v); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _Y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w); - - // -- Conversion vector constructors -- - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<1, 
B, P> const& _z, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, B _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, B _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(A _x, vec<3, B, P> const& _yzw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw); - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw); - - /// Explicit conversions (From section 5.4.1 Conversion and scalar constructors of GLSL 1.30.08 specification) - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT vec(vec<4, U, P> const& v); - - // -- Swizzle constructors -- -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<4, T, Q, E0, E1, E2, E3> const& that) - { - *this = that(); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, detail::_swizzle<2, T, Q, F0, F1, -1, -2> const& u) - { - *this = vec<4, T, Q>(v(), u()); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, T const& y, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v) - { - *this = vec<4, T, Q>(x, y, v()); - } - - template - 
GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& w) - { - *this = vec<4, T, Q>(x, v(), w); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<2, T, Q, E0, E1, -1, -2> const& v, T const& z, T const& w) - { - *this = vec<4, T, Q>(v(), z, w); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v, T const& w) - { - *this = vec<4, T, Q>(v(), w); - } - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec(T const& x, detail::_swizzle<3, T, Q, E0, E1, E2, -1> const& v) - { - *this = vec<4, T, Q>(x, v()); - } -# endif//GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, T, Q> const& v) GLM_DEFAULT; - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator+=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator-=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator*=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q>& operator/=(vec<4, U, Q> const& v); - - // -- Increment and decrement operators -- - - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator++(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator--(); - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator++(int); - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator--(int); - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator%=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator&=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator|=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator^=(vec<4, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator<<=(vec<4, U, Q> const& v); - 
template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(U scalar); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<1, U, Q> const& v); - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> & operator>>=(vec<4, U, Q> const& v); - }; - - // -- Unary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v); - - // -- Binary operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T const & scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(T 
scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); - - template - GLM_FUNC_DECL GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2); -}//namespace glm - -#ifndef GLM_EXTERNAL_TEMPLATE -#include "type_vec4.inl" -#endif//GLM_EXTERNAL_TEMPLATE diff --git a/third_party/glm/detail/type_vec4.inl b/third_party/glm/detail/type_vec4.inl deleted file mode 100755 index 3c212d9..0000000 --- a/third_party/glm/detail/type_vec4.inl +++ /dev/null @@ -1,1140 +0,0 @@ -/// @ref core - -#include "compute_vector_relational.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_vec4_add - { - GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); - } - }; - - template - struct compute_vec4_sub - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); - } - }; - - template - struct compute_vec4_mul - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); - } - }; - - template - struct compute_vec4_div - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); - } - }; - - template - struct compute_vec4_mod - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x % b.x, a.y % b.y, a.z % b.z, a.w % b.w); - } - }; - - template - struct compute_vec4_and - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x & b.x, a.y & b.y, a.z & b.z, a.w & b.w); - } - }; - - template - struct compute_vec4_or - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x | b.x, a.y | b.y, a.z | b.z, a.w | b.w); - } - }; - - template - struct compute_vec4_xor - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); - } - }; - - template - struct compute_vec4_shift_left - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x << b.x, a.y << b.y, a.z << b.z, a.w << b.w); - } - }; - - template - struct compute_vec4_shift_right - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - return vec<4, T, Q>(a.x >> b.x, a.y >> b.y, a.z >> b.z, a.w >> b.w); - } - }; - - template - struct compute_vec4_equal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return - detail::compute_equal::is_iec559>::call(v1.x, v2.x) && - detail::compute_equal::is_iec559>::call(v1.y, v2.y) && - detail::compute_equal::is_iec559>::call(v1.z, v2.z) && - detail::compute_equal::is_iec559>::call(v1.w, v2.w); - } - }; - - template - struct compute_vec4_nequal - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static bool call(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return !compute_vec4_equal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); - } - }; - - template - struct compute_vec4_bitwise_not - { - GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec<4, T, Q> call(vec<4, T, Q> const& v) - { - return vec<4, T, Q>(~v.x, ~v.y, ~v.z, ~v.w); - } - }; -}//namespace detail - - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec() -# if GLM_CONFIG_CTOR_INIT != GLM_CTOR_INIT_DISABLE - : x(0), y(0), z(0), w(0) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, Q> const& v) - : x(v.x), y(v.y), z(v.z), w(v.w) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, T, P> const& v) - : x(v.x), y(v.y), z(v.z), w(v.w) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T scalar) - : x(scalar), y(scalar), z(scalar), w(scalar) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(T _x, T _y, T _z, T _w) - : x(_x), y(_y), z(_z), w(_w) - {} - - // -- Conversion scalar constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.x)) - , z(static_cast(v.x)) - , w(static_cast(v.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, Z _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, W _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, Z _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, Z _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, Y _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, Y _y, vec<1, Z, Q> 
const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(X _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, X, Q> const& _x, vec<1, Y, Q> const& _y, vec<1, Z, Q> const& _z, vec<1, W, Q> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - // -- Conversion vector constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, C _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, C _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z.x)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, B _z, vec<1, C, P> const& _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<1, B, P> const& _z, vec<1, C, P> const& _w) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_z.x)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, C _w) - : x(static_cast(_x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, C _w) - : x(static_cast(_x.x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) - : x(static_cast(_x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<2, B, P> const& _yz, vec<1, C, P> const& _w) - : x(static_cast(_x.x)) - , y(static_cast(_yz.x)) - , z(static_cast(_yz.y)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, B _y, vec<2, C, P> const& _zw) - : x(static_cast(_x)) - , y(static_cast(_y)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, B _y, vec<2, C, P> const& _zw) - : x(static_cast(_x.x)) - , y(static_cast(_y)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) - : x(static_cast(_x)) - , y(static_cast(_y.x)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<1, B, P> const& _y, vec<2, C, P> const& _zw) - : x(static_cast(_x.x)) - , y(static_cast(_y.x)) - , z(static_cast(_zw.x)) - , 
w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, B _w) - : x(static_cast(_xyz.x)) - , y(static_cast(_xyz.y)) - , z(static_cast(_xyz.z)) - , w(static_cast(_w)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<3, A, P> const& _xyz, vec<1, B, P> const& _w) - : x(static_cast(_xyz.x)) - , y(static_cast(_xyz.y)) - , z(static_cast(_xyz.z)) - , w(static_cast(_w.x)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(A _x, vec<3, B, P> const& _yzw) - : x(static_cast(_x)) - , y(static_cast(_yzw.x)) - , z(static_cast(_yzw.y)) - , w(static_cast(_yzw.z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<1, A, P> const& _x, vec<3, B, P> const& _yzw) - : x(static_cast(_x.x)) - , y(static_cast(_yzw.x)) - , z(static_cast(_yzw.y)) - , w(static_cast(_yzw.z)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<2, A, P> const& _xy, vec<2, B, P> const& _zw) - : x(static_cast(_xy.x)) - , y(static_cast(_xy.y)) - , z(static_cast(_zw.x)) - , w(static_cast(_zw.y)) - {} - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>::vec(vec<4, U, P> const& v) - : x(static_cast(v.x)) - , y(static_cast(v.y)) - , z(static_cast(v.z)) - , w(static_cast(v.w)) - {} - - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - case 2: - return z; - case 3: - return w; - } - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T const& vec<4, T, Q>::operator[](typename vec<4, T, Q>::length_type i) const - { - assert(i >= 0 && i < this->length()); - switch(i) - { - default: - case 0: - return x; - case 1: - return y; - case 2: - return z; - case 3: - return w; - } - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, T, Q> const& v) - { - this->x = v.x; - this->y = v.y; - this->z = v.z; - this->w = v.w; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q>& vec<4, T, Q>::operator=(vec<4, U, Q> const& v) - { - this->x = static_cast(v.x); - this->y = static_cast(v.y); - this->z = static_cast(v.z); - this->w = static_cast(v.w); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(U scalar) - { - return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator+=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_add::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(U scalar) - { - return (*this = detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<1, U, Q> const& v) - { - return (*this = 
detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator-=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_sub::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(U scalar) - { - return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator*=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_mul::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(U scalar) - { - return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(v.x))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator/=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_div::value>::call(*this, vec<4, T, Q>(v))); - } - - // -- Increment and decrement operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator++() - { - ++this->x; - ++this->y; - ++this->z; - ++this->w; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator--() - { - --this->x; - --this->y; - --this->z; - --this->w; - return *this; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator++(int) - { - vec<4, T, Q> Result(*this); - ++*this; - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> vec<4, T, Q>::operator--(int) - { - vec<4, T, Q> Result(*this); - --*this; - return Result; - } - - // -- Unary bit operators -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(U scalar) - { - return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator%=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_mod::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(U scalar) - { - return (*this = detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator&=(vec<4, U, Q> const& v) - { - return (*this = 
detail::compute_vec4_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(U scalar) - { - return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator|=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(U scalar) - { - return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator^=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(U scalar) - { - return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator<<=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(U scalar) - { - return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(scalar))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<1, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> & vec<4, T, Q>::operator>>=(vec<4, U, Q> const& v) - { - return (*this = detail::compute_vec4_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec<4, T, Q>(v))); - } - - // -- Unary constant operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v) - { - return vec<4, T, Q>(0) -= v; - } - - // -- Binary arithmetic operators -- - - template - GLM_FUNC_QUALIFIER 
GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) += scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) += v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(v) += scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v2) += v1; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator+(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) += v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) -= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) -= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) -= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) -= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator-(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) -= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) *= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) *= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(v) *= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v2) *= v1; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator*(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) *= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v, T const & scalar) - { - return vec<4, T, Q>(v) /= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) /= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) /= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) /= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator/(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) /= v2; - } - - // -- Binary bit operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) %= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) %= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(T scalar, vec<4, T, Q> const& v) 
- { - return vec<4, T, Q>(scalar) %= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<1, T, Q> const& scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar.x) %= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator%(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) %= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) &= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v, vec<1, T, Q> const& scalar) - { - return vec<4, T, Q>(v) &= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) &= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) &= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator&(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) &= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) |= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) |= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) |= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) |= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator|(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) |= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) ^= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) ^= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) ^= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) ^= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator^(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) ^= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) <<= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) <<= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) <<= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) <<= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator<<(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) <<= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR 
vec<4, T, Q> operator>>(vec<4, T, Q> const& v, T scalar) - { - return vec<4, T, Q>(v) >>= scalar; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<1, T, Q> const& v2) - { - return vec<4, T, Q>(v1) >>= v2.x; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(T scalar, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(scalar) >>= v; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<1, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1.x) >>= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator>>(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return vec<4, T, Q>(v1) >>= v2; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, T, Q> operator~(vec<4, T, Q> const& v) - { - return detail::compute_vec4_bitwise_not::value, sizeof(T) * 8, detail::is_aligned::value>::call(v); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator==(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return detail::compute_vec4_equal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool operator!=(vec<4, T, Q> const& v1, vec<4, T, Q> const& v2) - { - return detail::compute_vec4_nequal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator&&(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) - { - return vec<4, bool, Q>(v1.x && v2.x, v1.y && v2.y, v1.z && v2.z, v1.w && v2.w); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, bool, Q> operator||(vec<4, bool, Q> const& v1, vec<4, bool, Q> const& v2) - { - return vec<4, bool, Q>(v1.x || v2.x, v1.y || v2.y, v1.z || v2.z, v1.w || v2.w); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "type_vec4_simd.inl" -#endif diff --git a/third_party/glm/detail/type_vec4_simd.inl b/third_party/glm/detail/type_vec4_simd.inl deleted file mode 100755 index 29559b5..0000000 --- a/third_party/glm/detail/type_vec4_simd.inl +++ /dev/null @@ -1,775 +0,0 @@ -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ -# if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - template - struct _swizzle_base1<4, float, Q, E0,E1,E2,E3, true> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, float, Q> operator ()() const - { - __m128 data = *reinterpret_cast<__m128 const*>(&this->_buffer); - - vec<4, float, Q> Result; -# if GLM_ARCH & GLM_ARCH_AVX_BIT - Result.data = _mm_permute_ps(data, _MM_SHUFFLE(E3, E2, E1, E0)); -# else - Result.data = _mm_shuffle_ps(data, data, _MM_SHUFFLE(E3, E2, E1, E0)); -# endif - return Result; - } - }; - - template - struct _swizzle_base1<4, int, Q, E0,E1,E2,E3, true> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, int, Q> operator ()() const - { - __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); - - vec<4, int, Q> Result; - Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); - return Result; - } - }; - - template - struct _swizzle_base1<4, uint, Q, E0,E1,E2,E3, true> : public _swizzle_base0 - { - GLM_FUNC_QUALIFIER vec<4, uint, Q> operator ()() const - { - __m128i data = *reinterpret_cast<__m128i const*>(&this->_buffer); - - vec<4, uint, Q> Result; - Result.data = _mm_shuffle_epi32(data, _MM_SHUFFLE(E3, E2, E1, E0)); - return Result; - } - }; -# endif// GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR - - template - struct 
compute_vec4_add - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_add_ps(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_add - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_add_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_sub - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_sub_ps(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_sub - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_sub_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_mul - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_mul_ps(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_mul - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_mul_pd(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_div - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = _mm_div_ps(a.data, b.data); - return Result; - } - }; - - # if GLM_ARCH & GLM_ARCH_AVX_BIT - template - struct compute_vec4_div - { - static vec<4, double, Q> call(vec<4, double, Q> const& a, vec<4, double, Q> const& b) - { - vec<4, double, Q> Result; - Result.data = _mm256_div_pd(a.data, b.data); - return Result; - } - }; -# endif - - template<> - struct compute_vec4_div - { - static vec<4, float, aligned_lowp> call(vec<4, float, aligned_lowp> const& a, vec<4, float, aligned_lowp> const& b) - { - vec<4, float, aligned_lowp> Result; - Result.data = _mm_mul_ps(a.data, _mm_rcp_ps(b.data)); - return Result; - } - }; - - template - struct compute_vec4_and - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_and_si128(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_and - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_and_si256(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_or - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_or_si128(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_or - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_or_si256(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_xor - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_xor_si128(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_xor - { - static 
vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_xor_si256(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_shift_left - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_sll_epi32(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_shift_left - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_sll_epi64(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_shift_right - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm_srl_epi32(a.data, b.data); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_shift_right - { - static vec<4, T, Q> call(vec<4, T, Q> const& a, vec<4, T, Q> const& b) - { - vec<4, T, Q> Result; - Result.data = _mm256_srl_epi64(a.data, b.data); - return Result; - } - }; -# endif - - template - struct compute_vec4_bitwise_not - { - static vec<4, T, Q> call(vec<4, T, Q> const& v) - { - vec<4, T, Q> Result; - Result.data = _mm_xor_si128(v.data, _mm_set1_epi32(-1)); - return Result; - } - }; - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template - struct compute_vec4_bitwise_not - { - static vec<4, T, Q> call(vec<4, T, Q> const& v) - { - vec<4, T, Q> Result; - Result.data = _mm256_xor_si256(v.data, _mm_set1_epi32(-1)); - return Result; - } - }; -# endif - - template - struct compute_vec4_equal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - return _mm_movemask_ps(_mm_cmpeq_ps(v1.data, v2.data)) != 0; - } - }; - -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - template - struct compute_vec4_equal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - //return _mm_movemask_epi8(_mm_cmpeq_epi32(v1.data, v2.data)) != 0; - __m128i neq = _mm_xor_si128(v1.data, v2.data); - return _mm_test_all_zeros(neq, neq) == 0; - } - }; -# endif - - template - struct compute_vec4_nequal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - return _mm_movemask_ps(_mm_cmpneq_ps(v1.data, v2.data)) != 0; - } - }; - -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - template - struct compute_vec4_nequal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - //return _mm_movemask_epi8(_mm_cmpneq_epi32(v1.data, v2.data)) != 0; - __m128i neq = _mm_xor_si128(v1.data, v2.data); - return _mm_test_all_zeros(neq, neq) != 0; - } - }; -# endif -}//namespace detail - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) : - data(_mm_set1_ps(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) : - data(_mm_set1_ps(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _s) : - data(_mm_set1_ps(_s)) - {} - -# if GLM_ARCH & GLM_ARCH_AVX_BIT - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_lowp>::vec(double _s) : - data(_mm256_set1_pd(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_mediump>::vec(double _s) : - data(_mm256_set1_pd(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, double, aligned_highp>::vec(double _s) : - data(_mm256_set1_pd(_s)) - {} -# endif - - template<> - 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) : - data(_mm_set1_epi32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) : - data(_mm_set1_epi32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) : - data(_mm_set1_epi32(_s)) - {} - -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_lowp>::vec(detail::int64 _s) : - data(_mm256_set1_epi64x(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_mediump>::vec(detail::int64 _s) : - data(_mm256_set1_epi64x(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, detail::int64, aligned_highp>::vec(detail::int64 _s) : - data(_mm256_set1_epi64x(_s)) - {} -# endif - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _x, float _y, float _z, float _w) : - data(_mm_set_ps(_w, _z, _y, _x)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _x, float _y, float _z, float _w) : - data(_mm_set_ps(_w, _z, _y, _x)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(float _x, float _y, float _z, float _w) : - data(_mm_set_ps(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_set_epi32(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _x, int _y, int _z, int _w) : - data(_mm_set_epi32(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_set_epi32(_w, _z, _y, _x)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) : - data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) : - data(_mm_cvtepi32_ps(_mm_set_epi32(_w, _z, _y, _x))) - {} -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT - -#if GLM_ARCH & GLM_ARCH_NEON_BIT -namespace glm { -namespace detail { - - template - struct compute_vec4_add - { - static - vec<4, float, Q> - call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vaddq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_add - { - static - vec<4, uint, Q> - call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vaddq_u32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_add - { - static - vec<4, int, Q> - call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vaddq_s32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_sub - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vsubq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_sub - { - static vec<4, uint, Q> 
call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vsubq_u32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_sub - { - static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) - { - vec<4, int, Q> Result; - Result.data = vsubq_s32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_mul - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vmulq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_mul - { - static vec<4, uint, Q> call(vec<4, uint, Q> const& a, vec<4, uint, Q> const& b) - { - vec<4, uint, Q> Result; - Result.data = vmulq_u32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_mul - { - static vec<4, int, Q> call(vec<4, int, Q> const& a, vec<4, int, Q> const& b) - { - vec<4, int, Q> Result; - Result.data = vmulq_s32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_div - { - static vec<4, float, Q> call(vec<4, float, Q> const& a, vec<4, float, Q> const& b) - { - vec<4, float, Q> Result; - Result.data = vdivq_f32(a.data, b.data); - return Result; - } - }; - - template - struct compute_vec4_equal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - uint32x4_t cmp = vceqq_f32(v1.data, v2.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - cmp = vpminq_u32(cmp, cmp); - cmp = vpminq_u32(cmp, cmp); - uint32_t r = cmp[0]; -#else - uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp)); - cmpx2 = vpmin_u32(cmpx2, cmpx2); - uint32_t r = cmpx2[0]; -#endif - return r == ~0u; - } - }; - - template - struct compute_vec4_equal - { - static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - uint32x4_t cmp = vceqq_u32(v1.data, v2.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - cmp = vpminq_u32(cmp, cmp); - cmp = vpminq_u32(cmp, cmp); - uint32_t r = cmp[0]; -#else - uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp)); - cmpx2 = vpmin_u32(cmpx2, cmpx2); - uint32_t r = cmpx2[0]; -#endif - return r == ~0u; - } - }; - - template - struct compute_vec4_equal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - uint32x4_t cmp = vceqq_s32(v1.data, v2.data); -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - cmp = vpminq_u32(cmp, cmp); - cmp = vpminq_u32(cmp, cmp); - uint32_t r = cmp[0]; -#else - uint32x2_t cmpx2 = vpmin_u32(vget_low_f32(cmp), vget_high_f32(cmp)); - cmpx2 = vpmin_u32(cmpx2, cmpx2); - uint32_t r = cmpx2[0]; -#endif - return r == ~0u; - } - }; - - template - struct compute_vec4_nequal - { - static bool call(vec<4, float, Q> const& v1, vec<4, float, Q> const& v2) - { - return !compute_vec4_equal::call(v1, v2); - } - }; - - template - struct compute_vec4_nequal - { - static bool call(vec<4, uint, Q> const& v1, vec<4, uint, Q> const& v2) - { - return !compute_vec4_equal::call(v1, v2); - } - }; - - template - struct compute_vec4_nequal - { - static bool call(vec<4, int, Q> const& v1, vec<4, int, Q> const& v2) - { - return !compute_vec4_equal::call(v1, v2); - } - }; - -}//namespace detail - -#if !GLM_CONFIG_XYZW_ONLY - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(float _s) : - data(vdupq_n_f32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(float _s) : - data(vdupq_n_f32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, 
aligned_highp>::vec(float _s) : - data(vdupq_n_f32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_lowp>::vec(int _s) : - data(vdupq_n_s32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_mediump>::vec(int _s) : - data(vdupq_n_s32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, int, aligned_highp>::vec(int _s) : - data(vdupq_n_s32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_lowp>::vec(uint _s) : - data(vdupq_n_u32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_mediump>::vec(uint _s) : - data(vdupq_n_u32(_s)) - {} - - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, uint, aligned_highp>::vec(uint _s) : - data(vdupq_n_u32(_s)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, float, aligned_highp>& rhs) : - data(rhs.data) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, int, aligned_highp>& rhs) : - data(vcvtq_f32_s32(rhs.data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(const vec<4, uint, aligned_highp>& rhs) : - data(vcvtq_f32_u32(rhs.data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(int _x, int _y, int _z, int _w) : - data(vcvtq_f32_s32(vec<4, int, aligned_lowp>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(int _x, int _y, int _z, int _w) : - data(vcvtq_f32_s32(vec<4, int, aligned_mediump>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(int _x, int _y, int _z, int _w) : - data(vcvtq_f32_s32(vec<4, int, aligned_highp>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_lowp>::vec(uint _x, uint _y, uint _z, uint _w) : - data(vcvtq_f32_u32(vec<4, uint, aligned_lowp>(_x, _y, _z, _w).data)) - {} - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_mediump>::vec(uint _x, uint _y, uint _z, uint _w) : - data(vcvtq_f32_u32(vec<4, uint, aligned_mediump>(_x, _y, _z, _w).data)) - {} - - - template<> - template<> - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec<4, float, aligned_highp>::vec(uint _x, uint _y, uint _z, uint _w) : - data(vcvtq_f32_u32(vec<4, uint, aligned_highp>(_x, _y, _z, _w).data)) - {} - -#endif -}//namespace glm - -#endif diff --git a/third_party/glm/exponential.hpp b/third_party/glm/exponential.hpp deleted file mode 100755 index f8fb886..0000000 --- a/third_party/glm/exponential.hpp +++ /dev/null @@ -1,110 +0,0 @@ -/// @ref core -/// @file glm/exponential.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions -/// -/// @defgroup core_func_exponential Exponential functions -/// @ingroup core -/// -/// Provides GLSL exponential functions -/// -/// These all operate component-wise. The description is per component. -/// -/// Include to use these core features. - -#pragma once - -#include "detail/type_vec1.hpp" -#include "detail/type_vec2.hpp" -#include "detail/type_vec3.hpp" -#include "detail/type_vec4.hpp" -#include - -namespace glm -{ - /// @addtogroup core_func_exponential - /// @{ - - /// Returns 'base' raised to the power 'exponent'. - /// - /// @param base Floating point value. 
pow function is defined for input values of 'base' defined in the range (inf-, inf+) in the limit of the type qualifier. - /// @param exponent Floating point value representing the 'exponent'. - /// - /// @see GLSL pow man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec pow(vec const& base, vec const& exponent); - - /// Returns the natural exponentiation of x, i.e., e^x. - /// - /// @param v exp function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL exp man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec exp(vec const& v); - - /// Returns the natural logarithm of v, i.e., - /// returns the value y which satisfies the equation x = e^y. - /// Results are undefined if v <= 0. - /// - /// @param v log function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL log man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec log(vec const& v); - - /// Returns 2 raised to the v power. - /// - /// @param v exp2 function is defined for input values of v defined in the range (inf-, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL exp2 man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec exp2(vec const& v); - - /// Returns the base 2 log of x, i.e., returns the value y, - /// which satisfies the equation x = 2 ^ y. - /// - /// @param v log2 function is defined for input values of v defined in the range (0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL log2 man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec log2(vec const& v); - - /// Returns the positive square root of v. - /// - /// @param v sqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL sqrt man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec sqrt(vec const& v); - - /// Returns the reciprocal of the positive square root of v. - /// - /// @param v inversesqrt function is defined for input values of v defined in the range [0, inf+) in the limit of the type qualifier. - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. 
- /// - /// @see GLSL inversesqrt man page - /// @see GLSL 4.20.8 specification, section 8.2 Exponential Functions - template - GLM_FUNC_DECL vec inversesqrt(vec const& v); - - /// @} -}//namespace glm - -#include "detail/func_exponential.inl" diff --git a/third_party/glm/ext.hpp b/third_party/glm/ext.hpp deleted file mode 100755 index 3bc8db2..0000000 --- a/third_party/glm/ext.hpp +++ /dev/null @@ -1,196 +0,0 @@ -/// @file glm/ext.hpp -/// -/// @ref core (Dependence) - -#include "detail/setup.hpp" - -#pragma once - -#include "glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_MESSAGE_EXT_INCLUDED_DISPLAYED) -# define GLM_MESSAGE_EXT_INCLUDED_DISPLAYED -# pragma message("GLM: All extensions included (not recommended)") -#endif//GLM_MESSAGES - -#include "./ext/matrix_double2x2.hpp" -#include "./ext/matrix_double2x2_precision.hpp" -#include "./ext/matrix_double2x3.hpp" -#include "./ext/matrix_double2x3_precision.hpp" -#include "./ext/matrix_double2x4.hpp" -#include "./ext/matrix_double2x4_precision.hpp" -#include "./ext/matrix_double3x2.hpp" -#include "./ext/matrix_double3x2_precision.hpp" -#include "./ext/matrix_double3x3.hpp" -#include "./ext/matrix_double3x3_precision.hpp" -#include "./ext/matrix_double3x4.hpp" -#include "./ext/matrix_double3x4_precision.hpp" -#include "./ext/matrix_double4x2.hpp" -#include "./ext/matrix_double4x2_precision.hpp" -#include "./ext/matrix_double4x3.hpp" -#include "./ext/matrix_double4x3_precision.hpp" -#include "./ext/matrix_double4x4.hpp" -#include "./ext/matrix_double4x4_precision.hpp" - -#include "./ext/matrix_float2x2.hpp" -#include "./ext/matrix_float2x2_precision.hpp" -#include "./ext/matrix_float2x3.hpp" -#include "./ext/matrix_float2x3_precision.hpp" -#include "./ext/matrix_float2x4.hpp" -#include "./ext/matrix_float2x4_precision.hpp" -#include "./ext/matrix_float3x2.hpp" -#include "./ext/matrix_float3x2_precision.hpp" -#include "./ext/matrix_float3x3.hpp" -#include "./ext/matrix_float3x3_precision.hpp" -#include "./ext/matrix_float3x4.hpp" -#include "./ext/matrix_float3x4_precision.hpp" -#include "./ext/matrix_float4x2.hpp" -#include "./ext/matrix_float4x2_precision.hpp" -#include "./ext/matrix_float4x3.hpp" -#include "./ext/matrix_float4x3_precision.hpp" -#include "./ext/matrix_float4x4.hpp" -#include "./ext/matrix_float4x4_precision.hpp" - -#include "./ext/matrix_relational.hpp" - -#include "./ext/quaternion_double.hpp" -#include "./ext/quaternion_double_precision.hpp" -#include "./ext/quaternion_float.hpp" -#include "./ext/quaternion_float_precision.hpp" -#include "./ext/quaternion_geometric.hpp" -#include "./ext/quaternion_relational.hpp" - -#include "./ext/scalar_constants.hpp" -#include "./ext/scalar_int_sized.hpp" -#include "./ext/scalar_relational.hpp" - -#include "./ext/vector_bool1.hpp" -#include "./ext/vector_bool1_precision.hpp" -#include "./ext/vector_bool2.hpp" -#include "./ext/vector_bool2_precision.hpp" -#include "./ext/vector_bool3.hpp" -#include "./ext/vector_bool3_precision.hpp" -#include "./ext/vector_bool4.hpp" -#include "./ext/vector_bool4_precision.hpp" - -#include "./ext/vector_double1.hpp" -#include "./ext/vector_double1_precision.hpp" -#include "./ext/vector_double2.hpp" -#include "./ext/vector_double2_precision.hpp" -#include "./ext/vector_double3.hpp" -#include "./ext/vector_double3_precision.hpp" -#include "./ext/vector_double4.hpp" -#include "./ext/vector_double4_precision.hpp" - -#include "./ext/vector_float1.hpp" -#include "./ext/vector_float1_precision.hpp" -#include "./ext/vector_float2.hpp" -#include 
"./ext/vector_float2_precision.hpp" -#include "./ext/vector_float3.hpp" -#include "./ext/vector_float3_precision.hpp" -#include "./ext/vector_float4.hpp" -#include "./ext/vector_float4_precision.hpp" - -#include "./ext/vector_int1.hpp" -#include "./ext/vector_int1_precision.hpp" -#include "./ext/vector_int2.hpp" -#include "./ext/vector_int2_precision.hpp" -#include "./ext/vector_int3.hpp" -#include "./ext/vector_int3_precision.hpp" -#include "./ext/vector_int4.hpp" -#include "./ext/vector_int4_precision.hpp" - -#include "./ext/vector_relational.hpp" - -#include "./ext/vector_uint1.hpp" -#include "./ext/vector_uint1_precision.hpp" -#include "./ext/vector_uint2.hpp" -#include "./ext/vector_uint2_precision.hpp" -#include "./ext/vector_uint3.hpp" -#include "./ext/vector_uint3_precision.hpp" -#include "./ext/vector_uint4.hpp" -#include "./ext/vector_uint4_precision.hpp" - -#include "./gtc/bitfield.hpp" -#include "./gtc/color_space.hpp" -#include "./gtc/constants.hpp" -#include "./gtc/epsilon.hpp" -#include "./gtc/integer.hpp" -#include "./gtc/matrix_access.hpp" -#include "./gtc/matrix_integer.hpp" -#include "./gtc/matrix_inverse.hpp" -#include "./gtc/matrix_transform.hpp" -#include "./gtc/noise.hpp" -#include "./gtc/packing.hpp" -#include "./gtc/quaternion.hpp" -#include "./gtc/random.hpp" -#include "./gtc/reciprocal.hpp" -#include "./gtc/round.hpp" -#include "./gtc/type_precision.hpp" -#include "./gtc/type_ptr.hpp" -#include "./gtc/ulp.hpp" -#include "./gtc/vec1.hpp" -#if GLM_CONFIG_ALIGNED_GENTYPES == GLM_ENABLE -# include "./gtc/type_aligned.hpp" -#endif - -#ifdef GLM_ENABLE_EXPERIMENTAL -#include "./gtx/associated_min_max.hpp" -#include "./gtx/bit.hpp" -#include "./gtx/closest_point.hpp" -#include "./gtx/color_encoding.hpp" -#include "./gtx/color_space.hpp" -#include "./gtx/color_space_YCoCg.hpp" -#include "./gtx/compatibility.hpp" -#include "./gtx/component_wise.hpp" -#include "./gtx/dual_quaternion.hpp" -#include "./gtx/euler_angles.hpp" -#include "./gtx/extend.hpp" -#include "./gtx/extended_min_max.hpp" -#include "./gtx/fast_exponential.hpp" -#include "./gtx/fast_square_root.hpp" -#include "./gtx/fast_trigonometry.hpp" -#include "./gtx/functions.hpp" -#include "./gtx/gradient_paint.hpp" -#include "./gtx/handed_coordinate_space.hpp" -#include "./gtx/integer.hpp" -#include "./gtx/intersect.hpp" -#include "./gtx/log_base.hpp" -#include "./gtx/matrix_cross_product.hpp" -#include "./gtx/matrix_interpolation.hpp" -#include "./gtx/matrix_major_storage.hpp" -#include "./gtx/matrix_operation.hpp" -#include "./gtx/matrix_query.hpp" -#include "./gtx/mixed_product.hpp" -#include "./gtx/norm.hpp" -#include "./gtx/normal.hpp" -#include "./gtx/normalize_dot.hpp" -#include "./gtx/number_precision.hpp" -#include "./gtx/optimum_pow.hpp" -#include "./gtx/orthonormalize.hpp" -#include "./gtx/perpendicular.hpp" -#include "./gtx/polar_coordinates.hpp" -#include "./gtx/projection.hpp" -#include "./gtx/quaternion.hpp" -#include "./gtx/raw_data.hpp" -#include "./gtx/rotate_vector.hpp" -#include "./gtx/spline.hpp" -#include "./gtx/std_based_type.hpp" -#if !(GLM_COMPILER & GLM_COMPILER_CUDA) -# include "./gtx/string_cast.hpp" -#endif -#include "./gtx/transform.hpp" -#include "./gtx/transform2.hpp" -#include "./gtx/vec_swizzle.hpp" -#include "./gtx/vector_angle.hpp" -#include "./gtx/vector_query.hpp" -#include "./gtx/wrap.hpp" - -#if GLM_HAS_TEMPLATE_ALIASES -# include "./gtx/scalar_multiplication.hpp" -#endif - -#if GLM_HAS_RANGE_FOR -# include "./gtx/range.hpp" -#endif -#endif//GLM_ENABLE_EXPERIMENTAL diff --git 
a/third_party/glm/ext/matrix_clip_space.hpp b/third_party/glm/ext/matrix_clip_space.hpp deleted file mode 100755 index c3874f2..0000000 --- a/third_party/glm/ext/matrix_clip_space.hpp +++ /dev/null @@ -1,522 +0,0 @@ -/// @ref ext_matrix_clip_space -/// @file glm/ext/matrix_clip_space.hpp -/// -/// @defgroup ext_matrix_clip_space GLM_EXT_matrix_clip_space -/// @ingroup ext -/// -/// Defines functions that generate clip space transformation matrices. -/// -/// The matrices generated by this extension use standard OpenGL fixed-function -/// conventions. For example, the lookAt function generates a transform from world -/// space into the specific eye space that the projective matrix functions -/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility -/// specifications defines the particular layout of this eye space. -/// -/// Include to use the features of this extension. -/// -/// @see ext_matrix_transform -/// @see ext_matrix_projection - -#pragma once - -// Dependencies -#include "../ext/scalar_constants.hpp" -#include "../geometric.hpp" -#include "../trigonometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_clip_space extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_clip_space - /// @{ - - /// Creates a matrix for projecting two-dimensional coordinates onto the screen. - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top, T const& zNear, T const& zFar) - /// @see gluOrtho2D man page - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho( - T left, T right, T bottom, T top); - - /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_ZO( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume using right-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH_NO( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_ZO( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. 
(OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH_NO( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoZO( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoNO( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume, using left-handed coordinates. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoLH( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume, using right-handed coordinates. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> orthoRH( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a matrix for an orthographic parallel viewing volume, using the default handedness and default near and far clip planes definition. - /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. - /// - /// @tparam T A floating-point scalar type - /// - /// @see - glm::ortho(T const& left, T const& right, T const& bottom, T const& top) - /// @see glOrtho man page - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> ortho( - T left, T right, T bottom, T top, T zNear, T zFar); - - /// Creates a left handed frustum matrix. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. 
(Direct3D clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_ZO( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a left handed frustum matrix. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH_NO( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a right handed frustum matrix. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_ZO( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a right handed frustum matrix. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH_NO( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumZO( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a frustum matrix using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumNO( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a left handed frustum matrix. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumLH( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a right handed frustum matrix. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustumRH( - T left, T right, T bottom, T top, T near, T far); - - /// Creates a frustum matrix with default handedness, using the default handedness and default near and far clip planes definition. - /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. 
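As a concrete illustration of those configuration macros (a sketch under the assumption that the consuming renderer targets Vulkan's 0..1 clip depth, which the headers themselves do not mandate):

    #define GLM_FORCE_DEPTH_ZERO_TO_ONE   // make the generic helpers use the 0..1 depth range
    #include <glm/glm.hpp>
    #include <glm/ext/matrix_clip_space.hpp>

    glm::mat4 ui_projection(float width, float height)
    {
        // With the macro above (and default right-handed coordinates), ortho()
        // dispatches to orthoRH_ZO(); calling the explicit variant directly
        // would produce the same matrix.
        return glm::ortho(0.0f, width, 0.0f, height, 0.0f, 1.0f);
    }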
- /// - /// @tparam T A floating-point scalar type - /// @see glFrustum man page - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> frustum( - T left, T right, T bottom, T top, T near, T far); - - - /// Creates a matrix for a right handed, symetric perspective-view frustum. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_ZO( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a right handed, symetric perspective-view frustum. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH_NO( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a left handed, symetric perspective-view frustum. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_ZO( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a left handed, symetric perspective-view frustum. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). 
- /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH_NO( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a symetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveZO( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a symetric perspective-view frustum using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveNO( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a right handed, symetric perspective-view frustum. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveRH( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a left handed, symetric perspective-view frustum. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. 
(OpenGL clip volume definition) - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveLH( - T fovy, T aspect, T near, T far); - - /// Creates a matrix for a symetric perspective-view frustum based on the default handedness and default near and far clip planes definition. - /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. - /// - /// @param fovy Specifies the field of view angle in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - /// @see gluPerspective man page - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspective( - T fovy, T aspect, T near, T far); - - /// Builds a perspective projection matrix based on a field of view using right-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_ZO( - T fov, T width, T height, T near, T far); - - /// Builds a perspective projection matrix based on a field of view using right-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH_NO( - T fov, T width, T height, T near, T far); - - /// Builds a perspective projection matrix based on a field of view using left-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param fov Expressed in radians. 
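One caveat while reading the comments above: several of them say the field of view is given "in degrees" and "in radians" in the same sentence; in practice GLM expects radians here. A minimal usage sketch (the Y-flip is an assumption about a Vulkan-style target, not something these headers require):

    #include <glm/glm.hpp>
    #include <glm/ext/matrix_clip_space.hpp>

    glm::mat4 camera_projection(float aspect)
    {
        // fovy is passed in radians; zNear and zFar must both be positive.
        glm::mat4 proj = glm::perspective(glm::radians(45.0f), aspect, 0.1f, 100.0f);
        proj[1][1] *= -1.0f; // common adjustment for Vulkan's inverted clip-space Y
        return proj;
    }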
- /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_ZO( - T fov, T width, T height, T near, T far); - - /// Builds a perspective projection matrix based on a field of view using left-handed coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH_NO( - T fov, T width, T height, T near, T far); - - /// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovZO( - T fov, T width, T height, T near, T far); - - /// Builds a perspective projection matrix based on a field of view using left-handed coordinates if GLM_FORCE_LEFT_HANDED if defined or right-handed coordinates otherwise. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovNO( - T fov, T width, T height, T near, T far); - - /// Builds a right handed perspective projection matrix based on a field of view. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). 
- /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovRH( - T fov, T width, T height, T near, T far); - - /// Builds a left handed perspective projection matrix based on a field of view. - /// If GLM_FORCE_DEPTH_ZERO_TO_ONE is defined, the near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// Otherwise, the near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFovLH( - T fov, T width, T height, T near, T far); - - /// Builds a perspective projection matrix based on a field of view and the default handedness and default near and far clip planes definition. - /// To change default handedness use GLM_FORCE_LEFT_HANDED. To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. - /// - /// @param fov Expressed in radians. - /// @param width Width of the viewport - /// @param height Height of the viewport - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param far Specifies the distance from the viewer to the far clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> perspectiveFov( - T fov, T width, T height, T near, T far); - - /// Creates a matrix for a left handed, symmetric perspective-view frustum with far plane at infinite. - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveLH( - T fovy, T aspect, T near); - - /// Creates a matrix for a right handed, symmetric perspective-view frustum with far plane at infinite. - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspectiveRH( - T fovy, T aspect, T near); - - /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinite with default handedness. - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. 
- /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> infinitePerspective( - T fovy, T aspect, T near); - - /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinite for graphics hardware that doesn't support depth clamping. - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective( - T fovy, T aspect, T near); - - /// Creates a matrix for a symmetric perspective-view frustum with far plane at infinite for graphics hardware that doesn't support depth clamping. - /// - /// @param fovy Specifies the field of view angle, in degrees, in the y direction. Expressed in radians. - /// @param aspect Specifies the aspect ratio that determines the field of view in the x direction. The aspect ratio is the ratio of x (width) to y (height). - /// @param near Specifies the distance from the viewer to the near clipping plane (always positive). - /// @param ep Epsilon - /// - /// @tparam T A floating-point scalar type - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> tweakedInfinitePerspective( - T fovy, T aspect, T near, T ep); - - /// @} -}//namespace glm - -#include "matrix_clip_space.inl" diff --git a/third_party/glm/ext/matrix_clip_space.inl b/third_party/glm/ext/matrix_clip_space.inl deleted file mode 100755 index 7e4df33..0000000 --- a/third_party/glm/ext/matrix_clip_space.inl +++ /dev/null @@ -1,555 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top) - { - mat<4, 4, T, defaultp> Result(static_cast(1)); - Result[0][0] = static_cast(2) / (right - left); - Result[1][1] = static_cast(2) / (top - bottom); - Result[2][2] = - static_cast(1); - Result[3][0] = - (right + left) / (right - left); - Result[3][1] = - (top + bottom) / (top - bottom); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_ZO(T left, T right, T bottom, T top, T zNear, T zFar) - { - mat<4, 4, T, defaultp> Result(1); - Result[0][0] = static_cast(2) / (right - left); - Result[1][1] = static_cast(2) / (top - bottom); - Result[2][2] = static_cast(1) / (zFar - zNear); - Result[3][0] = - (right + left) / (right - left); - Result[3][1] = - (top + bottom) / (top - bottom); - Result[3][2] = - zNear / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH_NO(T left, T right, T bottom, T top, T zNear, T zFar) - { - mat<4, 4, T, defaultp> Result(1); - Result[0][0] = static_cast(2) / (right - left); - Result[1][1] = static_cast(2) / (top - bottom); - Result[2][2] = static_cast(2) / (zFar - zNear); - Result[3][0] = - (right + left) / (right - left); - Result[3][1] = - (top + bottom) / (top - bottom); - Result[3][2] = - (zFar + zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> 
orthoRH_ZO(T left, T right, T bottom, T top, T zNear, T zFar) - { - mat<4, 4, T, defaultp> Result(1); - Result[0][0] = static_cast(2) / (right - left); - Result[1][1] = static_cast(2) / (top - bottom); - Result[2][2] = - static_cast(1) / (zFar - zNear); - Result[3][0] = - (right + left) / (right - left); - Result[3][1] = - (top + bottom) / (top - bottom); - Result[3][2] = - zNear / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH_NO(T left, T right, T bottom, T top, T zNear, T zFar) - { - mat<4, 4, T, defaultp> Result(1); - Result[0][0] = static_cast(2) / (right - left); - Result[1][1] = static_cast(2) / (top - bottom); - Result[2][2] = - static_cast(2) / (zFar - zNear); - Result[3][0] = - (right + left) / (right - left); - Result[3][1] = - (top + bottom) / (top - bottom); - Result[3][2] = - (zFar + zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoZO(T left, T right, T bottom, T top, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return orthoLH_ZO(left, right, bottom, top, zNear, zFar); -# else - return orthoRH_ZO(left, right, bottom, top, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoNO(T left, T right, T bottom, T top, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return orthoLH_NO(left, right, bottom, top, zNear, zFar); -# else - return orthoRH_NO(left, right, bottom, top, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoLH(T left, T right, T bottom, T top, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return orthoLH_ZO(left, right, bottom, top, zNear, zFar); -# else - return orthoLH_NO(left, right, bottom, top, zNear, zFar); -# endif - - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> orthoRH(T left, T right, T bottom, T top, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return orthoRH_ZO(left, right, bottom, top, zNear, zFar); -# else - return orthoRH_NO(left, right, bottom, top, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> ortho(T left, T right, T bottom, T top, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO - return orthoLH_ZO(left, right, bottom, top, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO - return orthoLH_NO(left, right, bottom, top, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO - return orthoRH_ZO(left, right, bottom, top, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO - return orthoRH_NO(left, right, bottom, top, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal) - { - mat<4, 4, T, defaultp> Result(0); - Result[0][0] = (static_cast(2) * nearVal) / (right - left); - Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); - Result[2][0] = (right + left) / (right - left); - Result[2][1] = (top + bottom) / (top - bottom); - Result[2][2] = farVal / (farVal - nearVal); - Result[2][3] = static_cast(1); - Result[3][2] = -(farVal * nearVal) / (farVal - nearVal); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH_NO(T left, T right, T bottom, T top, T nearVal, T farVal) - { - mat<4, 4, T, defaultp> Result(0); - Result[0][0] = (static_cast(2) * nearVal) / (right - 
left); - Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); - Result[2][0] = (right + left) / (right - left); - Result[2][1] = (top + bottom) / (top - bottom); - Result[2][2] = (farVal + nearVal) / (farVal - nearVal); - Result[2][3] = static_cast(1); - Result[3][2] = - (static_cast(2) * farVal * nearVal) / (farVal - nearVal); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_ZO(T left, T right, T bottom, T top, T nearVal, T farVal) - { - mat<4, 4, T, defaultp> Result(0); - Result[0][0] = (static_cast(2) * nearVal) / (right - left); - Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); - Result[2][0] = (right + left) / (right - left); - Result[2][1] = (top + bottom) / (top - bottom); - Result[2][2] = farVal / (nearVal - farVal); - Result[2][3] = static_cast(-1); - Result[3][2] = -(farVal * nearVal) / (farVal - nearVal); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH_NO(T left, T right, T bottom, T top, T nearVal, T farVal) - { - mat<4, 4, T, defaultp> Result(0); - Result[0][0] = (static_cast(2) * nearVal) / (right - left); - Result[1][1] = (static_cast(2) * nearVal) / (top - bottom); - Result[2][0] = (right + left) / (right - left); - Result[2][1] = (top + bottom) / (top - bottom); - Result[2][2] = - (farVal + nearVal) / (farVal - nearVal); - Result[2][3] = static_cast(-1); - Result[3][2] = - (static_cast(2) * farVal * nearVal) / (farVal - nearVal); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumZO(T left, T right, T bottom, T top, T nearVal, T farVal) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return frustumLH_ZO(left, right, bottom, top, nearVal, farVal); -# else - return frustumRH_ZO(left, right, bottom, top, nearVal, farVal); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumNO(T left, T right, T bottom, T top, T nearVal, T farVal) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return frustumLH_NO(left, right, bottom, top, nearVal, farVal); -# else - return frustumRH_NO(left, right, bottom, top, nearVal, farVal); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumLH(T left, T right, T bottom, T top, T nearVal, T farVal) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return frustumLH_ZO(left, right, bottom, top, nearVal, farVal); -# else - return frustumLH_NO(left, right, bottom, top, nearVal, farVal); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustumRH(T left, T right, T bottom, T top, T nearVal, T farVal) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return frustumRH_ZO(left, right, bottom, top, nearVal, farVal); -# else - return frustumRH_NO(left, right, bottom, top, nearVal, farVal); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> frustum(T left, T right, T bottom, T top, T nearVal, T farVal) - { -# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO - return frustumLH_ZO(left, right, bottom, top, nearVal, farVal); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO - return frustumLH_NO(left, right, bottom, top, nearVal, farVal); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO - return frustumRH_ZO(left, right, bottom, top, nearVal, farVal); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO - return frustumRH_NO(left, right, bottom, top, nearVal, farVal); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_ZO(T fovy, T aspect, T 
zNear, T zFar) - { - assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); - - T const tanHalfFovy = tan(fovy / static_cast(2)); - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); - Result[1][1] = static_cast(1) / (tanHalfFovy); - Result[2][2] = zFar / (zNear - zFar); - Result[2][3] = - static_cast(1); - Result[3][2] = -(zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH_NO(T fovy, T aspect, T zNear, T zFar) - { - assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); - - T const tanHalfFovy = tan(fovy / static_cast(2)); - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); - Result[1][1] = static_cast(1) / (tanHalfFovy); - Result[2][2] = - (zFar + zNear) / (zFar - zNear); - Result[2][3] = - static_cast(1); - Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_ZO(T fovy, T aspect, T zNear, T zFar) - { - assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); - - T const tanHalfFovy = tan(fovy / static_cast(2)); - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); - Result[1][1] = static_cast(1) / (tanHalfFovy); - Result[2][2] = zFar / (zFar - zNear); - Result[2][3] = static_cast(1); - Result[3][2] = -(zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH_NO(T fovy, T aspect, T zNear, T zFar) - { - assert(abs(aspect - std::numeric_limits::epsilon()) > static_cast(0)); - - T const tanHalfFovy = tan(fovy / static_cast(2)); - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = static_cast(1) / (aspect * tanHalfFovy); - Result[1][1] = static_cast(1) / (tanHalfFovy); - Result[2][2] = (zFar + zNear) / (zFar - zNear); - Result[2][3] = static_cast(1); - Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveZO(T fovy, T aspect, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return perspectiveLH_ZO(fovy, aspect, zNear, zFar); -# else - return perspectiveRH_ZO(fovy, aspect, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveNO(T fovy, T aspect, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return perspectiveLH_NO(fovy, aspect, zNear, zFar); -# else - return perspectiveRH_NO(fovy, aspect, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveLH(T fovy, T aspect, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return perspectiveLH_ZO(fovy, aspect, zNear, zFar); -# else - return perspectiveLH_NO(fovy, aspect, zNear, zFar); -# endif - - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveRH(T fovy, T aspect, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return perspectiveRH_ZO(fovy, aspect, zNear, zFar); -# else - return perspectiveRH_NO(fovy, aspect, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspective(T fovy, T aspect, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO - return perspectiveLH_ZO(fovy, aspect, zNear, zFar); -# elif 
GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO - return perspectiveLH_NO(fovy, aspect, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO - return perspectiveRH_ZO(fovy, aspect, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO - return perspectiveRH_NO(fovy, aspect, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_ZO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = zFar / (zNear - zFar); - Result[2][3] = - static_cast(1); - Result[3][2] = -(zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH_NO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = - (zFar + zNear) / (zFar - zNear); - Result[2][3] = - static_cast(1); - Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_ZO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = zFar / (zFar - zNear); - Result[2][3] = static_cast(1); - Result[3][2] = -(zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH_NO(T fov, T width, T height, T zNear, T zFar) - { - assert(width > static_cast(0)); - assert(height > static_cast(0)); - assert(fov > static_cast(0)); - - T const rad = fov; - T const h = glm::cos(static_cast(0.5) * rad) / glm::sin(static_cast(0.5) * rad); - T const w = h * height / width; ///todo max(width , Height) / min(width , Height)? 
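Reading the perspectiveFov* implementations above: h = cos(fov/2) / sin(fov/2) is cot(fov/2) and w = h * height / width, so building the projection from a viewport size appears equivalent to passing width/height as the aspect ratio to perspective(). A sketch of that equivalence (a reading of the code, not a documented guarantee):

    #include <glm/glm.hpp>
    #include <glm/ext/matrix_clip_space.hpp>

    void compare_projections()
    {
        // Both calls should yield the same matrix for a 1280x720 viewport.
        glm::mat4 by_viewport = glm::perspectiveFov(glm::radians(60.0f), 1280.0f, 720.0f, 0.1f, 100.0f);
        glm::mat4 by_aspect   = glm::perspective(glm::radians(60.0f), 1280.0f / 720.0f, 0.1f, 100.0f);
        (void)by_viewport; (void)by_aspect;
    }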
- - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = w; - Result[1][1] = h; - Result[2][2] = (zFar + zNear) / (zFar - zNear); - Result[2][3] = static_cast(1); - Result[3][2] = - (static_cast(2) * zFar * zNear) / (zFar - zNear); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovZO(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); -# else - return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovNO(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return perspectiveFovLH_NO(fov, width, height, zNear, zFar); -# else - return perspectiveFovRH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovLH(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); -# else - return perspectiveFovLH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFovRH(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT - return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); -# else - return perspectiveFovRH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> perspectiveFov(T fov, T width, T height, T zNear, T zFar) - { -# if GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_ZO - return perspectiveFovLH_ZO(fov, width, height, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_LH_NO - return perspectiveFovLH_NO(fov, width, height, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_ZO - return perspectiveFovRH_ZO(fov, width, height, zNear, zFar); -# elif GLM_CONFIG_CLIP_CONTROL == GLM_CLIP_CONTROL_RH_NO - return perspectiveFovRH_NO(fov, width, height, zNear, zFar); -# endif - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveRH(T fovy, T aspect, T zNear) - { - T const range = tan(fovy / static_cast(2)) * zNear; - T const left = -range * aspect; - T const right = range * aspect; - T const bottom = -range; - T const top = range; - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = (static_cast(2) * zNear) / (right - left); - Result[1][1] = (static_cast(2) * zNear) / (top - bottom); - Result[2][2] = - static_cast(1); - Result[2][3] = - static_cast(1); - Result[3][2] = - static_cast(2) * zNear; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspectiveLH(T fovy, T aspect, T zNear) - { - T const range = tan(fovy / static_cast(2)) * zNear; - T const left = -range * aspect; - T const right = range * aspect; - T const bottom = -range; - T const top = range; - - mat<4, 4, T, defaultp> Result(T(0)); - Result[0][0] = (static_cast(2) * zNear) / (right - left); - Result[1][1] = (static_cast(2) * zNear) / (top - bottom); - Result[2][2] = static_cast(1); - Result[2][3] = static_cast(1); - Result[3][2] = - static_cast(2) * zNear; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> infinitePerspective(T fovy, T aspect, T zNear) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return infinitePerspectiveLH(fovy, aspect, zNear); -# 
else - return infinitePerspectiveRH(fovy, aspect, zNear); -# endif - } - - // Infinite projection matrix: http://www.terathon.com/gdc07_lengyel.pdf - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear, T ep) - { - T const range = tan(fovy / static_cast(2)) * zNear; - T const left = -range * aspect; - T const right = range * aspect; - T const bottom = -range; - T const top = range; - - mat<4, 4, T, defaultp> Result(static_cast(0)); - Result[0][0] = (static_cast(2) * zNear) / (right - left); - Result[1][1] = (static_cast(2) * zNear) / (top - bottom); - Result[2][2] = ep - static_cast(1); - Result[2][3] = static_cast(-1); - Result[3][2] = (ep - static_cast(2)) * zNear; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> tweakedInfinitePerspective(T fovy, T aspect, T zNear) - { - return tweakedInfinitePerspective(fovy, aspect, zNear, epsilon()); - } -}//namespace glm diff --git a/third_party/glm/ext/matrix_common.hpp b/third_party/glm/ext/matrix_common.hpp deleted file mode 100755 index 05c3799..0000000 --- a/third_party/glm/ext/matrix_common.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref ext_matrix_common -/// @file glm/ext/matrix_common.hpp -/// -/// @defgroup ext_matrix_common GLM_EXT_matrix_common -/// @ingroup ext -/// -/// Defines functions for common matrix operations. -/// -/// Include to use the features of this extension. -/// -/// @see ext_matrix_common - -#pragma once - -#include "../detail/qualifier.hpp" -#include "../detail/_fixes.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_transform extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_common - /// @{ - - template - GLM_FUNC_DECL mat mix(mat const& x, mat const& y, mat const& a); - - template - GLM_FUNC_DECL mat mix(mat const& x, mat const& y, U a); - - /// @} -}//namespace glm - -#include "matrix_common.inl" diff --git a/third_party/glm/ext/matrix_common.inl b/third_party/glm/ext/matrix_common.inl deleted file mode 100755 index 9d50848..0000000 --- a/third_party/glm/ext/matrix_common.inl +++ /dev/null @@ -1,16 +0,0 @@ -#include "../matrix.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat mix(mat const& x, mat const& y, U a) - { - return mat(x) * (static_cast(1) - a) + mat(y) * a; - } - - template - GLM_FUNC_QUALIFIER mat mix(mat const& x, mat const& y, mat const& a) - { - return matrixCompMult(mat(x), static_cast(1) - a) + matrixCompMult(mat(y), a); - } -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x2.hpp b/third_party/glm/ext/matrix_double2x2.hpp deleted file mode 100755 index 94dca54..0000000 --- a/third_party/glm/ext/matrix_double2x2.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x2.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 2 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, double, defaultp> dmat2x2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, double, defaultp> dmat2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x2_precision.hpp b/third_party/glm/ext/matrix_double2x2_precision.hpp deleted file mode 100755 index 9e2c174..0000000 --- a/third_party/glm/ext/matrix_double2x2_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x2_precision.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, lowp> lowp_dmat2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, mediump> mediump_dmat2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, highp> highp_dmat2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, lowp> lowp_dmat2x2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, mediump> mediump_dmat2x2; - - /// 2 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, double, highp> highp_dmat2x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x3.hpp b/third_party/glm/ext/matrix_double2x3.hpp deleted file mode 100755 index bfef87a..0000000 --- a/third_party/glm/ext/matrix_double2x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x3.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 3 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 3, double, defaultp> dmat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x3_precision.hpp b/third_party/glm/ext/matrix_double2x3_precision.hpp deleted file mode 100755 index 098fb60..0000000 --- a/third_party/glm/ext/matrix_double2x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x3_precision.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, double, lowp> lowp_dmat2x3; - - /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, double, mediump> mediump_dmat2x3; - - /// 2 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, double, highp> highp_dmat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x4.hpp b/third_party/glm/ext/matrix_double2x4.hpp deleted file mode 100755 index 499284b..0000000 --- a/third_party/glm/ext/matrix_double2x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x4.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 4 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 4, double, defaultp> dmat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double2x4_precision.hpp b/third_party/glm/ext/matrix_double2x4_precision.hpp deleted file mode 100755 index 9b61ebc..0000000 --- a/third_party/glm/ext/matrix_double2x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double2x4_precision.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, double, lowp> lowp_dmat2x4; - - /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, double, mediump> mediump_dmat2x4; - - /// 2 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, double, highp> highp_dmat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x2.hpp b/third_party/glm/ext/matrix_double3x2.hpp deleted file mode 100755 index dd23f36..0000000 --- a/third_party/glm/ext/matrix_double3x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x2.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 2 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 2, double, defaultp> dmat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x2_precision.hpp b/third_party/glm/ext/matrix_double3x2_precision.hpp deleted file mode 100755 index 068d9e9..0000000 --- a/third_party/glm/ext/matrix_double3x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x2_precision.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, double, lowp> lowp_dmat3x2; - - /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, double, mediump> mediump_dmat3x2; - - /// 3 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, double, highp> highp_dmat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x3.hpp b/third_party/glm/ext/matrix_double3x3.hpp deleted file mode 100755 index 53572b7..0000000 --- a/third_party/glm/ext/matrix_double3x3.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x3.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 3 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, double, defaultp> dmat3x3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, double, defaultp> dmat3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x3_precision.hpp b/third_party/glm/ext/matrix_double3x3_precision.hpp deleted file mode 100755 index 8691e78..0000000 --- a/third_party/glm/ext/matrix_double3x3_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x3_precision.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, lowp> lowp_dmat3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, mediump> mediump_dmat3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, highp> highp_dmat3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, lowp> lowp_dmat3x3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, mediump> mediump_dmat3x3; - - /// 3 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, double, highp> highp_dmat3x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x4.hpp b/third_party/glm/ext/matrix_double3x4.hpp deleted file mode 100755 index c572d63..0000000 --- a/third_party/glm/ext/matrix_double3x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x4.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 4 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 4, double, defaultp> dmat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double3x4_precision.hpp b/third_party/glm/ext/matrix_double3x4_precision.hpp deleted file mode 100755 index f040217..0000000 --- a/third_party/glm/ext/matrix_double3x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double3x4_precision.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, double, lowp> lowp_dmat3x4; - - /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, double, mediump> mediump_dmat3x4; - - /// 3 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, double, highp> highp_dmat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x2.hpp b/third_party/glm/ext/matrix_double4x2.hpp deleted file mode 100755 index 9b229f4..0000000 --- a/third_party/glm/ext/matrix_double4x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x2.hpp - -#pragma once -#include "../detail/type_mat4x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 2 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 2, double, defaultp> dmat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x2_precision.hpp b/third_party/glm/ext/matrix_double4x2_precision.hpp deleted file mode 100755 index 6ad18ba..0000000 --- a/third_party/glm/ext/matrix_double4x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x2_precision.hpp - -#pragma once -#include "../detail/type_mat4x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 2 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, double, lowp> lowp_dmat4x2; - - /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, double, mediump> mediump_dmat4x2; - - /// 4 columns of 2 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, double, highp> highp_dmat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x3.hpp b/third_party/glm/ext/matrix_double4x3.hpp deleted file mode 100755 index dca4cf9..0000000 --- a/third_party/glm/ext/matrix_double4x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x3.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 3 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 3, double, defaultp> dmat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x3_precision.hpp b/third_party/glm/ext/matrix_double4x3_precision.hpp deleted file mode 100755 index f7371de..0000000 --- a/third_party/glm/ext/matrix_double4x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x3_precision.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 3 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, double, lowp> lowp_dmat4x3; - - /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, double, mediump> mediump_dmat4x3; - - /// 4 columns of 3 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, double, highp> highp_dmat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x4.hpp b/third_party/glm/ext/matrix_double4x4.hpp deleted file mode 100755 index 81e1bf6..0000000 --- a/third_party/glm/ext/matrix_double4x4.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x4.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 4 components matrix of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, double, defaultp> dmat4x4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, double, defaultp> dmat4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_double4x4_precision.hpp b/third_party/glm/ext/matrix_double4x4_precision.hpp deleted file mode 100755 index 4c36a84..0000000 --- a/third_party/glm/ext/matrix_double4x4_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_double4x4_precision.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, lowp> lowp_dmat4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, mediump> mediump_dmat4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, highp> highp_dmat4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, lowp> lowp_dmat4x4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, mediump> mediump_dmat4x4; - - /// 4 columns of 4 components matrix of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, double, highp> highp_dmat4x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x2.hpp b/third_party/glm/ext/matrix_float2x2.hpp deleted file mode 100755 index 53df921..0000000 --- a/third_party/glm/ext/matrix_float2x2.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x2.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 2 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, float, defaultp> mat2x2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 2, float, defaultp> mat2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x2_precision.hpp b/third_party/glm/ext/matrix_float2x2_precision.hpp deleted file mode 100755 index 898b6db..0000000 --- a/third_party/glm/ext/matrix_float2x2_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x2_precision.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, lowp> lowp_mat2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, mediump> mediump_mat2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, highp> highp_mat2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, lowp> lowp_mat2x2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, mediump> mediump_mat2x2; - - /// 2 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 2, float, highp> highp_mat2x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x3.hpp b/third_party/glm/ext/matrix_float2x3.hpp deleted file mode 100755 index 6f68822..0000000 --- a/third_party/glm/ext/matrix_float2x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x3.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 3 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 3, float, defaultp> mat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x3_precision.hpp b/third_party/glm/ext/matrix_float2x3_precision.hpp deleted file mode 100755 index 50c1032..0000000 --- a/third_party/glm/ext/matrix_float2x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x3_precision.hpp - -#pragma once -#include "../detail/type_mat2x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, float, lowp> lowp_mat2x3; - - /// 2 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, float, mediump> mediump_mat2x3; - - /// 2 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 3, float, highp> highp_mat2x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x4.hpp b/third_party/glm/ext/matrix_float2x4.hpp deleted file mode 100755 index 30f30de..0000000 --- a/third_party/glm/ext/matrix_float2x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x4.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 2 columns of 4 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<2, 4, float, defaultp> mat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float2x4_precision.hpp b/third_party/glm/ext/matrix_float2x4_precision.hpp deleted file mode 100755 index 079d638..0000000 --- a/third_party/glm/ext/matrix_float2x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x4_precision.hpp - -#pragma once -#include "../detail/type_mat2x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 2 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, float, lowp> lowp_mat2x4; - - /// 2 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, float, mediump> mediump_mat2x4; - - /// 2 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<2, 4, float, highp> highp_mat2x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x2.hpp b/third_party/glm/ext/matrix_float3x2.hpp deleted file mode 100755 index d39dd2f..0000000 --- a/third_party/glm/ext/matrix_float3x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x2.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core - /// @{ - - /// 3 columns of 2 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 2, float, defaultp> mat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x2_precision.hpp b/third_party/glm/ext/matrix_float3x2_precision.hpp deleted file mode 100755 index 8572c2a..0000000 --- a/third_party/glm/ext/matrix_float3x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x2_precision.hpp - -#pragma once -#include "../detail/type_mat3x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, float, lowp> lowp_mat3x2; - - /// 3 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, float, mediump> mediump_mat3x2; - - /// 3 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 2, float, highp> highp_mat3x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x3.hpp b/third_party/glm/ext/matrix_float3x3.hpp deleted file mode 100755 index 177d809..0000000 --- a/third_party/glm/ext/matrix_float3x3.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x3.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 3 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, float, defaultp> mat3x3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 3, float, defaultp> mat3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x3_precision.hpp b/third_party/glm/ext/matrix_float3x3_precision.hpp deleted file mode 100755 index 8a900c1..0000000 --- a/third_party/glm/ext/matrix_float3x3_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x3_precision.hpp - -#pragma once -#include "../detail/type_mat3x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, lowp> lowp_mat3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, mediump> mediump_mat3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, highp> highp_mat3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, lowp> lowp_mat3x3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, mediump> mediump_mat3x3; - - /// 3 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 3, float, highp> highp_mat3x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x4.hpp b/third_party/glm/ext/matrix_float3x4.hpp deleted file mode 100755 index 64b8459..0000000 --- a/third_party/glm/ext/matrix_float3x4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x4.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 3 columns of 4 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<3, 4, float, defaultp> mat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float3x4_precision.hpp b/third_party/glm/ext/matrix_float3x4_precision.hpp deleted file mode 100755 index bc36bf1..0000000 --- a/third_party/glm/ext/matrix_float3x4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float3x4_precision.hpp - -#pragma once -#include "../detail/type_mat3x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 3 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, float, lowp> lowp_mat3x4; - - /// 3 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, float, mediump> mediump_mat3x4; - - /// 3 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<3, 4, float, highp> highp_mat3x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x2.hpp b/third_party/glm/ext/matrix_float4x2.hpp deleted file mode 100755 index 1ed5227..0000000 --- a/third_party/glm/ext/matrix_float4x2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x2.hpp - -#pragma once -#include "../detail/type_mat4x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 2 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 2, float, defaultp> mat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x2_precision.hpp b/third_party/glm/ext/matrix_float4x2_precision.hpp deleted file mode 100755 index 88fd069..0000000 --- a/third_party/glm/ext/matrix_float4x2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float2x2_precision.hpp - -#pragma once -#include "../detail/type_mat2x2.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 2 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, float, lowp> lowp_mat4x2; - - /// 4 columns of 2 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, float, mediump> mediump_mat4x2; - - /// 4 columns of 2 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 2, float, highp> highp_mat4x2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x3.hpp b/third_party/glm/ext/matrix_float4x3.hpp deleted file mode 100755 index 5dbe765..0000000 --- a/third_party/glm/ext/matrix_float4x3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x3.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix - /// @{ - - /// 4 columns of 3 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 3, float, defaultp> mat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x3_precision.hpp b/third_party/glm/ext/matrix_float4x3_precision.hpp deleted file mode 100755 index 846ed4f..0000000 --- a/third_party/glm/ext/matrix_float4x3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x3_precision.hpp - -#pragma once -#include "../detail/type_mat4x3.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 3 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, float, lowp> lowp_mat4x3; - - /// 4 columns of 3 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, float, mediump> mediump_mat4x3; - - /// 4 columns of 3 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 3, float, highp> highp_mat4x3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x4.hpp b/third_party/glm/ext/matrix_float4x4.hpp deleted file mode 100755 index 5ba111d..0000000 --- a/third_party/glm/ext/matrix_float4x4.hpp +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x4.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @ingroup core_matrix - /// @{ - - /// 4 columns of 4 components matrix of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, float, defaultp> mat4x4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - typedef mat<4, 4, float, defaultp> mat4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_float4x4_precision.hpp b/third_party/glm/ext/matrix_float4x4_precision.hpp deleted file mode 100755 index 597149b..0000000 --- a/third_party/glm/ext/matrix_float4x4_precision.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref core -/// @file glm/ext/matrix_float4x4_precision.hpp - -#pragma once -#include "../detail/type_mat4x4.hpp" - -namespace glm -{ - /// @addtogroup core_matrix_precision - /// @{ - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, lowp> lowp_mat4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, mediump> mediump_mat4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, highp> highp_mat4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, lowp> lowp_mat4x4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, mediump> mediump_mat4x4; - - /// 4 columns of 4 components matrix of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see GLSL 4.20.8 specification, section 4.1.6 Matrices - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef mat<4, 4, float, highp> highp_mat4x4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/matrix_projection.hpp b/third_party/glm/ext/matrix_projection.hpp deleted file mode 100755 index 51fd01b..0000000 --- a/third_party/glm/ext/matrix_projection.hpp +++ /dev/null @@ -1,149 +0,0 @@ -/// @ref ext_matrix_projection -/// @file glm/ext/matrix_projection.hpp -/// -/// @defgroup ext_matrix_projection GLM_EXT_matrix_projection -/// @ingroup ext -/// -/// Functions that generate common projection transformation matrices. -/// -/// The matrices generated by this extension use standard OpenGL fixed-function -/// conventions. For example, the lookAt function generates a transform from world -/// space into the specific eye space that the projective matrix functions -/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility -/// specifications defines the particular layout of this eye space. -/// -/// Include to use the features of this extension. 
-/// -/// @see ext_matrix_transform -/// @see ext_matrix_clip_space - -#pragma once - -// Dependencies -#include "../gtc/constants.hpp" -#include "../geometric.hpp" -#include "../trigonometric.hpp" -#include "../matrix.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_projection extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_projection - /// @{ - - /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param obj Specify the object coordinates. - /// @param model Specifies the current modelview matrix - /// @param proj Specifies the current projection matrix - /// @param viewport Specifies the current viewport - /// @return Return the computed window coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluProject man page - template - GLM_FUNC_DECL vec<3, T, Q> projectZO( - vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param obj Specify the object coordinates. - /// @param model Specifies the current modelview matrix - /// @param proj Specifies the current projection matrix - /// @param viewport Specifies the current viewport - /// @return Return the computed window coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluProject man page - template - GLM_FUNC_DECL vec<3, T, Q> projectNO( - vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified object coordinates (obj.x, obj.y, obj.z) into window coordinates using default near and far clip planes definition. - /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. - /// - /// @param obj Specify the object coordinates. - /// @param model Specifies the current modelview matrix - /// @param proj Specifies the current projection matrix - /// @param viewport Specifies the current viewport - /// @return Return the computed window coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluProject man page - template - GLM_FUNC_DECL vec<3, T, Q> project( - vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of 0 and +1 respectively. (Direct3D clip volume definition) - /// - /// @param win Specify the window coordinates to be mapped. 
- /// @param model Specifies the modelview matrix - /// @param proj Specifies the projection matrix - /// @param viewport Specifies the viewport - /// @return Returns the computed object coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluUnProject man page - template - GLM_FUNC_DECL vec<3, T, Q> unProjectZO( - vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates. - /// The near and far clip planes correspond to z normalized device coordinates of -1 and +1 respectively. (OpenGL clip volume definition) - /// - /// @param win Specify the window coordinates to be mapped. - /// @param model Specifies the modelview matrix - /// @param proj Specifies the projection matrix - /// @param viewport Specifies the viewport - /// @return Returns the computed object coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluUnProject man page - template - GLM_FUNC_DECL vec<3, T, Q> unProjectNO( - vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Map the specified window coordinates (win.x, win.y, win.z) into object coordinates using default near and far clip planes definition. - /// To change default near and far clip planes definition use GLM_FORCE_DEPTH_ZERO_TO_ONE. - /// - /// @param win Specify the window coordinates to be mapped. - /// @param model Specifies the modelview matrix - /// @param proj Specifies the projection matrix - /// @param viewport Specifies the viewport - /// @return Returns the computed object coordinates. - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. - /// - /// @see gluUnProject man page - template - GLM_FUNC_DECL vec<3, T, Q> unProject( - vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport); - - /// Define a picking region - /// - /// @param center Specify the center of a picking region in window coordinates. - /// @param delta Specify the width and height, respectively, of the picking region in window coordinates. - /// @param viewport Rendering viewport - /// @tparam T Native type used for the computation. Currently supported: half (not recommended), float or double. - /// @tparam U Currently supported: Floating-point types and integer types. 
-	///
-	/// @see gluPickMatrix man page
-	template<typename T, qualifier Q, typename U>
-	GLM_FUNC_DECL mat<4, 4, T, Q> pickMatrix(
-		vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport);
-
-	/// @}
-}//namespace glm
-
-#include "matrix_projection.inl"
diff --git a/third_party/glm/ext/matrix_projection.inl b/third_party/glm/ext/matrix_projection.inl
deleted file mode 100755
index 8b4eea9..0000000
--- a/third_party/glm/ext/matrix_projection.inl
+++ /dev/null
@@ -1,104 +0,0 @@
-namespace glm
-{
-	template<typename T, typename U, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> projectZO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
-	{
-		vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast<T>(1));
-		tmp = model * tmp;
-		tmp = proj * tmp;
-
-		tmp /= tmp.w;
-		tmp.x = tmp.x * static_cast<T>(0.5) + static_cast<T>(0.5);
-		tmp.y = tmp.y * static_cast<T>(0.5) + static_cast<T>(0.5);
-
-		tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]);
-		tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]);
-
-		return vec<3, T, Q>(tmp);
-	}
-
-	template<typename T, typename U, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> projectNO(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
-	{
-		vec<4, T, Q> tmp = vec<4, T, Q>(obj, static_cast<T>(1));
-		tmp = model * tmp;
-		tmp = proj * tmp;
-
-		tmp /= tmp.w;
-		tmp = tmp * static_cast<T>(0.5) + static_cast<T>(0.5);
-		tmp[0] = tmp[0] * T(viewport[2]) + T(viewport[0]);
-		tmp[1] = tmp[1] * T(viewport[3]) + T(viewport[1]);
-
-		return vec<3, T, Q>(tmp);
-	}
-
-	template<typename T, typename U, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> project(vec<3, T, Q> const& obj, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
-	{
-		if(GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT)
-			return projectZO(obj, model, proj, viewport);
-		else
-			return projectNO(obj, model, proj, viewport);
-	}
-
-	template<typename T, typename U, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
-	{
-		mat<4, 4, T, Q> Inverse = inverse(proj * model);
-
-		vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
-		tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
-		tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
-		tmp.x = tmp.x * static_cast<T>(2) - static_cast<T>(1);
-		tmp.y = tmp.y * static_cast<T>(2) - static_cast<T>(1);
-
-		vec<4, T, Q> obj = Inverse * tmp;
-		obj /= obj.w;
-
-		return vec<3, T, Q>(obj);
-	}
-
-	template<typename T, typename U, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
-	{
-		mat<4, 4, T, Q> Inverse = inverse(proj * model);
-
-		vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
-		tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
-		tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
-		tmp = tmp * static_cast<T>(2) - static_cast<T>(1);
-
-		vec<4, T, Q> obj = Inverse * tmp;
-		obj /= obj.w;
-
-		return vec<3, T, Q>(obj);
-	}
-
-	template<typename T, typename U, qualifier Q>
-	GLM_FUNC_QUALIFIER vec<3, T, Q> unProject(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
-	{
-		if(GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_ZO_BIT)
-			return unProjectZO(win, model, proj, viewport);
-		else
-			return unProjectNO(win, model, proj, viewport);
-	}
-
-	template<typename T, qualifier Q, typename U>
-	GLM_FUNC_QUALIFIER mat<4, 4, T, Q> pickMatrix(vec<2, T, Q> const& center, vec<2, T, Q> const& delta, vec<4, U, Q> const& viewport)
-	{
-		assert(delta.x > static_cast<T>(0) && delta.y > static_cast<T>(0));
-		mat<4, 4, T, Q> Result(static_cast<T>(1));
-
-		if(!(delta.x > static_cast<T>(0) && delta.y > static_cast<T>(0)))
-			return Result; // Error
-
-		vec<3, T, Q> Temp(
-			(static_cast<T>(viewport[2]) - static_cast<T>(2) * (center.x - static_cast<T>(viewport[0]))) / delta.x,
-			(static_cast<T>(viewport[3]) - static_cast<T>(2) * (center.y - static_cast<T>(viewport[1]))) / delta.y,
-			static_cast<T>(0));
-
-		// Translate and scale the picked region to the entire window
-		Result = translate(Result, Temp);
-		return scale(Result, vec<3, T, Q>(static_cast<T>(viewport[2]) / delta.x, static_cast<T>(viewport[3]) / delta.y, static_cast<T>(1)));
-	}
-}//namespace glm
diff --git a/third_party/glm/ext/matrix_relational.hpp b/third_party/glm/ext/matrix_relational.hpp
deleted file mode 100755
index 20023ad..0000000
--- a/third_party/glm/ext/matrix_relational.hpp
+++ /dev/null
@@ -1,132 +0,0 @@
-/// @ref ext_matrix_relational
-/// @file glm/ext/matrix_relational.hpp
-///
-/// @defgroup ext_matrix_relational GLM_EXT_matrix_relational
-/// @ingroup ext
-///
-/// Exposes comparison functions for matrix types that take a user defined epsilon values.
-///
-/// Include <glm/ext/matrix_relational.hpp> to use the features of this extension.
-///
-/// @see ext_vector_relational
-/// @see ext_scalar_relational
-/// @see ext_quaternion_relational
-
-#pragma once
-
-// Dependencies
-#include "../detail/qualifier.hpp"
-
-#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED)
-#	pragma message("GLM: GLM_EXT_matrix_relational extension included")
-#endif
-
-namespace glm
-{
-	/// @addtogroup ext_matrix_relational
-	/// @{
-
-	/// Perform a component-wise equal-to comparison of two matrices.
-	/// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices.
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
-	/// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
-	/// @tparam T Floating-point or integer scalar types
-	/// @tparam Q Value from qualifier enum
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y);
-
-	/// Perform a component-wise not-equal-to comparison of two matrices.
-	/// Return a boolean vector which components value is True if this expression is satisfied per column of the matrices.
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
-	/// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
-	/// @tparam T Floating-point or integer scalar types
-	/// @tparam Q Value from qualifier enum
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> notEqual(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y);
-
-	/// Returns the component-wise comparison of |x - y| < epsilon.
-	/// True if this expression is satisfied.
-	///
-	/// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix
-	/// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix
-	/// @tparam T Floating-point or integer scalar types
-	/// @tparam Q Value from qualifier enum
-	template<length_t C, length_t R, typename T, qualifier Q>
-	GLM_FUNC_DECL GLM_CONSTEXPR vec<C, bool, Q> equal(mat<C, R, T, Q> const& x, mat<C, R, T, Q> const& y, T epsilon);
-
-	/// Returns the component-wise comparison of |x - y| < epsilon.
-	/// True if this expression is satisfied.
- /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, vec const& epsilon); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is not satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, T epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, vec const& epsilon); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, int ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(mat const& x, mat const& y, vec const& ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is not satisfied. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, int ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is not satisfied. 
- /// - /// @tparam C Integer between 1 and 4 included that qualify the number of columns of the matrix - /// @tparam R Integer between 1 and 4 included that qualify the number of rows of the matrix - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, vec const& ULPs); - - /// @} -}//namespace glm - -#include "matrix_relational.inl" diff --git a/third_party/glm/ext/matrix_relational.inl b/third_party/glm/ext/matrix_relational.inl deleted file mode 100755 index b2b8753..0000000 --- a/third_party/glm/ext/matrix_relational.inl +++ /dev/null @@ -1,82 +0,0 @@ -/// @ref ext_vector_relational -/// @file glm/ext/vector_relational.inl - -// Dependency: -#include "../ext/vector_relational.hpp" -#include "../common.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b) - { - return equal(a, b, static_cast(0)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, T Epsilon) - { - return equal(a, b, vec(Epsilon)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& Epsilon) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = all(equal(a[i], b[i], Epsilon[i])); - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y) - { - return notEqual(x, y, static_cast(0)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, T Epsilon) - { - return notEqual(x, y, vec(Epsilon)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& Epsilon) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = any(notEqual(a[i], b[i], Epsilon[i])); - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, int MaxULPs) - { - return equal(a, b, vec(MaxULPs)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(mat const& a, mat const& b, vec const& MaxULPs) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = all(equal(a[i], b[i], MaxULPs[i])); - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& x, mat const& y, int MaxULPs) - { - return notEqual(x, y, vec(MaxULPs)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(mat const& a, mat const& b, vec const& MaxULPs) - { - vec Result(true); - for(length_t i = 0; i < C; ++i) - Result[i] = any(notEqual(a[i], b[i], MaxULPs[i])); - return Result; - } - -}//namespace glm diff --git a/third_party/glm/ext/matrix_transform.hpp b/third_party/glm/ext/matrix_transform.hpp deleted file mode 100755 index cbd187e..0000000 --- a/third_party/glm/ext/matrix_transform.hpp +++ /dev/null @@ -1,144 +0,0 @@ -/// @ref ext_matrix_transform -/// @file glm/ext/matrix_transform.hpp -/// -/// @defgroup ext_matrix_transform GLM_EXT_matrix_transform -/// @ingroup ext -/// -/// Defines functions that generate common transformation matrices. -/// -/// The matrices generated by this extension use standard OpenGL fixed-function -/// conventions. For example, the lookAt function generates a transform from world -/// space into the specific eye space that the projective matrix functions -/// (perspective, ortho, etc) are designed to expect. The OpenGL compatibility -/// specifications defines the particular layout of this eye space. 
-/// -/// Include to use the features of this extension. -/// -/// @see ext_matrix_projection -/// @see ext_matrix_clip_space - -#pragma once - -// Dependencies -#include "../gtc/constants.hpp" -#include "../geometric.hpp" -#include "../trigonometric.hpp" -#include "../matrix.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_matrix_transform extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_matrix_transform - /// @{ - - /// Builds an identity matrix. - template - GLM_FUNC_DECL GLM_CONSTEXPR genType identity(); - - /// Builds a translation 4 * 4 matrix created from a vector of 3 components. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param v Coordinates of a translation vector. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @code - /// #include - /// #include - /// ... - /// glm::mat4 m = glm::translate(glm::mat4(1.0f), glm::vec3(1.0f)); - /// // m[0][0] == 1.0f, m[0][1] == 0.0f, m[0][2] == 0.0f, m[0][3] == 0.0f - /// // m[1][0] == 0.0f, m[1][1] == 1.0f, m[1][2] == 0.0f, m[1][3] == 0.0f - /// // m[2][0] == 0.0f, m[2][1] == 0.0f, m[2][2] == 1.0f, m[2][3] == 0.0f - /// // m[3][0] == 1.0f, m[3][1] == 1.0f, m[3][2] == 1.0f, m[3][3] == 1.0f - /// @endcode - /// - /// @see - translate(mat<4, 4, T, Q> const& m, T x, T y, T z) - /// @see - translate(vec<3, T, Q> const& v) - /// @see glTranslate man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> translate( - mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); - - /// Builds a rotation 4 * 4 matrix created from an axis vector and an angle. - /// - /// @param m Input matrix multiplied by this rotation matrix. - /// @param angle Rotation angle expressed in radians. - /// @param axis Rotation axis, recommended to be normalized. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) - /// @see - rotate(T angle, vec<3, T, Q> const& v) - /// @see glRotate man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> rotate( - mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& axis); - - /// Builds a scale 4 * 4 matrix created from 3 scalars. - /// - /// @param m Input matrix multiplied by this scale matrix. - /// @param v Ratio of scaling for each axis. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - scale(mat<4, 4, T, Q> const& m, T x, T y, T z) - /// @see - scale(vec<3, T, Q> const& v) - /// @see glScale man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> scale( - mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v); - - /// Build a right handed look at view matrix. - /// - /// @param eye Position of the camera - /// @param center Position where the camera is looking at - /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) - template - GLM_FUNC_DECL mat<4, 4, T, Q> lookAtRH( - vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); - - /// Build a left handed look at view matrix. 
- /// - /// @param eye Position of the camera - /// @param center Position where the camera is looking at - /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) - template - GLM_FUNC_DECL mat<4, 4, T, Q> lookAtLH( - vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); - - /// Build a look at view matrix based on the default handedness. - /// - /// @param eye Position of the camera - /// @param center Position where the camera is looking at - /// @param up Normalized up vector, how the camera is oriented. Typically (0, 0, 1) - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) frustum(T const& left, T const& right, T const& bottom, T const& top, T const& nearVal, T const& farVal) - /// @see gluLookAt man page - template - GLM_FUNC_DECL mat<4, 4, T, Q> lookAt( - vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up); - - /// @} -}//namespace glm - -#include "matrix_transform.inl" diff --git a/third_party/glm/ext/matrix_transform.inl b/third_party/glm/ext/matrix_transform.inl deleted file mode 100755 index a415157..0000000 --- a/third_party/glm/ext/matrix_transform.inl +++ /dev/null @@ -1,152 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType identity() - { - return detail::init_gentype::GENTYPE>::identity(); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) - { - mat<4, 4, T, Q> Result(m); - Result[3] = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - - vec<3, T, Q> axis(normalize(v)); - vec<3, T, Q> temp((T(1) - c) * axis); - - mat<4, 4, T, Q> Rotate; - Rotate[0][0] = c + temp[0] * axis[0]; - Rotate[0][1] = temp[0] * axis[1] + s * axis[2]; - Rotate[0][2] = temp[0] * axis[2] - s * axis[1]; - - Rotate[1][0] = temp[1] * axis[0] - s * axis[2]; - Rotate[1][1] = c + temp[1] * axis[1]; - Rotate[1][2] = temp[1] * axis[2] + s * axis[0]; - - Rotate[2][0] = temp[2] * axis[0] + s * axis[1]; - Rotate[2][1] = temp[2] * axis[1] - s * axis[0]; - Rotate[2][2] = c + temp[2] * axis[2]; - - mat<4, 4, T, Q> Result; - Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; - Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; - Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; - Result[3] = m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate_slow(mat<4, 4, T, Q> const& m, T angle, vec<3, T, Q> const& v) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - mat<4, 4, T, Q> Result; - - vec<3, T, Q> axis = normalize(v); - - Result[0][0] = c + (static_cast(1) - c) * axis.x * axis.x; - Result[0][1] = (static_cast(1) - c) * axis.x * axis.y + s * axis.z; - Result[0][2] = (static_cast(1) - c) * axis.x * axis.z - s * axis.y; - Result[0][3] = 
static_cast(0); - - Result[1][0] = (static_cast(1) - c) * axis.y * axis.x - s * axis.z; - Result[1][1] = c + (static_cast(1) - c) * axis.y * axis.y; - Result[1][2] = (static_cast(1) - c) * axis.y * axis.z + s * axis.x; - Result[1][3] = static_cast(0); - - Result[2][0] = (static_cast(1) - c) * axis.z * axis.x + s * axis.y; - Result[2][1] = (static_cast(1) - c) * axis.z * axis.y - s * axis.x; - Result[2][2] = c + (static_cast(1) - c) * axis.z * axis.z; - Result[2][3] = static_cast(0); - - Result[3] = vec<4, T, Q>(0, 0, 0, 1); - return m * Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) - { - mat<4, 4, T, Q> Result; - Result[0] = m[0] * v[0]; - Result[1] = m[1] * v[1]; - Result[2] = m[2] * v[2]; - Result[3] = m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale_slow(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& v) - { - mat<4, 4, T, Q> Result(T(1)); - Result[0][0] = v.x; - Result[1][1] = v.y; - Result[2][2] = v.z; - return m * Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtRH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) - { - vec<3, T, Q> const f(normalize(center - eye)); - vec<3, T, Q> const s(normalize(cross(f, up))); - vec<3, T, Q> const u(cross(s, f)); - - mat<4, 4, T, Q> Result(1); - Result[0][0] = s.x; - Result[1][0] = s.y; - Result[2][0] = s.z; - Result[0][1] = u.x; - Result[1][1] = u.y; - Result[2][1] = u.z; - Result[0][2] =-f.x; - Result[1][2] =-f.y; - Result[2][2] =-f.z; - Result[3][0] =-dot(s, eye); - Result[3][1] =-dot(u, eye); - Result[3][2] = dot(f, eye); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAtLH(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) - { - vec<3, T, Q> const f(normalize(center - eye)); - vec<3, T, Q> const s(normalize(cross(up, f))); - vec<3, T, Q> const u(cross(f, s)); - - mat<4, 4, T, Q> Result(1); - Result[0][0] = s.x; - Result[1][0] = s.y; - Result[2][0] = s.z; - Result[0][1] = u.x; - Result[1][1] = u.y; - Result[2][1] = u.z; - Result[0][2] = f.x; - Result[1][2] = f.y; - Result[2][2] = f.z; - Result[3][0] = -dot(s, eye); - Result[3][1] = -dot(u, eye); - Result[3][2] = -dot(f, eye); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> lookAt(vec<3, T, Q> const& eye, vec<3, T, Q> const& center, vec<3, T, Q> const& up) - { - GLM_IF_CONSTEXPR(GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT) - return lookAtLH(eye, center, up); - else - return lookAtRH(eye, center, up); - } -}//namespace glm diff --git a/third_party/glm/ext/quaternion_common.hpp b/third_party/glm/ext/quaternion_common.hpp deleted file mode 100755 index 2980ed4..0000000 --- a/third_party/glm/ext/quaternion_common.hpp +++ /dev/null @@ -1,120 +0,0 @@ -/// @ref ext_quaternion_common -/// @file glm/ext/quaternion_common.hpp -/// -/// @defgroup ext_quaternion_common GLM_EXT_quaternion_common -/// @ingroup ext -/// -/// Provides common functions for quaternion types -/// -/// Include to use the features of this extension. 
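The translate, rotate, scale and lookAt helpers implemented above all post-multiply onto the matrix they receive, so a model matrix is built by chaining calls starting from the identity; a minimal sketch, assuming <glm/glm.hpp> and <glm/ext/matrix_transform.hpp>:

    #include <glm/glm.hpp>
    #include <glm/ext/matrix_transform.hpp>

    glm::mat4 BuildModelView()
    {
        // Translate, then rotate 45 degrees around Y, then scale uniformly by 2.
        glm::mat4 model = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 1.0f, 0.0f));
        model = glm::rotate(model, glm::radians(45.0f), glm::vec3(0.0f, 1.0f, 0.0f));
        model = glm::scale(model, glm::vec3(2.0f));

        // View matrix; handedness follows GLM_CONFIG_CLIP_CONTROL as implemented above.
        glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),  // eye
                                     glm::vec3(0.0f),              // center
                                     glm::vec3(0.0f, 1.0f, 0.0f)); // up
        return view * model;
    }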
-/// -/// @see ext_scalar_common -/// @see ext_vector_common -/// @see ext_quaternion_float -/// @see ext_quaternion_double -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_trigonometric -/// @see ext_quaternion_transform - -#pragma once - -// Dependency: -#include "../ext/scalar_constants.hpp" -#include "../ext/quaternion_geometric.hpp" -#include "../common.hpp" -#include "../trigonometric.hpp" -#include "../exponential.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_common extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_common - /// @{ - - /// Spherical linear interpolation of two quaternions. - /// The interpolation is oriented and the rotation is performed at constant speed. - /// For short path spherical linear interpolation, use the slerp function. - /// - /// @param x A quaternion - /// @param y A quaternion - /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - /// - /// @see - slerp(qua const& x, qua const& y, T const& a) - template - GLM_FUNC_DECL qua mix(qua const& x, qua const& y, T a); - - /// Linear interpolation of two quaternions. - /// The interpolation is oriented. - /// - /// @param x A quaternion - /// @param y A quaternion - /// @param a Interpolation factor. The interpolation is defined in the range [0, 1]. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua lerp(qua const& x, qua const& y, T a); - - /// Spherical linear interpolation of two quaternions. - /// The interpolation always take the short path and the rotation is performed at constant speed. - /// - /// @param x A quaternion - /// @param y A quaternion - /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua slerp(qua const& x, qua const& y, T a); - - /// Returns the q conjugate. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua conjugate(qua const& q); - - /// Returns the q inverse. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua inverse(qua const& q); - - /// Returns true if x holds a NaN (not a number) - /// representation in the underlying implementation's set of - /// floating point representations. Returns false otherwise, - /// including for implementations with no NaN - /// representations. - /// - /// /!\ When using compiler fast math, this function may fail. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> isnan(qua const& x); - - /// Returns true if x holds a positive infinity or negative - /// infinity representation in the underlying implementation's - /// set of floating point representations. Returns false - /// otherwise, including for implementations with no infinity - /// representations. 
- /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> isinf(qua const& x); - - /// @} -} //namespace glm - -#include "quaternion_common.inl" diff --git a/third_party/glm/ext/quaternion_common.inl b/third_party/glm/ext/quaternion_common.inl deleted file mode 100755 index 3b2846f..0000000 --- a/third_party/glm/ext/quaternion_common.inl +++ /dev/null @@ -1,107 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER qua mix(qua const& x, qua const& y, T a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'mix' only accept floating-point inputs"); - - T const cosTheta = dot(x, y); - - // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator - if(cosTheta > static_cast(1) - epsilon()) - { - // Linear interpolation - return qua( - mix(x.w, y.w, a), - mix(x.x, y.x, a), - mix(x.y, y.y, a), - mix(x.z, y.z, a)); - } - else - { - // Essential Mathematics, page 467 - T angle = acos(cosTheta); - return (sin((static_cast(1) - a) * angle) * x + sin(a * angle) * y) / sin(angle); - } - } - - template - GLM_FUNC_QUALIFIER qua lerp(qua const& x, qua const& y, T a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'lerp' only accept floating-point inputs"); - - // Lerp is only defined in [0, 1] - assert(a >= static_cast(0)); - assert(a <= static_cast(1)); - - return x * (static_cast(1) - a) + (y * a); - } - - template - GLM_FUNC_QUALIFIER qua slerp(qua const& x, qua const& y, T a) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'slerp' only accept floating-point inputs"); - - qua z = y; - - T cosTheta = dot(x, y); - - // If cosTheta < 0, the interpolation will take the long way around the sphere. - // To fix this, one quat must be negated. 
- if(cosTheta < static_cast(0)) - { - z = -y; - cosTheta = -cosTheta; - } - - // Perform a linear interpolation when cosTheta is close to 1 to avoid side effect of sin(angle) becoming a zero denominator - if(cosTheta > static_cast(1) - epsilon()) - { - // Linear interpolation - return qua( - mix(x.w, z.w, a), - mix(x.x, z.x, a), - mix(x.y, z.y, a), - mix(x.z, z.z, a)); - } - else - { - // Essential Mathematics, page 467 - T angle = acos(cosTheta); - return (sin((static_cast(1) - a) * angle) * x + sin(a * angle) * z) / sin(angle); - } - } - - template - GLM_FUNC_QUALIFIER qua conjugate(qua const& q) - { - return qua(q.w, -q.x, -q.y, -q.z); - } - - template - GLM_FUNC_QUALIFIER qua inverse(qua const& q) - { - return conjugate(q) / dot(q, q); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> isnan(qua const& q) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isnan' only accept floating-point inputs"); - - return vec<4, bool, Q>(isnan(q.x), isnan(q.y), isnan(q.z), isnan(q.w)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> isinf(qua const& q) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isinf' only accept floating-point inputs"); - - return vec<4, bool, Q>(isinf(q.x), isinf(q.y), isinf(q.z), isinf(q.w)); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "quaternion_common_simd.inl" -#endif - diff --git a/third_party/glm/ext/quaternion_common_simd.inl b/third_party/glm/ext/quaternion_common_simd.inl deleted file mode 100755 index ddfc8a4..0000000 --- a/third_party/glm/ext/quaternion_common_simd.inl +++ /dev/null @@ -1,18 +0,0 @@ -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -namespace glm{ -namespace detail -{ - template - struct compute_dot, float, true> - { - static GLM_FUNC_QUALIFIER float call(qua const& x, qua const& y) - { - return _mm_cvtss_f32(glm_vec1_dot(x.data, y.data)); - } - }; -}//namespace detail -}//namespace glm - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT - diff --git a/third_party/glm/ext/quaternion_double.hpp b/third_party/glm/ext/quaternion_double.hpp deleted file mode 100755 index 63b24de..0000000 --- a/third_party/glm/ext/quaternion_double.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/// @ref ext_quaternion_double -/// @file glm/ext/quaternion_double.hpp -/// -/// @defgroup ext_quaternion_double GLM_EXT_quaternion_double -/// @ingroup ext -/// -/// Exposes double-precision floating point quaternion type. -/// -/// Include to use the features of this extension. -/// -/// @see ext_quaternion_float -/// @see ext_quaternion_double_precision -/// @see ext_quaternion_common -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_transform -/// @see ext_quaternion_trigonometric - -#pragma once - -// Dependency: -#include "../detail/type_quat.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_double extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_double - /// @{ - - /// Quaternion of double-precision floating-point numbers. 
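As implemented above, slerp always takes the short arc (one operand is negated when the dot product is negative) and degrades to a plain linear interpolation when the inputs are nearly parallel; a minimal usage sketch, assuming the aggregate <glm/gtc/quaternion.hpp> header:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Interpolate 30% of the way from one orientation to another along the short arc.
    glm::quat Blend(const glm::quat& from, const glm::quat& to)
    {
        return glm::slerp(from, to, 0.3f);
    }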
- typedef qua dquat; - - /// @} -} //namespace glm - diff --git a/third_party/glm/ext/quaternion_double_precision.hpp b/third_party/glm/ext/quaternion_double_precision.hpp deleted file mode 100755 index 8aa24a1..0000000 --- a/third_party/glm/ext/quaternion_double_precision.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/// @ref ext_quaternion_double_precision -/// @file glm/ext/quaternion_double_precision.hpp -/// -/// @defgroup ext_quaternion_double_precision GLM_EXT_quaternion_double_precision -/// @ingroup ext -/// -/// Exposes double-precision floating point quaternion type with various precision in term of ULPs. -/// -/// Include to use the features of this extension. - -#pragma once - -// Dependency: -#include "../detail/type_quat.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_double_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_double_precision - /// @{ - - /// Quaternion of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see ext_quaternion_double_precision - typedef qua lowp_dquat; - - /// Quaternion of medium double-qualifier floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see ext_quaternion_double_precision - typedef qua mediump_dquat; - - /// Quaternion of high double-qualifier floating-point numbers using high precision arithmetic in term of ULPs. - /// - /// @see ext_quaternion_double_precision - typedef qua highp_dquat; - - /// @} -} //namespace glm - diff --git a/third_party/glm/ext/quaternion_exponential.hpp b/third_party/glm/ext/quaternion_exponential.hpp deleted file mode 100755 index affe297..0000000 --- a/third_party/glm/ext/quaternion_exponential.hpp +++ /dev/null @@ -1,63 +0,0 @@ -/// @ref ext_quaternion_exponential -/// @file glm/ext/quaternion_exponential.hpp -/// -/// @defgroup ext_quaternion_exponential GLM_EXT_quaternion_exponential -/// @ingroup ext -/// -/// Provides exponential functions for quaternion types -/// -/// Include to use the features of this extension. -/// -/// @see core_exponential -/// @see ext_quaternion_float -/// @see ext_quaternion_double - -#pragma once - -// Dependency: -#include "../common.hpp" -#include "../trigonometric.hpp" -#include "../geometric.hpp" -#include "../ext/scalar_constants.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_exponential extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_transform - /// @{ - - /// Returns a exponential of a quaternion. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua exp(qua const& q); - - /// Returns a logarithm of a quaternion - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua log(qua const& q); - - /// Returns a quaternion raised to a power. 
- /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua pow(qua const& q, T y); - - /// Returns the square root of a quaternion - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua sqrt(qua const& q); - - /// @} -} //namespace glm - -#include "quaternion_exponential.inl" diff --git a/third_party/glm/ext/quaternion_exponential.inl b/third_party/glm/ext/quaternion_exponential.inl deleted file mode 100755 index 8456c00..0000000 --- a/third_party/glm/ext/quaternion_exponential.inl +++ /dev/null @@ -1,85 +0,0 @@ -#include "scalar_constants.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER qua exp(qua const& q) - { - vec<3, T, Q> u(q.x, q.y, q.z); - T const Angle = glm::length(u); - if (Angle < epsilon()) - return qua(); - - vec<3, T, Q> const v(u / Angle); - return qua(cos(Angle), sin(Angle) * v); - } - - template - GLM_FUNC_QUALIFIER qua log(qua const& q) - { - vec<3, T, Q> u(q.x, q.y, q.z); - T Vec3Len = length(u); - - if (Vec3Len < epsilon()) - { - if(q.w > static_cast(0)) - return qua(log(q.w), static_cast(0), static_cast(0), static_cast(0)); - else if(q.w < static_cast(0)) - return qua(log(-q.w), pi(), static_cast(0), static_cast(0)); - else - return qua(std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity(), std::numeric_limits::infinity()); - } - else - { - T t = atan(Vec3Len, T(q.w)) / Vec3Len; - T QuatLen2 = Vec3Len * Vec3Len + q.w * q.w; - return qua(static_cast(0.5) * log(QuatLen2), t * q.x, t * q.y, t * q.z); - } - } - - template - GLM_FUNC_QUALIFIER qua pow(qua const& x, T y) - { - //Raising to the power of 0 should yield 1 - //Needed to prevent a division by 0 error later on - if(y > -epsilon() && y < epsilon()) - return qua(1,0,0,0); - - //To deal with non-unit quaternions - T magnitude = sqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w *x.w); - - T Angle; - if(abs(x.w / magnitude) > cos_one_over_two()) - { - //Scalar component is close to 1; using it to recover angle would lose precision - //Instead, we use the non-scalar components since sin() is accurate around 0 - - //Prevent a division by 0 error later on - T VectorMagnitude = x.x * x.x + x.y * x.y + x.z * x.z; - if (glm::abs(VectorMagnitude - static_cast(0)) < glm::epsilon()) { - //Equivalent to raising a real number to a power - return qua(pow(x.w, y), 0, 0, 0); - } - - Angle = asin(sqrt(VectorMagnitude) / magnitude); - } - else - { - //Scalar component is small, shouldn't cause loss of precision - Angle = acos(x.w / magnitude); - } - - T NewAngle = Angle * y; - T Div = sin(NewAngle) / sin(Angle); - T Mag = pow(magnitude, y - static_cast(1)); - return qua(cos(NewAngle) * magnitude * Mag, x.x * Div * Mag, x.y * Div * Mag, x.z * Div * Mag); - } - - template - GLM_FUNC_QUALIFIER qua sqrt(qua const& x) - { - return pow(x, static_cast(0.5)); - } -}//namespace glm - - diff --git a/third_party/glm/ext/quaternion_float.hpp b/third_party/glm/ext/quaternion_float.hpp deleted file mode 100755 index ca42a60..0000000 --- a/third_party/glm/ext/quaternion_float.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/// @ref ext_quaternion_float -/// @file glm/ext/quaternion_float.hpp -/// -/// @defgroup ext_quaternion_float GLM_EXT_quaternion_float -/// @ingroup ext -/// -/// Exposes single-precision floating point quaternion type. -/// -/// Include to use the features of this extension. 
-/// -/// @see ext_quaternion_double -/// @see ext_quaternion_float_precision -/// @see ext_quaternion_common -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_transform -/// @see ext_quaternion_trigonometric - -#pragma once - -// Dependency: -#include "../detail/type_quat.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_float extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_float - /// @{ - - /// Quaternion of single-precision floating-point numbers. - typedef qua quat; - - /// @} -} //namespace glm - diff --git a/third_party/glm/ext/quaternion_float_precision.hpp b/third_party/glm/ext/quaternion_float_precision.hpp deleted file mode 100755 index f9e4f5c..0000000 --- a/third_party/glm/ext/quaternion_float_precision.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref ext_quaternion_float_precision -/// @file glm/ext/quaternion_float_precision.hpp -/// -/// @defgroup ext_quaternion_float_precision GLM_EXT_quaternion_float_precision -/// @ingroup ext -/// -/// Exposes single-precision floating point quaternion type with various precision in term of ULPs. -/// -/// Include to use the features of this extension. - -#pragma once - -// Dependency: -#include "../detail/type_quat.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_float_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_float_precision - /// @{ - - /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef qua lowp_quat; - - /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef qua mediump_quat; - - /// Quaternion of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef qua highp_quat; - - /// @} -} //namespace glm - diff --git a/third_party/glm/ext/quaternion_geometric.hpp b/third_party/glm/ext/quaternion_geometric.hpp deleted file mode 100755 index 6d98bbe..0000000 --- a/third_party/glm/ext/quaternion_geometric.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/// @ref ext_quaternion_geometric -/// @file glm/ext/quaternion_geometric.hpp -/// -/// @defgroup ext_quaternion_geometric GLM_EXT_quaternion_geometric -/// @ingroup ext -/// -/// Provides geometric functions for quaternion types -/// -/// Include to use the features of this extension. -/// -/// @see core_geometric -/// @see ext_quaternion_float -/// @see ext_quaternion_double - -#pragma once - -// Dependency: -#include "../geometric.hpp" -#include "../exponential.hpp" -#include "../ext/vector_relational.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_geometric extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_geometric - /// @{ - - /// Returns the norm of a quaternions - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_geometric - template - GLM_FUNC_DECL T length(qua const& q); - - /// Returns the normalized quaternion. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_geometric - template - GLM_FUNC_DECL qua normalize(qua const& q); - - /// Returns dot product of q1 and q2, i.e., q1[0] * q2[0] + q1[1] * q2[1] + ... 
- /// - /// @tparam T Floating-point scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_geometric - template - GLM_FUNC_DECL T dot(qua const& x, qua const& y); - - /// Compute a cross product. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_geometric - template - GLM_FUNC_QUALIFIER qua cross(qua const& q1, qua const& q2); - - /// @} -} //namespace glm - -#include "quaternion_geometric.inl" diff --git a/third_party/glm/ext/quaternion_geometric.inl b/third_party/glm/ext/quaternion_geometric.inl deleted file mode 100755 index e155ac5..0000000 --- a/third_party/glm/ext/quaternion_geometric.inl +++ /dev/null @@ -1,36 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER T dot(qua const& x, qua const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'dot' accepts only floating-point inputs"); - return detail::compute_dot, T, detail::is_aligned::value>::call(x, y); - } - - template - GLM_FUNC_QUALIFIER T length(qua const& q) - { - return glm::sqrt(dot(q, q)); - } - - template - GLM_FUNC_QUALIFIER qua normalize(qua const& q) - { - T len = length(q); - if(len <= static_cast(0)) // Problem - return qua(static_cast(1), static_cast(0), static_cast(0), static_cast(0)); - T oneOverLen = static_cast(1) / len; - return qua(q.w * oneOverLen, q.x * oneOverLen, q.y * oneOverLen, q.z * oneOverLen); - } - - template - GLM_FUNC_QUALIFIER qua cross(qua const& q1, qua const& q2) - { - return qua( - q1.w * q2.w - q1.x * q2.x - q1.y * q2.y - q1.z * q2.z, - q1.w * q2.x + q1.x * q2.w + q1.y * q2.z - q1.z * q2.y, - q1.w * q2.y + q1.y * q2.w + q1.z * q2.x - q1.x * q2.z, - q1.w * q2.z + q1.z * q2.w + q1.x * q2.y - q1.y * q2.x); - } -}//namespace glm - diff --git a/third_party/glm/ext/quaternion_relational.hpp b/third_party/glm/ext/quaternion_relational.hpp deleted file mode 100755 index 7aa121d..0000000 --- a/third_party/glm/ext/quaternion_relational.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/// @ref ext_quaternion_relational -/// @file glm/ext/quaternion_relational.hpp -/// -/// @defgroup ext_quaternion_relational GLM_EXT_quaternion_relational -/// @ingroup ext -/// -/// Exposes comparison functions for quaternion types that take a user defined epsilon values. -/// -/// Include to use the features of this extension. -/// -/// @see core_vector_relational -/// @see ext_vector_relational -/// @see ext_matrix_relational -/// @see ext_quaternion_float -/// @see ext_quaternion_double - -#pragma once - -// Dependency: -#include "../vector_relational.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_relational extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_relational - /// @{ - - /// Returns the component-wise comparison of result x == y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> equal(qua const& x, qua const& y); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> equal(qua const& x, qua const& y, T epsilon); - - /// Returns the component-wise comparison of result x != y. 
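Since q and -q encode the same orientation, the dot product declared above is the usual way to test whether two unit quaternions represent the same rotation; a minimal sketch, assuming <glm/gtc/quaternion.hpp>:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // Two unit quaternions describe the same rotation when |dot(a, b)| is close to 1.
    bool SameRotation(const glm::quat& a, const glm::quat& b, float eps = 1e-5f)
    {
        return glm::abs(glm::dot(a, b)) > 1.0f - eps;
    }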
- /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon); - - /// @} -} //namespace glm - -#include "quaternion_relational.inl" diff --git a/third_party/glm/ext/quaternion_relational.inl b/third_party/glm/ext/quaternion_relational.inl deleted file mode 100755 index b1713e9..0000000 --- a/third_party/glm/ext/quaternion_relational.inl +++ /dev/null @@ -1,35 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] == y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> equal(qua const& x, qua const& y, T epsilon) - { - vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); - return lessThan(abs(v), vec<4, T, Q>(epsilon)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] != y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> notEqual(qua const& x, qua const& y, T epsilon) - { - vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); - return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon)); - } -}//namespace glm - diff --git a/third_party/glm/ext/quaternion_transform.hpp b/third_party/glm/ext/quaternion_transform.hpp deleted file mode 100755 index a9cc5c2..0000000 --- a/third_party/glm/ext/quaternion_transform.hpp +++ /dev/null @@ -1,47 +0,0 @@ -/// @ref ext_quaternion_transform -/// @file glm/ext/quaternion_transform.hpp -/// -/// @defgroup ext_quaternion_transform GLM_EXT_quaternion_transform -/// @ingroup ext -/// -/// Provides transformation functions for quaternion types -/// -/// Include to use the features of this extension. -/// -/// @see ext_quaternion_float -/// @see ext_quaternion_double -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_trigonometric - -#pragma once - -// Dependency: -#include "../common.hpp" -#include "../trigonometric.hpp" -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_transform extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_transform - /// @{ - - /// Rotates a quaternion from a vector of 3 components axis and an angle. - /// - /// @param q Source orientation - /// @param angle Angle expressed in radians. 
- /// @param axis Axis of the rotation - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& axis); - /// @} -} //namespace glm - -#include "quaternion_transform.inl" diff --git a/third_party/glm/ext/quaternion_transform.inl b/third_party/glm/ext/quaternion_transform.inl deleted file mode 100755 index b87ecb6..0000000 --- a/third_party/glm/ext/quaternion_transform.inl +++ /dev/null @@ -1,24 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER qua rotate(qua const& q, T const& angle, vec<3, T, Q> const& v) - { - vec<3, T, Q> Tmp = v; - - // Axis of rotation must be normalised - T len = glm::length(Tmp); - if(abs(len - static_cast(1)) > static_cast(0.001)) - { - T oneOverLen = static_cast(1) / len; - Tmp.x *= oneOverLen; - Tmp.y *= oneOverLen; - Tmp.z *= oneOverLen; - } - - T const AngleRad(angle); - T const Sin = sin(AngleRad * static_cast(0.5)); - - return q * qua(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); - } -}//namespace glm - diff --git a/third_party/glm/ext/quaternion_trigonometric.hpp b/third_party/glm/ext/quaternion_trigonometric.hpp deleted file mode 100755 index 76cea27..0000000 --- a/third_party/glm/ext/quaternion_trigonometric.hpp +++ /dev/null @@ -1,63 +0,0 @@ -/// @ref ext_quaternion_trigonometric -/// @file glm/ext/quaternion_trigonometric.hpp -/// -/// @defgroup ext_quaternion_trigonometric GLM_EXT_quaternion_trigonometric -/// @ingroup ext -/// -/// Provides trigonometric functions for quaternion types -/// -/// Include to use the features of this extension. -/// -/// @see ext_quaternion_float -/// @see ext_quaternion_double -/// @see ext_quaternion_exponential -/// @see ext_quaternion_geometric -/// @see ext_quaternion_relational -/// @see ext_quaternion_transform - -#pragma once - -// Dependency: -#include "../trigonometric.hpp" -#include "../exponential.hpp" -#include "scalar_constants.hpp" -#include "vector_relational.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_quaternion_trigonometric extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_quaternion_trigonometric - /// @{ - - /// Returns the quaternion rotation angle. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL T angle(qua const& x); - - /// Returns the q rotation axis. - /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL vec<3, T, Q> axis(qua const& x); - - /// Build a quaternion from an angle and a normalized axis. - /// - /// @param angle Angle expressed in radians. - /// @param axis Axis of the quaternion, must be normalized. 
- /// - /// @tparam T A floating-point scalar type - /// @tparam Q A value from qualifier enum - template - GLM_FUNC_DECL qua angleAxis(T const& angle, vec<3, T, Q> const& axis); - - /// @} -} //namespace glm - -#include "quaternion_trigonometric.inl" diff --git a/third_party/glm/ext/quaternion_trigonometric.inl b/third_party/glm/ext/quaternion_trigonometric.inl deleted file mode 100755 index 06b7c4c..0000000 --- a/third_party/glm/ext/quaternion_trigonometric.inl +++ /dev/null @@ -1,34 +0,0 @@ -#include "scalar_constants.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T angle(qua const& x) - { - if (abs(x.w) > cos_one_over_two()) - { - return asin(sqrt(x.x * x.x + x.y * x.y + x.z * x.z)) * static_cast(2); - } - - return acos(x.w) * static_cast(2); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> axis(qua const& x) - { - T const tmp1 = static_cast(1) - x.w * x.w; - if(tmp1 <= static_cast(0)) - return vec<3, T, Q>(0, 0, 1); - T const tmp2 = static_cast(1) / sqrt(tmp1); - return vec<3, T, Q>(x.x * tmp2, x.y * tmp2, x.z * tmp2); - } - - template - GLM_FUNC_QUALIFIER qua angleAxis(T const& angle, vec<3, T, Q> const& v) - { - T const a(angle); - T const s = glm::sin(a * static_cast(0.5)); - - return qua(glm::cos(a * static_cast(0.5)), v * s); - } -}//namespace glm diff --git a/third_party/glm/ext/scalar_common.hpp b/third_party/glm/ext/scalar_common.hpp deleted file mode 100755 index 4ab0f88..0000000 --- a/third_party/glm/ext/scalar_common.hpp +++ /dev/null @@ -1,103 +0,0 @@ -/// @ref ext_scalar_common -/// @file glm/ext/scalar_common.hpp -/// -/// @defgroup ext_scalar_common GLM_EXT_scalar_common -/// @ingroup ext -/// -/// Exposes min and max functions for 3 to 4 scalar parameters. -/// -/// Include to use the features of this extension. -/// -/// @see core_func_common -/// @see ext_vector_common - -#pragma once - -// Dependency: -#include "../common.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_common extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_scalar_common - /// @{ - - /// Returns the minimum component-wise values of 3 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T min(T a, T b, T c); - - /// Returns the minimum component-wise values of 4 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T min(T a, T b, T c, T d); - - /// Returns the maximum component-wise values of 3 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T max(T a, T b, T c); - - /// Returns the maximum component-wise values of 4 inputs - /// - /// @tparam T A floating-point scalar type. - template - GLM_FUNC_DECL T max(T a, T b, T c, T d); - - /// Returns the minimum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL T fmin(T a, T b); - - /// Returns the minimum component-wise values of 3 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL T fmin(T a, T b, T c); - - /// Returns the minimum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. 
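The angleAxis helper implemented above is the usual entry point for building an orientation from an axis and an angle, which then composes with other quaternions by multiplication; a minimal sketch, assuming <glm/gtc/quaternion.hpp>:

    #include <glm/glm.hpp>
    #include <glm/gtc/quaternion.hpp>

    // A 90-degree turn around Z, applied on top of an existing orientation.
    glm::quat Spin(const glm::quat& current)
    {
        glm::quat quarterTurn = glm::angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f));
        return quarterTurn * current;
    }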
- /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL T fmin(T a, T b, T c, T d); - - /// Returns the maximum component-wise values of 2 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL T fmax(T a, T b); - - /// Returns the maximum component-wise values of 3 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL T fmax(T a, T b, T C); - - /// Returns the maximum component-wise values of 4 inputs. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam T A floating-point scalar type. - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL T fmax(T a, T b, T C, T D); - - /// @} -}//namespace glm - -#include "scalar_common.inl" diff --git a/third_party/glm/ext/scalar_common.inl b/third_party/glm/ext/scalar_common.inl deleted file mode 100755 index 118a670..0000000 --- a/third_party/glm/ext/scalar_common.inl +++ /dev/null @@ -1,115 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER T min(T a, T b, T c) - { - return glm::min(glm::min(a, b), c); - } - - template - GLM_FUNC_QUALIFIER T min(T a, T b, T c, T d) - { - return glm::min(glm::min(a, b), glm::min(c, d)); - } - - template - GLM_FUNC_QUALIFIER T max(T a, T b, T c) - { - return glm::max(glm::max(a, b), c); - } - - template - GLM_FUNC_QUALIFIER T max(T a, T b, T c, T d) - { - return glm::max(glm::max(a, b), glm::max(c, d)); - } - -# if GLM_HAS_CXX11_STL - using std::fmin; -# else - template - GLM_FUNC_QUALIFIER T fmin(T a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point input"); - - if (isnan(a)) - return b; - return min(a, b); - } -# endif - - template - GLM_FUNC_QUALIFIER T fmin(T a, T b, T c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point input"); - - if (isnan(a)) - return fmin(b, c); - if (isnan(b)) - return fmin(a, c); - if (isnan(c)) - return min(a, b); - return min(a, b, c); - } - - template - GLM_FUNC_QUALIFIER T fmin(T a, T b, T c, T d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point input"); - - if (isnan(a)) - return fmin(b, c, d); - if (isnan(b)) - return min(a, fmin(c, d)); - if (isnan(c)) - return fmin(min(a, b), d); - if (isnan(d)) - return min(a, b, c); - return min(a, b, c, d); - } - - -# if GLM_HAS_CXX11_STL - using std::fmax; -# else - template - GLM_FUNC_QUALIFIER T fmax(T a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(a)) - return b; - return max(a, b); - } -# endif - - template - GLM_FUNC_QUALIFIER T fmax(T a, T b, T c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(a)) - return fmax(b, c); - if (isnan(b)) - return fmax(a, c); - if (isnan(c)) - return max(a, b); - return max(a, b, c); - } - - template - GLM_FUNC_QUALIFIER T fmax(T a, T b, T c, T d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(a)) - return fmax(b, c, d); - if (isnan(b)) - return max(a, fmax(c, d)); - if (isnan(c)) - return fmax(max(a, b), d); - if (isnan(d)) - return max(a, b, c); - return max(a, b, c, d); - } -}//namespace glm 
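The fmin/fmax overloads above differ from plain min/max only in how NaN is handled: a NaN argument is skipped rather than propagated, matching std::fmin and std::fmax; a minimal sketch:

    #include <limits>
    #include <glm/glm.hpp>
    #include <glm/ext/scalar_common.hpp>

    void NanExample()
    {
        const float nan = std::numeric_limits<float>::quiet_NaN();
        float a = glm::min(nan, 1.0f);  // NaN: comparisons against NaN are false, so it falls through
        float b = glm::fmin(nan, 1.0f); // 1.0f: the NaN operand is ignored
        (void)a; (void)b;
    }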
diff --git a/third_party/glm/ext/scalar_constants.hpp b/third_party/glm/ext/scalar_constants.hpp deleted file mode 100755 index 74e210d..0000000 --- a/third_party/glm/ext/scalar_constants.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/// @ref ext_scalar_constants -/// @file glm/ext/scalar_constants.hpp -/// -/// @defgroup ext_scalar_constants GLM_EXT_scalar_constants -/// @ingroup ext -/// -/// Provides a list of constants and precomputed useful values. -/// -/// Include to use the features of this extension. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_constants extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_scalar_constants - /// @{ - - /// Return the epsilon constant for floating point types. - template - GLM_FUNC_DECL GLM_CONSTEXPR genType epsilon(); - - /// Return the pi constant for floating point types. - template - GLM_FUNC_DECL GLM_CONSTEXPR genType pi(); - - /// Return the value of cos(1 / 2) for floating point types. - template - GLM_FUNC_DECL GLM_CONSTEXPR genType cos_one_over_two(); - - /// @} -} //namespace glm - -#include "scalar_constants.inl" diff --git a/third_party/glm/ext/scalar_constants.inl b/third_party/glm/ext/scalar_constants.inl deleted file mode 100755 index b475adf..0000000 --- a/third_party/glm/ext/scalar_constants.inl +++ /dev/null @@ -1,24 +0,0 @@ -#include - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType epsilon() - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'epsilon' only accepts floating-point inputs"); - return std::numeric_limits::epsilon(); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType pi() - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'pi' only accepts floating-point inputs"); - return static_cast(3.14159265358979323846264338327950288); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType cos_one_over_two() - { - return genType(0.877582561890372716130286068203503191); - } -} //namespace glm diff --git a/third_party/glm/ext/scalar_int_sized.hpp b/third_party/glm/ext/scalar_int_sized.hpp deleted file mode 100755 index 8e9c511..0000000 --- a/third_party/glm/ext/scalar_int_sized.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/// @ref ext_scalar_int_sized -/// @file glm/ext/scalar_int_sized.hpp -/// -/// @defgroup ext_scalar_int_sized GLM_EXT_scalar_int_sized -/// @ingroup ext -/// -/// Exposes sized signed integer scalar types. -/// -/// Include to use the features of this extension. -/// -/// @see ext_scalar_uint_sized - -#pragma once - -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_int_sized extension included") -#endif - -namespace glm{ -namespace detail -{ -# if GLM_HAS_EXTENDED_INTEGER_TYPE - typedef std::int8_t int8; - typedef std::int16_t int16; - typedef std::int32_t int32; -# else - typedef signed char int8; - typedef signed short int16; - typedef signed int int32; -#endif// - - template<> - struct is_int - { - enum test {value = ~0}; - }; - - template<> - struct is_int - { - enum test {value = ~0}; - }; - - template<> - struct is_int - { - enum test {value = ~0}; - }; -}//namespace detail - - - /// @addtogroup ext_scalar_int_sized - /// @{ - - /// 8 bit signed integer type. - typedef detail::int8 int8; - - /// 16 bit signed integer type. - typedef detail::int16 int16; - - /// 32 bit signed integer type. 
- typedef detail::int32 int32; - - /// 64 bit signed integer type. - typedef detail::int64 int64; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/scalar_integer.hpp b/third_party/glm/ext/scalar_integer.hpp deleted file mode 100755 index a2ca8a2..0000000 --- a/third_party/glm/ext/scalar_integer.hpp +++ /dev/null @@ -1,92 +0,0 @@ -/// @ref ext_scalar_integer -/// @file glm/ext/scalar_integer.hpp -/// -/// @see core (dependence) -/// -/// @defgroup ext_scalar_integer GLM_EXT_scalar_integer -/// @ingroup ext -/// -/// Include to use the features of this extension. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "../detail/type_float.hpp" -#include "../vector_relational.hpp" -#include "../common.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_integer extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_scalar_integer - /// @{ - - /// Return true if the value is a power of two number. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL bool isPowerOfTwo(genIUType v); - - /// Return the power of two number which value is just higher the input value, - /// round up to a power of two. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType nextPowerOfTwo(genIUType v); - - /// Return the power of two number which value is just lower the input value, - /// round down to a power of two. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType prevPowerOfTwo(genIUType v); - - /// Return true if the 'Value' is a multiple of 'Multiple'. - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL bool isMultiple(genIUType v, genIUType Multiple); - - /// Higher multiple number of Source. - /// - /// @tparam genIUType Integer scalar or vector types. - /// - /// @param v Source value to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType nextMultiple(genIUType v, genIUType Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam genIUType Integer scalar or vector types. - /// - /// @param v Source value to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL genIUType prevMultiple(genIUType v, genIUType Multiple); - - /// Returns the bit number of the Nth significant bit set to - /// 1 in the binary representation of value. - /// If value bitcount is less than the Nth significant bit, -1 will be returned. - /// - /// @tparam genIUType Signed or unsigned integer scalar types. 
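These scalar integer helpers are handy for alignment and sizing computations, for example rounding a buffer size up to a power of two or to a multiple of an alignment; a minimal sketch, assuming <glm/ext/scalar_integer.hpp>:

    #include <cstdint>
    #include <glm/glm.hpp>
    #include <glm/ext/scalar_integer.hpp>

    // Round a size up both to the next power of two and to the next multiple of an alignment.
    std::uint32_t PadSize(std::uint32_t size, std::uint32_t alignment)
    {
        std::uint32_t pot     = glm::nextPowerOfTwo(size);          // e.g. 300 -> 512
        std::uint32_t aligned = glm::nextMultiple(size, alignment); // e.g. (300, 256) -> 512
        return glm::max(pot, aligned);
    }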
- /// - /// @see ext_scalar_integer - template - GLM_FUNC_DECL int findNSB(genIUType x, int significantBitCount); - - /// @} -} //namespace glm - -#include "scalar_integer.inl" diff --git a/third_party/glm/ext/scalar_integer.inl b/third_party/glm/ext/scalar_integer.inl deleted file mode 100755 index efba960..0000000 --- a/third_party/glm/ext/scalar_integer.inl +++ /dev/null @@ -1,243 +0,0 @@ -#include "../integer.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_ceilShift - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T) - { - return v; - } - }; - - template - struct compute_ceilShift - { - GLM_FUNC_QUALIFIER static vec call(vec const& v, T Shift) - { - return v | (v >> Shift); - } - }; - - template - struct compute_ceilPowerOfTwo - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); - - vec const Sign(sign(x)); - - vec v(abs(x)); - - v = v - static_cast(1); - v = v | (v >> static_cast(1)); - v = v | (v >> static_cast(2)); - v = v | (v >> static_cast(4)); - v = compute_ceilShift= 2>::call(v, 8); - v = compute_ceilShift= 4>::call(v, 16); - v = compute_ceilShift= 8>::call(v, 32); - return (v + static_cast(1)) * Sign; - } - }; - - template - struct compute_ceilPowerOfTwo - { - GLM_FUNC_QUALIFIER static vec call(vec const& x) - { - GLM_STATIC_ASSERT(!std::numeric_limits::is_iec559, "'ceilPowerOfTwo' only accept integer scalar or vector inputs"); - - vec v(x); - - v = v - static_cast(1); - v = v | (v >> static_cast(1)); - v = v | (v >> static_cast(2)); - v = v | (v >> static_cast(4)); - v = compute_ceilShift= 2>::call(v, 8); - v = compute_ceilShift= 4>::call(v, 16); - v = compute_ceilShift= 8>::call(v, 32); - return v + static_cast(1); - } - }; - - template - struct compute_ceilMultiple{}; - - template<> - struct compute_ceilMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source > genType(0)) - return Source + (Multiple - std::fmod(Source, Multiple)); - else - return Source + std::fmod(-Source, Multiple); - } - }; - - template<> - struct compute_ceilMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - genType Tmp = Source - genType(1); - return Tmp + (Multiple - (Tmp % Multiple)); - } - }; - - template<> - struct compute_ceilMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - assert(Multiple > genType(0)); - if(Source > genType(0)) - { - genType Tmp = Source - genType(1); - return Tmp + (Multiple - (Tmp % Multiple)); - } - else - return Source + (-Source % Multiple); - } - }; - - template - struct compute_floorMultiple{}; - - template<> - struct compute_floorMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source >= genType(0)) - return Source - std::fmod(Source, Multiple); - else - return Source - std::fmod(Source, Multiple) - Multiple; - } - }; - - template<> - struct compute_floorMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source >= genType(0)) - return Source - Source % Multiple; - else - { - genType Tmp = Source + genType(1); - return Tmp - Tmp % Multiple - Multiple; - } - } - }; - - template<> - struct compute_floorMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if(Source >= genType(0)) - return 
Source - Source % Multiple; - else - { - genType Tmp = Source + genType(1); - return Tmp - Tmp % Multiple - Multiple; - } - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER bool isPowerOfTwo(genIUType Value) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isPowerOfTwo' only accept integer inputs"); - - genIUType const Result = glm::abs(Value); - return !(Result & (Result - 1)); - } - - template - GLM_FUNC_QUALIFIER genIUType nextPowerOfTwo(genIUType value) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextPowerOfTwo' only accept integer inputs"); - - return detail::compute_ceilPowerOfTwo<1, genIUType, defaultp, std::numeric_limits::is_signed>::call(vec<1, genIUType, defaultp>(value)).x; - } - - template - GLM_FUNC_QUALIFIER genIUType prevPowerOfTwo(genIUType value) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevPowerOfTwo' only accept integer inputs"); - - return isPowerOfTwo(value) ? value : static_cast(static_cast(1) << static_cast(findMSB(value))); - } - - template - GLM_FUNC_QUALIFIER bool isMultiple(genIUType Value, genIUType Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isMultiple' only accept integer inputs"); - - return isMultiple(vec<1, genIUType>(Value), vec<1, genIUType>(Multiple)).x; - } - - template - GLM_FUNC_QUALIFIER genIUType nextMultiple(genIUType Source, genIUType Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextMultiple' only accept integer inputs"); - - return detail::compute_ceilMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); - } - - template - GLM_FUNC_QUALIFIER genIUType prevMultiple(genIUType Source, genIUType Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevMultiple' only accept integer inputs"); - - return detail::compute_floorMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); - } - - template - GLM_FUNC_QUALIFIER int findNSB(genIUType x, int significantBitCount) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findNSB' only accept integer inputs"); - - if(bitCount(x) < significantBitCount) - return -1; - - genIUType const One = static_cast(1); - int bitPos = 0; - - genIUType key = x; - int nBitCount = significantBitCount; - int Step = sizeof(x) * 8 / 2; - while (key > One) - { - genIUType Mask = static_cast((One << Step) - One); - genIUType currentKey = key & Mask; - int currentBitCount = bitCount(currentKey); - if (nBitCount > currentBitCount) - { - nBitCount -= currentBitCount; - bitPos += Step; - key >>= static_cast(Step); - } - else - { - key = key & Mask; - } - - Step >>= 1; - } - - return static_cast(bitPos); - } -}//namespace glm diff --git a/third_party/glm/ext/scalar_relational.hpp b/third_party/glm/ext/scalar_relational.hpp deleted file mode 100755 index 3076a5e..0000000 --- a/third_party/glm/ext/scalar_relational.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/// @ref ext_scalar_relational -/// @file glm/ext/scalar_relational.hpp -/// -/// @defgroup ext_scalar_relational GLM_EXT_scalar_relational -/// @ingroup ext -/// -/// Exposes comparison functions for scalar types that take a user defined epsilon values. -/// -/// Include to use the features of this extension. 
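As a quick illustration of the comparison helpers this removed extension exposes, a small sketch (the values and epsilon are arbitrary; it assumes GLM remains available to client code):

#include <glm/ext/scalar_relational.hpp>
#include <cstdio>

int main()
{
	float const a = 0.1f + 0.2f;
	float const b = 0.3f;

	// Absolute-epsilon comparison: |a - b| <= 1e-4f.
	bool const closeEnough = glm::equal(a, b, 0.0001f);

	// ULP-based comparison: treat the values as equal if they are at most 4 ULPs apart.
	bool const withinUlps = glm::equal(a, b, 4);

	std::printf("%d %d\n", static_cast<int>(closeEnough), static_cast<int>(withinUlps));
	return 0;
}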
-/// -/// @see core_vector_relational -/// @see ext_vector_relational -/// @see ext_matrix_relational - -#pragma once - -// Dependencies -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_relational extension included") -#endif - -namespace glm -{ - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @tparam genType Floating-point or integer scalar types - template - GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied. - /// - /// @tparam genType Floating-point or integer scalar types - template - GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon); - - /// Returns the component-wise comparison between two scalars in term of ULPs. - /// True if this expression is satisfied. - /// - /// @param x First operand. - /// @param y Second operand. - /// @param ULPs Maximum difference in ULPs between the two operators to consider them equal. - /// - /// @tparam genType Floating-point or integer scalar types - template - GLM_FUNC_DECL GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int ULPs); - - /// Returns the component-wise comparison between two scalars in term of ULPs. - /// True if this expression is not satisfied. - /// - /// @param x First operand. - /// @param y Second operand. - /// @param ULPs Maximum difference in ULPs between the two operators to consider them not equal. - /// - /// @tparam genType Floating-point or integer scalar types - template - GLM_FUNC_DECL GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs); - - /// @} -}//namespace glm - -#include "scalar_relational.inl" diff --git a/third_party/glm/ext/scalar_relational.inl b/third_party/glm/ext/scalar_relational.inl deleted file mode 100755 index c85583e..0000000 --- a/third_party/glm/ext/scalar_relational.inl +++ /dev/null @@ -1,40 +0,0 @@ -#include "../common.hpp" -#include "../ext/scalar_int_sized.hpp" -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/type_float.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, genType const& epsilon) - { - return abs(x - y) <= epsilon; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, genType const& epsilon) - { - return abs(x - y) > epsilon; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool equal(genType const& x, genType const& y, int MaxULPs) - { - detail::float_t const a(x); - detail::float_t const b(y); - - // Different signs means they do not match. - if(a.negative() != b.negative()) - return false; - - // Find the difference in ULPs. 
- typename detail::float_t::int_type const DiffULPs = abs(a.i - b.i); - return DiffULPs <= MaxULPs; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR bool notEqual(genType const& x, genType const& y, int ULPs) - { - return !equal(x, y, ULPs); - } -}//namespace glm diff --git a/third_party/glm/ext/scalar_uint_sized.hpp b/third_party/glm/ext/scalar_uint_sized.hpp deleted file mode 100755 index fd5267f..0000000 --- a/third_party/glm/ext/scalar_uint_sized.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/// @ref ext_scalar_uint_sized -/// @file glm/ext/scalar_uint_sized.hpp -/// -/// @defgroup ext_scalar_uint_sized GLM_EXT_scalar_uint_sized -/// @ingroup ext -/// -/// Exposes sized unsigned integer scalar types. -/// -/// Include to use the features of this extension. -/// -/// @see ext_scalar_int_sized - -#pragma once - -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_uint_sized extension included") -#endif - -namespace glm{ -namespace detail -{ -# if GLM_HAS_EXTENDED_INTEGER_TYPE - typedef std::uint8_t uint8; - typedef std::uint16_t uint16; - typedef std::uint32_t uint32; -# else - typedef unsigned char uint8; - typedef unsigned short uint16; - typedef unsigned int uint32; -#endif - - template<> - struct is_int - { - enum test {value = ~0}; - }; - - template<> - struct is_int - { - enum test {value = ~0}; - }; - - template<> - struct is_int - { - enum test {value = ~0}; - }; -}//namespace detail - - - /// @addtogroup ext_scalar_uint_sized - /// @{ - - /// 8 bit unsigned integer type. - typedef detail::uint8 uint8; - - /// 16 bit unsigned integer type. - typedef detail::uint16 uint16; - - /// 32 bit unsigned integer type. - typedef detail::uint32 uint32; - - /// 64 bit unsigned integer type. - typedef detail::uint64 uint64; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/scalar_ulp.hpp b/third_party/glm/ext/scalar_ulp.hpp deleted file mode 100755 index 941ada3..0000000 --- a/third_party/glm/ext/scalar_ulp.hpp +++ /dev/null @@ -1,74 +0,0 @@ -/// @ref ext_scalar_ulp -/// @file glm/ext/scalar_ulp.hpp -/// -/// @defgroup ext_scalar_ulp GLM_EXT_scalar_ulp -/// @ingroup ext -/// -/// Allow the measurement of the accuracy of a function against a reference -/// implementation. This extension works on floating-point data and provide results -/// in ULP. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_ulp -/// @see ext_scalar_relational - -#pragma once - -// Dependencies -#include "../ext/scalar_int_sized.hpp" -#include "../common.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_scalar_ulp extension included") -#endif - -namespace glm -{ - /// Return the next ULP value(s) after the input value(s). - /// - /// @tparam genType A floating-point scalar type. - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL genType nextFloat(genType x); - - /// Return the previous ULP value(s) before the input value(s). - /// - /// @tparam genType A floating-point scalar type. - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL genType prevFloat(genType x); - - /// Return the value(s) ULP distance after the input value(s). - /// - /// @tparam genType A floating-point scalar type. - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL genType nextFloat(genType x, int ULPs); - - /// Return the value(s) ULP distance before the input value(s). 
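A short sketch of how the nextFloat/prevFloat/floatDistance functions from this removed extension are typically used (illustrative only; assumes the GLM include path is set up):

#include <glm/ext/scalar_ulp.hpp>
#include <cstdio>

int main()
{
	float const x = 1.0f;
	float const up = glm::nextFloat(x);   // smallest float strictly greater than 1.0f
	float const down = glm::prevFloat(x); // largest float strictly smaller than 1.0f

	// floatDistance counts the representable floats between two values (in ULPs).
	std::printf("%d %d\n", glm::floatDistance(x, up), glm::floatDistance(down, x)); // 1 1
	return 0;
}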
- /// - /// @tparam genType A floating-point scalar type. - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL genType prevFloat(genType x, int ULPs); - - /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. - /// - /// @see ext_scalar_ulp - GLM_FUNC_DECL int floatDistance(float x, float y); - - /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. - /// - /// @see ext_scalar_ulp - GLM_FUNC_DECL int64 floatDistance(double x, double y); - - /// @} -}//namespace glm - -#include "scalar_ulp.inl" diff --git a/third_party/glm/ext/scalar_ulp.inl b/third_party/glm/ext/scalar_ulp.inl deleted file mode 100755 index 308df15..0000000 --- a/third_party/glm/ext/scalar_ulp.inl +++ /dev/null @@ -1,284 +0,0 @@ -/// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. -/// -/// Developed at SunPro, a Sun Microsystems, Inc. business. -/// Permission to use, copy, modify, and distribute this -/// software is freely granted, provided that this notice -/// is preserved. - -#include "../detail/type_float.hpp" -#include "../ext/scalar_constants.hpp" -#include -#include - -#if(GLM_COMPILER & GLM_COMPILER_VC) -# pragma warning(push) -# pragma warning(disable : 4127) -#endif - -typedef union -{ - float value; - /* FIXME: Assumes 32 bit int. */ - unsigned int word; -} ieee_float_shape_type; - -typedef union -{ - double value; - struct - { - int lsw; - int msw; - } parts; -} ieee_double_shape_type; - -#define GLM_EXTRACT_WORDS(ix0,ix1,d) \ - do { \ - ieee_double_shape_type ew_u; \ - ew_u.value = (d); \ - (ix0) = ew_u.parts.msw; \ - (ix1) = ew_u.parts.lsw; \ - } while (0) - -#define GLM_GET_FLOAT_WORD(i,d) \ - do { \ - ieee_float_shape_type gf_u; \ - gf_u.value = (d); \ - (i) = gf_u.word; \ - } while (0) - -#define GLM_SET_FLOAT_WORD(d,i) \ - do { \ - ieee_float_shape_type sf_u; \ - sf_u.word = (i); \ - (d) = sf_u.value; \ - } while (0) - -#define GLM_INSERT_WORDS(d,ix0,ix1) \ - do { \ - ieee_double_shape_type iw_u; \ - iw_u.parts.msw = (ix0); \ - iw_u.parts.lsw = (ix1); \ - (d) = iw_u.value; \ - } while (0) - -namespace glm{ -namespace detail -{ - GLM_FUNC_QUALIFIER float nextafterf(float x, float y) - { - volatile float t; - int hx, hy, ix, iy; - - GLM_GET_FLOAT_WORD(hx, x); - GLM_GET_FLOAT_WORD(hy, y); - ix = hx & 0x7fffffff; // |x| - iy = hy & 0x7fffffff; // |y| - - if((ix > 0x7f800000) || // x is nan - (iy > 0x7f800000)) // y is nan - return x + y; - if(abs(y - x) <= epsilon()) - return y; // x=y, return y - if(ix == 0) - { // x == 0 - GLM_SET_FLOAT_WORD(x, (hy & 0x80000000) | 1);// return +-minsubnormal - t = x * x; - if(abs(t - x) <= epsilon()) - return t; - else - return x; // raise underflow flag - } - if(hx >= 0) - { // x > 0 - if(hx > hy) // x > y, x -= ulp - hx -= 1; - else // x < y, x += ulp - hx += 1; - } - else - { // x < 0 - if(hy >= 0 || hx > hy) // x < y, x -= ulp - hx -= 1; - else // x > y, x += ulp - hx += 1; - } - hy = hx & 0x7f800000; - if(hy >= 0x7f800000) - return x + x; // overflow - if(hy < 0x00800000) // underflow - { - t = x * x; - if(abs(t - x) > epsilon()) - { // raise underflow flag - GLM_SET_FLOAT_WORD(y, hx); - return y; - } - } - GLM_SET_FLOAT_WORD(x, hx); - return x; - } - - GLM_FUNC_QUALIFIER double nextafter(double x, double y) - { - volatile double t; - int hx, hy, ix, iy; - unsigned int lx, ly; - - GLM_EXTRACT_WORDS(hx, lx, x); - GLM_EXTRACT_WORDS(hy, ly, y); - ix = hx & 0x7fffffff; // |x| - iy = hy & 0x7fffffff; // |y| - - if(((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 
0) || // x is nan - ((iy >= 0x7ff00000) && ((iy - 0x7ff00000) | ly) != 0)) // y is nan - return x + y; - if(abs(y - x) <= epsilon()) - return y; // x=y, return y - if((ix | lx) == 0) - { // x == 0 - GLM_INSERT_WORDS(x, hy & 0x80000000, 1); // return +-minsubnormal - t = x * x; - if(abs(t - x) <= epsilon()) - return t; - else - return x; // raise underflow flag - } - if(hx >= 0) { // x > 0 - if(hx > hy || ((hx == hy) && (lx > ly))) { // x > y, x -= ulp - if(lx == 0) hx -= 1; - lx -= 1; - } - else { // x < y, x += ulp - lx += 1; - if(lx == 0) hx += 1; - } - } - else { // x < 0 - if(hy >= 0 || hx > hy || ((hx == hy) && (lx > ly))){// x < y, x -= ulp - if(lx == 0) hx -= 1; - lx -= 1; - } - else { // x > y, x += ulp - lx += 1; - if(lx == 0) hx += 1; - } - } - hy = hx & 0x7ff00000; - if(hy >= 0x7ff00000) - return x + x; // overflow - if(hy < 0x00100000) - { // underflow - t = x * x; - if(abs(t - x) > epsilon()) - { // raise underflow flag - GLM_INSERT_WORDS(y, hx, lx); - return y; - } - } - GLM_INSERT_WORDS(x, hx, lx); - return x; - } -}//namespace detail -}//namespace glm - -#if(GLM_COMPILER & GLM_COMPILER_VC) -# pragma warning(pop) -#endif - -namespace glm -{ - template<> - GLM_FUNC_QUALIFIER float nextFloat(float x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::max()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafterf(x, FLT_MAX); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafterf(x, FLT_MAX); -# else - return nextafterf(x, FLT_MAX); -# endif - } - - template<> - GLM_FUNC_QUALIFIER double nextFloat(double x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::max()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafter(x, std::numeric_limits::max()); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafter(x, DBL_MAX); -# else - return nextafter(x, DBL_MAX); -# endif - } - - template - GLM_FUNC_QUALIFIER T nextFloat(T x, int ULPs) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'next_float' only accept floating-point input"); - assert(ULPs >= 0); - - T temp = x; - for(int i = 0; i < ULPs; ++i) - temp = nextFloat(temp); - return temp; - } - - GLM_FUNC_QUALIFIER float prevFloat(float x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::min()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafterf(x, FLT_MIN); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafterf(x, FLT_MIN); -# else - return nextafterf(x, FLT_MIN); -# endif - } - - GLM_FUNC_QUALIFIER double prevFloat(double x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::min()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return _nextafter(x, DBL_MIN); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafter(x, DBL_MIN); -# else - return nextafter(x, DBL_MIN); -# endif - } - - template - GLM_FUNC_QUALIFIER T prevFloat(T x, int ULPs) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'prev_float' only accept floating-point input"); - assert(ULPs >= 0); - - T temp = x; - for(int i = 0; i < ULPs; ++i) - temp = prevFloat(temp); - return temp; - } - - GLM_FUNC_QUALIFIER int 
floatDistance(float x, float y) - { - detail::float_t const a(x); - detail::float_t const b(y); - - return abs(a.i - b.i); - } - - GLM_FUNC_QUALIFIER int64 floatDistance(double x, double y) - { - detail::float_t const a(x); - detail::float_t const b(y); - - return abs(a.i - b.i); - } -}//namespace glm diff --git a/third_party/glm/ext/vector_bool1.hpp b/third_party/glm/ext/vector_bool1.hpp deleted file mode 100755 index 002c320..0000000 --- a/third_party/glm/ext/vector_bool1.hpp +++ /dev/null @@ -1,30 +0,0 @@ -/// @ref ext_vector_bool1 -/// @file glm/ext/vector_bool1.hpp -/// -/// @defgroup ext_vector_bool1 GLM_EXT_vector_bool1 -/// @ingroup ext -/// -/// Exposes bvec1 vector type. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_bool1_precision extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_bool1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_bool1 - /// @{ - - /// 1 components vector of boolean. - typedef vec<1, bool, defaultp> bvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool1_precision.hpp b/third_party/glm/ext/vector_bool1_precision.hpp deleted file mode 100755 index e62d3cf..0000000 --- a/third_party/glm/ext/vector_bool1_precision.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/// @ref ext_vector_bool1_precision -/// @file glm/ext/vector_bool1_precision.hpp -/// -/// @defgroup ext_vector_bool1_precision GLM_EXT_vector_bool1_precision -/// @ingroup ext -/// -/// Exposes highp_bvec1, mediump_bvec1 and lowp_bvec1 types. -/// -/// Include to use the features of this extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_bool1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_bool1_precision - /// @{ - - /// 1 component vector of bool values. - typedef vec<1, bool, highp> highp_bvec1; - - /// 1 component vector of bool values. - typedef vec<1, bool, mediump> mediump_bvec1; - - /// 1 component vector of bool values. - typedef vec<1, bool, lowp> lowp_bvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool2.hpp b/third_party/glm/ext/vector_bool2.hpp deleted file mode 100755 index 52288b7..0000000 --- a/third_party/glm/ext/vector_bool2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of boolean. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, bool, defaultp> bvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool2_precision.hpp b/third_party/glm/ext/vector_bool2_precision.hpp deleted file mode 100755 index 4370933..0000000 --- a/third_party/glm/ext/vector_bool2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, bool, highp> highp_bvec2; - - /// 2 components vector of medium qualifier bool numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, bool, mediump> mediump_bvec2; - - /// 2 components vector of low qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, bool, lowp> lowp_bvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool3.hpp b/third_party/glm/ext/vector_bool3.hpp deleted file mode 100755 index 90a0b7e..0000000 --- a/third_party/glm/ext/vector_bool3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of boolean. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, bool, defaultp> bvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool3_precision.hpp b/third_party/glm/ext/vector_bool3_precision.hpp deleted file mode 100755 index 89cd2d3..0000000 --- a/third_party/glm/ext/vector_bool3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, bool, highp> highp_bvec3; - - /// 3 components vector of medium qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, bool, mediump> mediump_bvec3; - - /// 3 components vector of low qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, bool, lowp> lowp_bvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool4.hpp b/third_party/glm/ext/vector_bool4.hpp deleted file mode 100755 index 18aa71b..0000000 --- a/third_party/glm/ext/vector_bool4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of boolean. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, bool, defaultp> bvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_bool4_precision.hpp b/third_party/glm/ext/vector_bool4_precision.hpp deleted file mode 100755 index 79786e5..0000000 --- a/third_party/glm/ext/vector_bool4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_bool4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, bool, highp> highp_bvec4; - - /// 4 components vector of medium qualifier bool numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, bool, mediump> mediump_bvec4; - - /// 4 components vector of low qualifier bool numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, bool, lowp> lowp_bvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_common.hpp b/third_party/glm/ext/vector_common.hpp deleted file mode 100755 index 324fe1c..0000000 --- a/third_party/glm/ext/vector_common.hpp +++ /dev/null @@ -1,144 +0,0 @@ -/// @ref ext_vector_common -/// @file glm/ext/vector_common.hpp -/// -/// @defgroup ext_vector_common GLM_EXT_vector_common -/// @ingroup ext -/// -/// Exposes min and max functions for 3 to 4 vector parameters. -/// -/// Include to use the features of this extension. -/// -/// @see core_common -/// @see ext_scalar_common - -#pragma once - -// Dependency: -#include "../ext/scalar_common.hpp" -#include "../common.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_common extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_common - /// @{ - - /// Return the minimum component-wise values of 3 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& a, vec const& b, vec const& c); - - /// Return the minimum component-wise values of 4 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec min(vec const& a, vec const& b, vec const& c, vec const& d); - - /// Return the maximum component-wise values of 3 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z); - - /// Return the maximum component-wise values of 4 inputs - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec max( vec const& x, vec const& y, vec const& z, vec const& w); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& x, T y); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& x, vec const& y); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& a, vec const& b, vec const& c); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmin documentation - template - GLM_FUNC_DECL vec fmin(vec const& a, vec const& b, vec const& c, vec const& d); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, T b); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, vec const& b); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, vec const& b, vec const& c); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. 
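For reference, a minimal sketch of the component-wise fmin/fmax overloads documented here, showing the NaN fallback behaviour (hypothetical values; glm::vec3 and NAN come from the GLM and <cmath> headers):

#include <glm/ext/vector_common.hpp>
#include <glm/ext/vector_float3.hpp>
#include <cmath>
#include <cstdio>

int main()
{
	glm::vec3 const a(1.0f, 5.0f, NAN);
	glm::vec3 const b(2.0f, 4.0f, 3.0f);

	// Component-wise min/max; a NaN component falls back to the other operand.
	glm::vec3 const lo = glm::fmin(a, b); // (1, 4, 3)
	glm::vec3 const hi = glm::fmax(a, b); // (2, 5, 3)

	std::printf("%g %g %g\n", lo.x, lo.y, lo.z);
	std::printf("%g %g %g\n", hi.x, hi.y, hi.z);
	return 0;
}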
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see std::fmax documentation - template - GLM_FUNC_DECL vec fmax(vec const& a, vec const& b, vec const& c, vec const& d); - - /// @} -}//namespace glm - -#include "vector_common.inl" diff --git a/third_party/glm/ext/vector_common.inl b/third_party/glm/ext/vector_common.inl deleted file mode 100755 index 71f3809..0000000 --- a/third_party/glm/ext/vector_common.inl +++ /dev/null @@ -1,88 +0,0 @@ -#include "../detail/_vectorize.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& x, vec const& y, vec const& z) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'min' only accept floating-point or integer inputs"); - return glm::min(glm::min(x, y), z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec min(vec const& x, vec const& y, vec const& z, vec const& w) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'min' only accept floating-point or integer inputs"); - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'max' only accept floating-point or integer inputs"); - return glm::max(glm::max(x, y), z); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec max(vec const& x, vec const& y, vec const& z, vec const& w) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'max' only accept floating-point or integer inputs"); - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return detail::functor2::call(fmin, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return detail::functor2::call(fmin, a, b); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b, vec const& c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return fmin(fmin(a, b), c); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b, vec const& c, vec const& d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point inputs"); - return fmin(fmin(a, b), fmin(c, d)); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, T b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return detail::functor2::call(fmax, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return detail::functor2::call(fmax, a, b); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b, vec const& c) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return fmax(fmax(a, b), c); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b, vec const& c, vec const& d) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point inputs"); - return fmax(fmax(a, b), fmax(c, d)); - } -}//namespace glm diff --git 
a/third_party/glm/ext/vector_double1.hpp b/third_party/glm/ext/vector_double1.hpp deleted file mode 100755 index 3882667..0000000 --- a/third_party/glm/ext/vector_double1.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref ext_vector_double1 -/// @file glm/ext/vector_double1.hpp -/// -/// @defgroup ext_vector_double1 GLM_EXT_vector_double1 -/// @ingroup ext -/// -/// Exposes double-precision floating point vector type with one component. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_double1_precision extension. -/// @see ext_vector_float1 extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_double1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_double1 - /// @{ - - /// 1 components vector of double-precision floating-point numbers. - typedef vec<1, double, defaultp> dvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double1_precision.hpp b/third_party/glm/ext/vector_double1_precision.hpp deleted file mode 100755 index 1d47195..0000000 --- a/third_party/glm/ext/vector_double1_precision.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref ext_vector_double1_precision -/// @file glm/ext/vector_double1_precision.hpp -/// -/// @defgroup ext_vector_double1_precision GLM_EXT_vector_double1_precision -/// @ingroup ext -/// -/// Exposes highp_dvec1, mediump_dvec1 and lowp_dvec1 types. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_double1 - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_double1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_double1_precision - /// @{ - - /// 1 component vector of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, double, highp> highp_dvec1; - - /// 1 component vector of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, double, mediump> mediump_dvec1; - - /// 1 component vector of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, double, lowp> lowp_dvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double2.hpp b/third_party/glm/ext/vector_double2.hpp deleted file mode 100755 index 60e3577..0000000 --- a/third_party/glm/ext/vector_double2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, double, defaultp> dvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double2_precision.hpp b/third_party/glm/ext/vector_double2_precision.hpp deleted file mode 100755 index fa53940..0000000 --- a/third_party/glm/ext/vector_double2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high double-qualifier floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, double, highp> highp_dvec2; - - /// 2 components vector of medium double-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, double, mediump> mediump_dvec2; - - /// 2 components vector of low double-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, double, lowp> lowp_dvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double3.hpp b/third_party/glm/ext/vector_double3.hpp deleted file mode 100755 index 6dfe4c6..0000000 --- a/third_party/glm/ext/vector_double3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of double-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, double, defaultp> dvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double3_precision.hpp b/third_party/glm/ext/vector_double3_precision.hpp deleted file mode 100755 index a8cfa37..0000000 --- a/third_party/glm/ext/vector_double3_precision.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, double, highp> highp_dvec3; - - /// 3 components vector of medium double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, double, mediump> mediump_dvec3; - - /// 3 components vector of low double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, double, lowp> lowp_dvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double4.hpp b/third_party/glm/ext/vector_double4.hpp deleted file mode 100755 index 87f225f..0000000 --- a/third_party/glm/ext/vector_double4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of double-precision floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, double, defaultp> dvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_double4_precision.hpp b/third_party/glm/ext/vector_double4_precision.hpp deleted file mode 100755 index 09cafa1..0000000 --- a/third_party/glm/ext/vector_double4_precision.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_double4_precision.hpp - -#pragma once -#include "../detail/setup.hpp" -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, double, highp> highp_dvec4; - - /// 4 components vector of medium double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, double, mediump> mediump_dvec4; - - /// 4 components vector of low double-qualifier floating-point numbers. - /// There is no guarantee on the actual qualifier. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, double, lowp> lowp_dvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float1.hpp b/third_party/glm/ext/vector_float1.hpp deleted file mode 100755 index 28acc2c..0000000 --- a/third_party/glm/ext/vector_float1.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref ext_vector_float1 -/// @file glm/ext/vector_float1.hpp -/// -/// @defgroup ext_vector_float1 GLM_EXT_vector_float1 -/// @ingroup ext -/// -/// Exposes single-precision floating point vector type with one component. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_float1_precision extension. -/// @see ext_vector_double1 extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_float1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_float1 - /// @{ - - /// 1 components vector of single-precision floating-point numbers. - typedef vec<1, float, defaultp> vec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float1_precision.hpp b/third_party/glm/ext/vector_float1_precision.hpp deleted file mode 100755 index 6e8dad8..0000000 --- a/third_party/glm/ext/vector_float1_precision.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref ext_vector_float1_precision -/// @file glm/ext/vector_float1_precision.hpp -/// -/// @defgroup ext_vector_float1_precision GLM_EXT_vector_float1_precision -/// @ingroup ext -/// -/// Exposes highp_vec1, mediump_vec1 and lowp_vec1 types. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_float1 extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_float1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_float1_precision - /// @{ - - /// 1 component vector of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef vec<1, float, highp> highp_vec1; - - /// 1 component vector of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, float, mediump> mediump_vec1; - - /// 1 component vector of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, float, lowp> lowp_vec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float2.hpp b/third_party/glm/ext/vector_float2.hpp deleted file mode 100755 index d31545d..0000000 --- a/third_party/glm/ext/vector_float2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, float, defaultp> vec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float2_precision.hpp b/third_party/glm/ext/vector_float2_precision.hpp deleted file mode 100755 index 23c0820..0000000 --- a/third_party/glm/ext/vector_float2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, float, highp> highp_vec2; - - /// 2 components vector of medium single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, float, mediump> mediump_vec2; - - /// 2 components vector of low single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, float, lowp> lowp_vec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float3.hpp b/third_party/glm/ext/vector_float3.hpp deleted file mode 100755 index cd79a62..0000000 --- a/third_party/glm/ext/vector_float3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, float, defaultp> vec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float3_precision.hpp b/third_party/glm/ext/vector_float3_precision.hpp deleted file mode 100755 index be640b5..0000000 --- a/third_party/glm/ext/vector_float3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high single-qualifier floating-point numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, float, highp> highp_vec3; - - /// 3 components vector of medium single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, float, mediump> mediump_vec3; - - /// 3 components vector of low single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, float, lowp> lowp_vec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float4.hpp b/third_party/glm/ext/vector_float4.hpp deleted file mode 100755 index d84adcc..0000000 --- a/third_party/glm/ext/vector_float4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of single-precision floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, float, defaultp> vec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_float4_precision.hpp b/third_party/glm/ext/vector_float4_precision.hpp deleted file mode 100755 index aede838..0000000 --- a/third_party/glm/ext/vector_float4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_float4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, float, highp> highp_vec4; - - /// 4 components vector of medium single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, float, mediump> mediump_vec4; - - /// 4 components vector of low single-qualifier floating-point numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, float, lowp> lowp_vec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int1.hpp b/third_party/glm/ext/vector_int1.hpp deleted file mode 100755 index dc86038..0000000 --- a/third_party/glm/ext/vector_int1.hpp +++ /dev/null @@ -1,32 +0,0 @@ -/// @ref ext_vector_int1 -/// @file glm/ext/vector_int1.hpp -/// -/// @defgroup ext_vector_int1 GLM_EXT_vector_int1 -/// @ingroup ext -/// -/// Exposes ivec1 vector type. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_uint1 extension. -/// @see ext_vector_int1_precision extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_int1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_int1 - /// @{ - - /// 1 component vector of signed integer numbers. 
- typedef vec<1, int, defaultp> ivec1; - - /// @} -}//namespace glm - diff --git a/third_party/glm/ext/vector_int1_precision.hpp b/third_party/glm/ext/vector_int1_precision.hpp deleted file mode 100755 index 3323954..0000000 --- a/third_party/glm/ext/vector_int1_precision.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/// @ref ext_vector_int1_precision -/// @file glm/ext/vector_int1_precision.hpp -/// -/// @defgroup ext_vector_int1_precision GLM_EXT_vector_int1_precision -/// @ingroup ext -/// -/// Exposes highp_ivec1, mediump_ivec1 and lowp_ivec1 types. -/// -/// Include to use the features of this extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_int1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_int1_precision - /// @{ - - /// 1 component vector of signed integer values. - typedef vec<1, int, highp> highp_ivec1; - - /// 1 component vector of signed integer values. - typedef vec<1, int, mediump> mediump_ivec1; - - /// 1 component vector of signed integer values. - typedef vec<1, int, lowp> lowp_ivec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int2.hpp b/third_party/glm/ext/vector_int2.hpp deleted file mode 100755 index aef803e..0000000 --- a/third_party/glm/ext/vector_int2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, int, defaultp> ivec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int2_precision.hpp b/third_party/glm/ext/vector_int2_precision.hpp deleted file mode 100755 index 97315fc..0000000 --- a/third_party/glm/ext/vector_int2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, int, highp> highp_ivec2; - - /// 2 components vector of medium qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, int, mediump> mediump_ivec2; - - /// 2 components vector of low qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, int, lowp> lowp_ivec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int3.hpp b/third_party/glm/ext/vector_int3.hpp deleted file mode 100755 index 4767e61..0000000 --- a/third_party/glm/ext/vector_int3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of signed integer numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, int, defaultp> ivec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int3_precision.hpp b/third_party/glm/ext/vector_int3_precision.hpp deleted file mode 100755 index 2cd3f5f..0000000 --- a/third_party/glm/ext/vector_int3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, int, highp> highp_ivec3; - - /// 3 components vector of medium qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, int, mediump> mediump_ivec3; - - /// 3 components vector of low qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, int, lowp> lowp_ivec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int4.hpp b/third_party/glm/ext/vector_int4.hpp deleted file mode 100755 index bb23adf..0000000 --- a/third_party/glm/ext/vector_int4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, int, defaultp> ivec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_int4_precision.hpp b/third_party/glm/ext/vector_int4_precision.hpp deleted file mode 100755 index 4fcd791..0000000 --- a/third_party/glm/ext/vector_int4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_int4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, int, highp> highp_ivec4; - - /// 4 components vector of medium qualifier signed integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, int, mediump> mediump_ivec4; - - /// 4 components vector of low qualifier signed integer numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, int, lowp> lowp_ivec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_integer.hpp b/third_party/glm/ext/vector_integer.hpp deleted file mode 100755 index 1304dd8..0000000 --- a/third_party/glm/ext/vector_integer.hpp +++ /dev/null @@ -1,149 +0,0 @@ -/// @ref ext_vector_integer -/// @file glm/ext/vector_integer.hpp -/// -/// @see core (dependence) -/// @see ext_vector_integer (dependence) -/// -/// @defgroup ext_vector_integer GLM_EXT_vector_integer -/// @ingroup ext -/// -/// Include to use the features of this extension. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "../vector_relational.hpp" -#include "../common.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_integer extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_integer - /// @{ - - /// Return true if the value is a power of two number. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec isPowerOfTwo(vec const& v); - - /// Return the power of two number which value is just higher the input value, - /// round up to a power of two. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec nextPowerOfTwo(vec const& v); - - /// Return the power of two number which value is just lower the input value, - /// round down to a power of two. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec prevPowerOfTwo(vec const& v); - - /// Return true if the 'Value' is a multiple of 'Multiple'. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec isMultiple(vec const& v, T Multiple); - - /// Return true if the 'Value' is a multiple of 'Multiple'. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec isMultiple(vec const& v, vec const& Multiple); - - /// Higher multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec nextMultiple(vec const& v, T Multiple); - - /// Higher multiple number of Source. 
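A brief sketch of the vector-wide multiple helpers declared in this removed header, for example rounding an extent up to 16-unit alignment (the extent values are made up for illustration; assumes the GLM include path):

#include <glm/ext/vector_integer.hpp>
#include <glm/ext/vector_int3.hpp>
#include <cstdio>

int main()
{
	glm::ivec3 const extent(30, 64, 100);

	// Round each component up to the next multiple of 16.
	glm::ivec3 const aligned = glm::nextMultiple(extent, 16); // (32, 64, 112)

	std::printf("%d %d %d\n", aligned.x, aligned.y, aligned.z);
	return 0;
}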
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec nextMultiple(vec const& v, vec const& Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec prevMultiple(vec const& v, T Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed or unsigned integer scalar types. - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec prevMultiple(vec const& v, vec const& Multiple); - - /// Returns the bit number of the Nth significant bit set to - /// 1 in the binary representation of value. - /// If value bitcount is less than the Nth significant bit, -1 will be returned. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar types. - /// - /// @see ext_vector_integer - template - GLM_FUNC_DECL vec findNSB(vec const& Source, vec SignificantBitCount); - - /// @} -} //namespace glm - -#include "vector_integer.inl" diff --git a/third_party/glm/ext/vector_integer.inl b/third_party/glm/ext/vector_integer.inl deleted file mode 100755 index 939ff5e..0000000 --- a/third_party/glm/ext/vector_integer.inl +++ /dev/null @@ -1,85 +0,0 @@ -#include "scalar_integer.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec isPowerOfTwo(vec const& Value) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isPowerOfTwo' only accept integer inputs"); - - vec const Result(abs(Value)); - return equal(Result & (Result - vec(1)), vec(0)); - } - - template - GLM_FUNC_QUALIFIER vec nextPowerOfTwo(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextPowerOfTwo' only accept integer inputs"); - - return detail::compute_ceilPowerOfTwo::is_signed>::call(v); - } - - template - GLM_FUNC_QUALIFIER vec prevPowerOfTwo(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevPowerOfTwo' only accept integer inputs"); - - return detail::functor1::call(prevPowerOfTwo, v); - } - - template - GLM_FUNC_QUALIFIER vec isMultiple(vec const& Value, T Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isMultiple' only accept integer inputs"); - - return (Value % Multiple) == vec(0); - } - - template - GLM_FUNC_QUALIFIER vec isMultiple(vec const& Value, vec const& Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'isMultiple' only accept integer inputs"); - - return (Value % Multiple) == vec(0); - } - - template - GLM_FUNC_QUALIFIER vec nextMultiple(vec const& Source, T Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextMultiple' only accept integer inputs"); - 
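// Illustrative sketch only (not code from this repository): how the GLM_EXT_vector_integer
// helpers declared in the hunk above are typically used. The glm:: calls are the real library
// functions; the extent/alignment values are made-up examples.
#include <glm/vec2.hpp>
#include <glm/vector_relational.hpp>   // glm::all
#include <glm/ext/vector_integer.hpp>  // glm::nextPowerOfTwo, glm::isMultiple

// Round a 2D image extent up to the next power of two, per component.
inline glm::uvec2 nextPowerOfTwoExtent(glm::uvec2 extent)
{
	return glm::nextPowerOfTwo(extent);
}

// True only when both components of the offset are multiples of the alignment.
inline bool isAligned(glm::uvec2 offset, unsigned int alignment)
{
	return glm::all(glm::isMultiple(offset, alignment));
}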
- return detail::functor2::call(nextMultiple, Source, vec(Multiple)); - } - - template - GLM_FUNC_QUALIFIER vec nextMultiple(vec const& Source, vec const& Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'nextMultiple' only accept integer inputs"); - - return detail::functor2::call(nextMultiple, Source, Multiple); - } - - template - GLM_FUNC_QUALIFIER vec prevMultiple(vec const& Source, T Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevMultiple' only accept integer inputs"); - - return detail::functor2::call(prevMultiple, Source, vec(Multiple)); - } - - template - GLM_FUNC_QUALIFIER vec prevMultiple(vec const& Source, vec const& Multiple) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'prevMultiple' only accept integer inputs"); - - return detail::functor2::call(prevMultiple, Source, Multiple); - } - - template - GLM_FUNC_QUALIFIER vec findNSB(vec const& Source, vec SignificantBitCount) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'findNSB' only accept integer inputs"); - - return detail::functor2_vec_int::call(findNSB, Source, SignificantBitCount); - } -}//namespace glm diff --git a/third_party/glm/ext/vector_relational.hpp b/third_party/glm/ext/vector_relational.hpp deleted file mode 100755 index 1c2367d..0000000 --- a/third_party/glm/ext/vector_relational.hpp +++ /dev/null @@ -1,107 +0,0 @@ -/// @ref ext_vector_relational -/// @file glm/ext/vector_relational.hpp -/// -/// @see core (dependence) -/// @see ext_scalar_integer (dependence) -/// -/// @defgroup ext_vector_relational GLM_EXT_vector_relational -/// @ingroup ext -/// -/// Exposes comparison functions for vector types that take a user defined epsilon values. -/// -/// Include to use the features of this extension. -/// -/// @see core_vector_relational -/// @see ext_scalar_relational -/// @see ext_matrix_relational - -#pragma once - -// Dependencies -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_relational extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_relational - /// @{ - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, T epsilon); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, T epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied. 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& epsilon); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, int ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is not satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, int ULPs); - - /// Returns the component-wise comparison between two vectors in term of ULPs. - /// True if this expression is not satisfied. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& ULPs); - - /// @} -}//namespace glm - -#include "vector_relational.inl" diff --git a/third_party/glm/ext/vector_relational.inl b/third_party/glm/ext/vector_relational.inl deleted file mode 100755 index 7a39ab5..0000000 --- a/third_party/glm/ext/vector_relational.inl +++ /dev/null @@ -1,75 +0,0 @@ -#include "../vector_relational.hpp" -#include "../common.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/type_float.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, T Epsilon) - { - return equal(x, y, vec(Epsilon)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& Epsilon) - { - return lessThanEqual(abs(x - y), Epsilon); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, T Epsilon) - { - return notEqual(x, y, vec(Epsilon)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& Epsilon) - { - return greaterThan(abs(x - y), Epsilon); - } - - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, int MaxULPs) - { - return equal(x, y, vec(MaxULPs)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec equal(vec const& x, vec const& y, vec const& MaxULPs) - { - vec Result(false); - for(length_t i = 0; i < L; ++i) - { - detail::float_t const a(x[i]); - detail::float_t const b(y[i]); - - // Different signs means they do not match. 
- if(a.negative() != b.negative()) - { - // Check for equality to make sure +0==-0 - Result[i] = a.mantissa() == b.mantissa() && a.exponent() == b.exponent(); - } - else - { - // Find the difference in ULPs. - typename detail::float_t::int_type const DiffULPs = abs(a.i - b.i); - Result[i] = DiffULPs <= MaxULPs[i]; - } - } - return Result; - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, int MaxULPs) - { - return notEqual(x, y, vec(MaxULPs)); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y, vec const& MaxULPs) - { - return not_(equal(x, y, MaxULPs)); - } -}//namespace glm diff --git a/third_party/glm/ext/vector_uint1.hpp b/third_party/glm/ext/vector_uint1.hpp deleted file mode 100755 index eb8a704..0000000 --- a/third_party/glm/ext/vector_uint1.hpp +++ /dev/null @@ -1,32 +0,0 @@ -/// @ref ext_vector_uint1 -/// @file glm/ext/vector_uint1.hpp -/// -/// @defgroup ext_vector_uint1 GLM_EXT_vector_uint1 -/// @ingroup ext -/// -/// Exposes uvec1 vector type. -/// -/// Include to use the features of this extension. -/// -/// @see ext_vector_int1 extension. -/// @see ext_vector_uint1_precision extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_uint1 extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_uint1 - /// @{ - - /// 1 component vector of unsigned integer numbers. - typedef vec<1, unsigned int, defaultp> uvec1; - - /// @} -}//namespace glm - diff --git a/third_party/glm/ext/vector_uint1_precision.hpp b/third_party/glm/ext/vector_uint1_precision.hpp deleted file mode 100755 index 30daa5b..0000000 --- a/third_party/glm/ext/vector_uint1_precision.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/// @ref ext_vector_uint1_precision -/// @file glm/ext/vector_uint1_precision.hpp -/// -/// @defgroup ext_vector_uint1_precision GLM_EXT_vector_uint1_precision -/// @ingroup ext -/// -/// Exposes highp_uvec1, mediump_uvec1 and lowp_uvec1 types. -/// -/// Include to use the features of this extension. - -#pragma once - -#include "../detail/type_vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_uint1_precision extension included") -#endif - -namespace glm -{ - /// @addtogroup ext_vector_uint1_precision - /// @{ - - /// 1 component vector of unsigned integer values. - /// - /// @see ext_vector_uint1_precision - typedef vec<1, unsigned int, highp> highp_uvec1; - - /// 1 component vector of unsigned integer values. - /// - /// @see ext_vector_uint1_precision - typedef vec<1, unsigned int, mediump> mediump_uvec1; - - /// 1 component vector of unsigned integer values. - /// - /// @see ext_vector_uint1_precision - typedef vec<1, unsigned int, lowp> lowp_uvec1; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint2.hpp b/third_party/glm/ext/vector_uint2.hpp deleted file mode 100755 index 03c00f5..0000000 --- a/third_party/glm/ext/vector_uint2.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint2.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 2 components vector of unsigned integer numbers. 
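// Illustrative sketch only (not repository code; tolerance values are arbitrary): the
// epsilon- and ULP-based comparisons from GLM_EXT_vector_relational shown above,
// collapsed to a single bool with glm::all.
#include <glm/vec3.hpp>
#include <glm/vector_relational.hpp>      // glm::all
#include <glm/ext/vector_relational.hpp>  // glm::equal / glm::notEqual with epsilon or ULPs

// Component-wise |a - b| < 1e-5f, reduced to one result.
inline bool nearlyEqual(glm::vec3 a, glm::vec3 b)
{
	return glm::all(glm::equal(a, b, 1e-5f));
}

// The same comparison expressed in units in the last place (ULPs) instead of a fixed epsilon.
inline bool withinTwoUlps(glm::vec3 a, glm::vec3 b)
{
	return glm::all(glm::equal(a, b, 2));
}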
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<2, unsigned int, defaultp> uvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint2_precision.hpp b/third_party/glm/ext/vector_uint2_precision.hpp deleted file mode 100755 index 2ba7b0d..0000000 --- a/third_party/glm/ext/vector_uint2_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint2_precision.hpp - -#pragma once -#include "../detail/type_vec2.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 2 components vector of high qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, unsigned int, highp> highp_uvec2; - - /// 2 components vector of medium qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, unsigned int, mediump> mediump_uvec2; - - /// 2 components vector of low qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<2, unsigned int, lowp> lowp_uvec2; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint3.hpp b/third_party/glm/ext/vector_uint3.hpp deleted file mode 100755 index f5b41c4..0000000 --- a/third_party/glm/ext/vector_uint3.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint3.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 3 components vector of unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<3, unsigned int, defaultp> uvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint3_precision.hpp b/third_party/glm/ext/vector_uint3_precision.hpp deleted file mode 100755 index 125191c..0000000 --- a/third_party/glm/ext/vector_uint3_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint3_precision.hpp - -#pragma once -#include "../detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 3 components vector of high qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, unsigned int, highp> highp_uvec3; - - /// 3 components vector of medium qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, unsigned int, mediump> mediump_uvec3; - - /// 3 components vector of low qualifier unsigned integer numbers. 
- /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<3, unsigned int, lowp> lowp_uvec3; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint4.hpp b/third_party/glm/ext/vector_uint4.hpp deleted file mode 100755 index 32ced58..0000000 --- a/third_party/glm/ext/vector_uint4.hpp +++ /dev/null @@ -1,18 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint4.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector - /// @{ - - /// 4 components vector of unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - typedef vec<4, unsigned int, defaultp> uvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_uint4_precision.hpp b/third_party/glm/ext/vector_uint4_precision.hpp deleted file mode 100755 index cf4097c..0000000 --- a/third_party/glm/ext/vector_uint4_precision.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/// @ref core -/// @file glm/ext/vector_uint4_precision.hpp - -#pragma once -#include "../detail/type_vec4.hpp" - -namespace glm -{ - /// @addtogroup core_vector_precision - /// @{ - - /// 4 components vector of high qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, unsigned int, highp> highp_uvec4; - - /// 4 components vector of medium qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, unsigned int, mediump> mediump_uvec4; - - /// 4 components vector of low qualifier unsigned integer numbers. - /// - /// @see GLSL 4.20.8 specification, section 4.1.5 Vectors - /// @see GLSL 4.20.8 specification, section 4.7.2 Precision Qualifier - typedef vec<4, unsigned int, lowp> lowp_uvec4; - - /// @} -}//namespace glm diff --git a/third_party/glm/ext/vector_ulp.hpp b/third_party/glm/ext/vector_ulp.hpp deleted file mode 100755 index 6210396..0000000 --- a/third_party/glm/ext/vector_ulp.hpp +++ /dev/null @@ -1,109 +0,0 @@ -/// @ref ext_vector_ulp -/// @file glm/ext/vector_ulp.hpp -/// -/// @defgroup ext_vector_ulp GLM_EXT_vector_ulp -/// @ingroup ext -/// -/// Allow the measurement of the accuracy of a function against a reference -/// implementation. This extension works on floating-point data and provide results -/// in ULP. -/// -/// Include to use the features of this extension. -/// -/// @see ext_scalar_ulp -/// @see ext_scalar_relational -/// @see ext_vector_relational - -#pragma once - -// Dependencies -#include "../ext/scalar_ulp.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_EXT_vector_ulp extension included") -#endif - -namespace glm -{ - /// Return the next ULP value(s) after the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec nextFloat(vec const& x); - - /// Return the value(s) ULP distance after the input value(s). 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec nextFloat(vec const& x, int ULPs); - - /// Return the value(s) ULP distance after the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec nextFloat(vec const& x, vec const& ULPs); - - /// Return the previous ULP value(s) before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec prevFloat(vec const& x); - - /// Return the value(s) ULP distance before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec prevFloat(vec const& x, int ULPs); - - /// Return the value(s) ULP distance before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec prevFloat(vec const& x, vec const& ULPs); - - /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec floatDistance(vec const& x, vec const& y); - - /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. 
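// Illustrative sketch only (arbitrary values): stepping a vector to neighbouring
// representable floats and measuring ULP distance with the GLM_EXT_vector_ulp
// functions declared above.
#include <glm/vec2.hpp>
#include <glm/ext/vector_ulp.hpp>

inline void ulpExample()
{
	glm::vec2 v(1.0f, 2.0f);
	glm::vec2 up   = glm::nextFloat(v);         // next representable value, per component
	glm::vec2 down = glm::prevFloat(v, 4);      // four ULPs below, per component
	glm::ivec2 d   = glm::floatDistance(v, up); // expected to be (1, 1)
	(void)up; (void)down; (void)d;
}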
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see ext_scalar_ulp - template - GLM_FUNC_DECL vec floatDistance(vec const& x, vec const& y); - - /// @} -}//namespace glm - -#include "vector_ulp.inl" diff --git a/third_party/glm/ext/vector_ulp.inl b/third_party/glm/ext/vector_ulp.inl deleted file mode 100755 index 91565ce..0000000 --- a/third_party/glm/ext/vector_ulp.inl +++ /dev/null @@ -1,74 +0,0 @@ -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec nextFloat(vec const& x) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = nextFloat(x[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec nextFloat(vec const& x, int ULPs) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = nextFloat(x[i], ULPs); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec nextFloat(vec const& x, vec const& ULPs) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = nextFloat(x[i], ULPs[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec prevFloat(vec const& x) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prevFloat(x[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec prevFloat(vec const& x, int ULPs) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prevFloat(x[i], ULPs); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec prevFloat(vec const& x, vec const& ULPs) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prevFloat(x[i], ULPs[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec floatDistance(vec const& x, vec const& y) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = floatDistance(x[i], y[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec floatDistance(vec const& x, vec const& y) - { - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = floatDistance(x[i], y[i]); - return Result; - } -}//namespace glm diff --git a/third_party/glm/fwd.hpp b/third_party/glm/fwd.hpp deleted file mode 100755 index 474d44f..0000000 --- a/third_party/glm/fwd.hpp +++ /dev/null @@ -1,818 +0,0 @@ -#pragma once - -#include "detail/qualifier.hpp" - -namespace glm -{ -#if GLM_HAS_EXTENDED_INTEGER_TYPE - typedef std::int8_t int8; - typedef std::int16_t int16; - typedef std::int32_t int32; - typedef std::int64_t int64; - - typedef std::uint8_t uint8; - typedef std::uint16_t uint16; - typedef std::uint32_t uint32; - typedef std::uint64_t uint64; -#else - typedef signed char int8; - typedef signed short int16; - typedef signed int int32; - typedef detail::int64 int64; - - typedef unsigned char uint8; - typedef unsigned short uint16; - typedef unsigned int uint32; - typedef detail::uint64 uint64; -#endif - - // Scalar int - - typedef int8 lowp_i8; - typedef int8 mediump_i8; - typedef int8 highp_i8; - typedef int8 i8; - - typedef int8 lowp_int8; - typedef int8 mediump_int8; - typedef int8 highp_int8; - - typedef int8 lowp_int8_t; - typedef int8 mediump_int8_t; - typedef int8 highp_int8_t; - typedef int8 int8_t; - - typedef int16 lowp_i16; - typedef int16 mediump_i16; - typedef int16 highp_i16; - typedef int16 i16; - - typedef int16 lowp_int16; - typedef int16 mediump_int16; - typedef int16 highp_int16; - - typedef int16 lowp_int16_t; - typedef int16 mediump_int16_t; - typedef int16 
highp_int16_t; - typedef int16 int16_t; - - typedef int32 lowp_i32; - typedef int32 mediump_i32; - typedef int32 highp_i32; - typedef int32 i32; - - typedef int32 lowp_int32; - typedef int32 mediump_int32; - typedef int32 highp_int32; - - typedef int32 lowp_int32_t; - typedef int32 mediump_int32_t; - typedef int32 highp_int32_t; - typedef int32 int32_t; - - typedef int64 lowp_i64; - typedef int64 mediump_i64; - typedef int64 highp_i64; - typedef int64 i64; - - typedef int64 lowp_int64; - typedef int64 mediump_int64; - typedef int64 highp_int64; - - typedef int64 lowp_int64_t; - typedef int64 mediump_int64_t; - typedef int64 highp_int64_t; - typedef int64 int64_t; - - // Scalar uint - - typedef uint8 lowp_u8; - typedef uint8 mediump_u8; - typedef uint8 highp_u8; - typedef uint8 u8; - - typedef uint8 lowp_uint8; - typedef uint8 mediump_uint8; - typedef uint8 highp_uint8; - - typedef uint8 lowp_uint8_t; - typedef uint8 mediump_uint8_t; - typedef uint8 highp_uint8_t; - typedef uint8 uint8_t; - - typedef uint16 lowp_u16; - typedef uint16 mediump_u16; - typedef uint16 highp_u16; - typedef uint16 u16; - - typedef uint16 lowp_uint16; - typedef uint16 mediump_uint16; - typedef uint16 highp_uint16; - - typedef uint16 lowp_uint16_t; - typedef uint16 mediump_uint16_t; - typedef uint16 highp_uint16_t; - typedef uint16 uint16_t; - - typedef uint32 lowp_u32; - typedef uint32 mediump_u32; - typedef uint32 highp_u32; - typedef uint32 u32; - - typedef uint32 lowp_uint32; - typedef uint32 mediump_uint32; - typedef uint32 highp_uint32; - - typedef uint32 lowp_uint32_t; - typedef uint32 mediump_uint32_t; - typedef uint32 highp_uint32_t; - typedef uint32 uint32_t; - - typedef uint64 lowp_u64; - typedef uint64 mediump_u64; - typedef uint64 highp_u64; - typedef uint64 u64; - - typedef uint64 lowp_uint64; - typedef uint64 mediump_uint64; - typedef uint64 highp_uint64; - - typedef uint64 lowp_uint64_t; - typedef uint64 mediump_uint64_t; - typedef uint64 highp_uint64_t; - typedef uint64 uint64_t; - - // Scalar float - - typedef float lowp_f32; - typedef float mediump_f32; - typedef float highp_f32; - typedef float f32; - - typedef float lowp_float32; - typedef float mediump_float32; - typedef float highp_float32; - typedef float float32; - - typedef float lowp_float32_t; - typedef float mediump_float32_t; - typedef float highp_float32_t; - typedef float float32_t; - - - typedef double lowp_f64; - typedef double mediump_f64; - typedef double highp_f64; - typedef double f64; - - typedef double lowp_float64; - typedef double mediump_float64; - typedef double highp_float64; - typedef double float64; - - typedef double lowp_float64_t; - typedef double mediump_float64_t; - typedef double highp_float64_t; - typedef double float64_t; - - // Vector bool - - typedef vec<1, bool, lowp> lowp_bvec1; - typedef vec<2, bool, lowp> lowp_bvec2; - typedef vec<3, bool, lowp> lowp_bvec3; - typedef vec<4, bool, lowp> lowp_bvec4; - - typedef vec<1, bool, mediump> mediump_bvec1; - typedef vec<2, bool, mediump> mediump_bvec2; - typedef vec<3, bool, mediump> mediump_bvec3; - typedef vec<4, bool, mediump> mediump_bvec4; - - typedef vec<1, bool, highp> highp_bvec1; - typedef vec<2, bool, highp> highp_bvec2; - typedef vec<3, bool, highp> highp_bvec3; - typedef vec<4, bool, highp> highp_bvec4; - - typedef vec<1, bool, defaultp> bvec1; - typedef vec<2, bool, defaultp> bvec2; - typedef vec<3, bool, defaultp> bvec3; - typedef vec<4, bool, defaultp> bvec4; - - // Vector int - - typedef vec<1, i32, lowp> lowp_ivec1; - typedef vec<2, i32, lowp> 
lowp_ivec2; - typedef vec<3, i32, lowp> lowp_ivec3; - typedef vec<4, i32, lowp> lowp_ivec4; - - typedef vec<1, i32, mediump> mediump_ivec1; - typedef vec<2, i32, mediump> mediump_ivec2; - typedef vec<3, i32, mediump> mediump_ivec3; - typedef vec<4, i32, mediump> mediump_ivec4; - - typedef vec<1, i32, highp> highp_ivec1; - typedef vec<2, i32, highp> highp_ivec2; - typedef vec<3, i32, highp> highp_ivec3; - typedef vec<4, i32, highp> highp_ivec4; - - typedef vec<1, i32, defaultp> ivec1; - typedef vec<2, i32, defaultp> ivec2; - typedef vec<3, i32, defaultp> ivec3; - typedef vec<4, i32, defaultp> ivec4; - - typedef vec<1, i8, lowp> lowp_i8vec1; - typedef vec<2, i8, lowp> lowp_i8vec2; - typedef vec<3, i8, lowp> lowp_i8vec3; - typedef vec<4, i8, lowp> lowp_i8vec4; - - typedef vec<1, i8, mediump> mediump_i8vec1; - typedef vec<2, i8, mediump> mediump_i8vec2; - typedef vec<3, i8, mediump> mediump_i8vec3; - typedef vec<4, i8, mediump> mediump_i8vec4; - - typedef vec<1, i8, highp> highp_i8vec1; - typedef vec<2, i8, highp> highp_i8vec2; - typedef vec<3, i8, highp> highp_i8vec3; - typedef vec<4, i8, highp> highp_i8vec4; - - typedef vec<1, i8, defaultp> i8vec1; - typedef vec<2, i8, defaultp> i8vec2; - typedef vec<3, i8, defaultp> i8vec3; - typedef vec<4, i8, defaultp> i8vec4; - - typedef vec<1, i16, lowp> lowp_i16vec1; - typedef vec<2, i16, lowp> lowp_i16vec2; - typedef vec<3, i16, lowp> lowp_i16vec3; - typedef vec<4, i16, lowp> lowp_i16vec4; - - typedef vec<1, i16, mediump> mediump_i16vec1; - typedef vec<2, i16, mediump> mediump_i16vec2; - typedef vec<3, i16, mediump> mediump_i16vec3; - typedef vec<4, i16, mediump> mediump_i16vec4; - - typedef vec<1, i16, highp> highp_i16vec1; - typedef vec<2, i16, highp> highp_i16vec2; - typedef vec<3, i16, highp> highp_i16vec3; - typedef vec<4, i16, highp> highp_i16vec4; - - typedef vec<1, i16, defaultp> i16vec1; - typedef vec<2, i16, defaultp> i16vec2; - typedef vec<3, i16, defaultp> i16vec3; - typedef vec<4, i16, defaultp> i16vec4; - - typedef vec<1, i32, lowp> lowp_i32vec1; - typedef vec<2, i32, lowp> lowp_i32vec2; - typedef vec<3, i32, lowp> lowp_i32vec3; - typedef vec<4, i32, lowp> lowp_i32vec4; - - typedef vec<1, i32, mediump> mediump_i32vec1; - typedef vec<2, i32, mediump> mediump_i32vec2; - typedef vec<3, i32, mediump> mediump_i32vec3; - typedef vec<4, i32, mediump> mediump_i32vec4; - - typedef vec<1, i32, highp> highp_i32vec1; - typedef vec<2, i32, highp> highp_i32vec2; - typedef vec<3, i32, highp> highp_i32vec3; - typedef vec<4, i32, highp> highp_i32vec4; - - typedef vec<1, i32, defaultp> i32vec1; - typedef vec<2, i32, defaultp> i32vec2; - typedef vec<3, i32, defaultp> i32vec3; - typedef vec<4, i32, defaultp> i32vec4; - - typedef vec<1, i64, lowp> lowp_i64vec1; - typedef vec<2, i64, lowp> lowp_i64vec2; - typedef vec<3, i64, lowp> lowp_i64vec3; - typedef vec<4, i64, lowp> lowp_i64vec4; - - typedef vec<1, i64, mediump> mediump_i64vec1; - typedef vec<2, i64, mediump> mediump_i64vec2; - typedef vec<3, i64, mediump> mediump_i64vec3; - typedef vec<4, i64, mediump> mediump_i64vec4; - - typedef vec<1, i64, highp> highp_i64vec1; - typedef vec<2, i64, highp> highp_i64vec2; - typedef vec<3, i64, highp> highp_i64vec3; - typedef vec<4, i64, highp> highp_i64vec4; - - typedef vec<1, i64, defaultp> i64vec1; - typedef vec<2, i64, defaultp> i64vec2; - typedef vec<3, i64, defaultp> i64vec3; - typedef vec<4, i64, defaultp> i64vec4; - - // Vector uint - - typedef vec<1, u32, lowp> lowp_uvec1; - typedef vec<2, u32, lowp> lowp_uvec2; - typedef vec<3, u32, lowp> lowp_uvec3; - 
typedef vec<4, u32, lowp> lowp_uvec4; - - typedef vec<1, u32, mediump> mediump_uvec1; - typedef vec<2, u32, mediump> mediump_uvec2; - typedef vec<3, u32, mediump> mediump_uvec3; - typedef vec<4, u32, mediump> mediump_uvec4; - - typedef vec<1, u32, highp> highp_uvec1; - typedef vec<2, u32, highp> highp_uvec2; - typedef vec<3, u32, highp> highp_uvec3; - typedef vec<4, u32, highp> highp_uvec4; - - typedef vec<1, u32, defaultp> uvec1; - typedef vec<2, u32, defaultp> uvec2; - typedef vec<3, u32, defaultp> uvec3; - typedef vec<4, u32, defaultp> uvec4; - - typedef vec<1, u8, lowp> lowp_u8vec1; - typedef vec<2, u8, lowp> lowp_u8vec2; - typedef vec<3, u8, lowp> lowp_u8vec3; - typedef vec<4, u8, lowp> lowp_u8vec4; - - typedef vec<1, u8, mediump> mediump_u8vec1; - typedef vec<2, u8, mediump> mediump_u8vec2; - typedef vec<3, u8, mediump> mediump_u8vec3; - typedef vec<4, u8, mediump> mediump_u8vec4; - - typedef vec<1, u8, highp> highp_u8vec1; - typedef vec<2, u8, highp> highp_u8vec2; - typedef vec<3, u8, highp> highp_u8vec3; - typedef vec<4, u8, highp> highp_u8vec4; - - typedef vec<1, u8, defaultp> u8vec1; - typedef vec<2, u8, defaultp> u8vec2; - typedef vec<3, u8, defaultp> u8vec3; - typedef vec<4, u8, defaultp> u8vec4; - - typedef vec<1, u16, lowp> lowp_u16vec1; - typedef vec<2, u16, lowp> lowp_u16vec2; - typedef vec<3, u16, lowp> lowp_u16vec3; - typedef vec<4, u16, lowp> lowp_u16vec4; - - typedef vec<1, u16, mediump> mediump_u16vec1; - typedef vec<2, u16, mediump> mediump_u16vec2; - typedef vec<3, u16, mediump> mediump_u16vec3; - typedef vec<4, u16, mediump> mediump_u16vec4; - - typedef vec<1, u16, highp> highp_u16vec1; - typedef vec<2, u16, highp> highp_u16vec2; - typedef vec<3, u16, highp> highp_u16vec3; - typedef vec<4, u16, highp> highp_u16vec4; - - typedef vec<1, u16, defaultp> u16vec1; - typedef vec<2, u16, defaultp> u16vec2; - typedef vec<3, u16, defaultp> u16vec3; - typedef vec<4, u16, defaultp> u16vec4; - - typedef vec<1, u32, lowp> lowp_u32vec1; - typedef vec<2, u32, lowp> lowp_u32vec2; - typedef vec<3, u32, lowp> lowp_u32vec3; - typedef vec<4, u32, lowp> lowp_u32vec4; - - typedef vec<1, u32, mediump> mediump_u32vec1; - typedef vec<2, u32, mediump> mediump_u32vec2; - typedef vec<3, u32, mediump> mediump_u32vec3; - typedef vec<4, u32, mediump> mediump_u32vec4; - - typedef vec<1, u32, highp> highp_u32vec1; - typedef vec<2, u32, highp> highp_u32vec2; - typedef vec<3, u32, highp> highp_u32vec3; - typedef vec<4, u32, highp> highp_u32vec4; - - typedef vec<1, u32, defaultp> u32vec1; - typedef vec<2, u32, defaultp> u32vec2; - typedef vec<3, u32, defaultp> u32vec3; - typedef vec<4, u32, defaultp> u32vec4; - - typedef vec<1, u64, lowp> lowp_u64vec1; - typedef vec<2, u64, lowp> lowp_u64vec2; - typedef vec<3, u64, lowp> lowp_u64vec3; - typedef vec<4, u64, lowp> lowp_u64vec4; - - typedef vec<1, u64, mediump> mediump_u64vec1; - typedef vec<2, u64, mediump> mediump_u64vec2; - typedef vec<3, u64, mediump> mediump_u64vec3; - typedef vec<4, u64, mediump> mediump_u64vec4; - - typedef vec<1, u64, highp> highp_u64vec1; - typedef vec<2, u64, highp> highp_u64vec2; - typedef vec<3, u64, highp> highp_u64vec3; - typedef vec<4, u64, highp> highp_u64vec4; - - typedef vec<1, u64, defaultp> u64vec1; - typedef vec<2, u64, defaultp> u64vec2; - typedef vec<3, u64, defaultp> u64vec3; - typedef vec<4, u64, defaultp> u64vec4; - - // Vector float - - typedef vec<1, float, lowp> lowp_vec1; - typedef vec<2, float, lowp> lowp_vec2; - typedef vec<3, float, lowp> lowp_vec3; - typedef vec<4, float, lowp> lowp_vec4; - - typedef 
vec<1, float, mediump> mediump_vec1; - typedef vec<2, float, mediump> mediump_vec2; - typedef vec<3, float, mediump> mediump_vec3; - typedef vec<4, float, mediump> mediump_vec4; - - typedef vec<1, float, highp> highp_vec1; - typedef vec<2, float, highp> highp_vec2; - typedef vec<3, float, highp> highp_vec3; - typedef vec<4, float, highp> highp_vec4; - - typedef vec<1, float, defaultp> vec1; - typedef vec<2, float, defaultp> vec2; - typedef vec<3, float, defaultp> vec3; - typedef vec<4, float, defaultp> vec4; - - typedef vec<1, float, lowp> lowp_fvec1; - typedef vec<2, float, lowp> lowp_fvec2; - typedef vec<3, float, lowp> lowp_fvec3; - typedef vec<4, float, lowp> lowp_fvec4; - - typedef vec<1, float, mediump> mediump_fvec1; - typedef vec<2, float, mediump> mediump_fvec2; - typedef vec<3, float, mediump> mediump_fvec3; - typedef vec<4, float, mediump> mediump_fvec4; - - typedef vec<1, float, highp> highp_fvec1; - typedef vec<2, float, highp> highp_fvec2; - typedef vec<3, float, highp> highp_fvec3; - typedef vec<4, float, highp> highp_fvec4; - - typedef vec<1, f32, defaultp> fvec1; - typedef vec<2, f32, defaultp> fvec2; - typedef vec<3, f32, defaultp> fvec3; - typedef vec<4, f32, defaultp> fvec4; - - typedef vec<1, f32, lowp> lowp_f32vec1; - typedef vec<2, f32, lowp> lowp_f32vec2; - typedef vec<3, f32, lowp> lowp_f32vec3; - typedef vec<4, f32, lowp> lowp_f32vec4; - - typedef vec<1, f32, mediump> mediump_f32vec1; - typedef vec<2, f32, mediump> mediump_f32vec2; - typedef vec<3, f32, mediump> mediump_f32vec3; - typedef vec<4, f32, mediump> mediump_f32vec4; - - typedef vec<1, f32, highp> highp_f32vec1; - typedef vec<2, f32, highp> highp_f32vec2; - typedef vec<3, f32, highp> highp_f32vec3; - typedef vec<4, f32, highp> highp_f32vec4; - - typedef vec<1, f32, defaultp> f32vec1; - typedef vec<2, f32, defaultp> f32vec2; - typedef vec<3, f32, defaultp> f32vec3; - typedef vec<4, f32, defaultp> f32vec4; - - typedef vec<1, f64, lowp> lowp_dvec1; - typedef vec<2, f64, lowp> lowp_dvec2; - typedef vec<3, f64, lowp> lowp_dvec3; - typedef vec<4, f64, lowp> lowp_dvec4; - - typedef vec<1, f64, mediump> mediump_dvec1; - typedef vec<2, f64, mediump> mediump_dvec2; - typedef vec<3, f64, mediump> mediump_dvec3; - typedef vec<4, f64, mediump> mediump_dvec4; - - typedef vec<1, f64, highp> highp_dvec1; - typedef vec<2, f64, highp> highp_dvec2; - typedef vec<3, f64, highp> highp_dvec3; - typedef vec<4, f64, highp> highp_dvec4; - - typedef vec<1, f64, defaultp> dvec1; - typedef vec<2, f64, defaultp> dvec2; - typedef vec<3, f64, defaultp> dvec3; - typedef vec<4, f64, defaultp> dvec4; - - typedef vec<1, f64, lowp> lowp_f64vec1; - typedef vec<2, f64, lowp> lowp_f64vec2; - typedef vec<3, f64, lowp> lowp_f64vec3; - typedef vec<4, f64, lowp> lowp_f64vec4; - - typedef vec<1, f64, mediump> mediump_f64vec1; - typedef vec<2, f64, mediump> mediump_f64vec2; - typedef vec<3, f64, mediump> mediump_f64vec3; - typedef vec<4, f64, mediump> mediump_f64vec4; - - typedef vec<1, f64, highp> highp_f64vec1; - typedef vec<2, f64, highp> highp_f64vec2; - typedef vec<3, f64, highp> highp_f64vec3; - typedef vec<4, f64, highp> highp_f64vec4; - - typedef vec<1, f64, defaultp> f64vec1; - typedef vec<2, f64, defaultp> f64vec2; - typedef vec<3, f64, defaultp> f64vec3; - typedef vec<4, f64, defaultp> f64vec4; - - // Matrix NxN - - typedef mat<2, 2, f32, lowp> lowp_mat2; - typedef mat<3, 3, f32, lowp> lowp_mat3; - typedef mat<4, 4, f32, lowp> lowp_mat4; - - typedef mat<2, 2, f32, mediump> mediump_mat2; - typedef mat<3, 3, f32, mediump> mediump_mat3; - 
typedef mat<4, 4, f32, mediump> mediump_mat4; - - typedef mat<2, 2, f32, highp> highp_mat2; - typedef mat<3, 3, f32, highp> highp_mat3; - typedef mat<4, 4, f32, highp> highp_mat4; - - typedef mat<2, 2, f32, defaultp> mat2; - typedef mat<3, 3, f32, defaultp> mat3; - typedef mat<4, 4, f32, defaultp> mat4; - - typedef mat<2, 2, f32, lowp> lowp_fmat2; - typedef mat<3, 3, f32, lowp> lowp_fmat3; - typedef mat<4, 4, f32, lowp> lowp_fmat4; - - typedef mat<2, 2, f32, mediump> mediump_fmat2; - typedef mat<3, 3, f32, mediump> mediump_fmat3; - typedef mat<4, 4, f32, mediump> mediump_fmat4; - - typedef mat<2, 2, f32, highp> highp_fmat2; - typedef mat<3, 3, f32, highp> highp_fmat3; - typedef mat<4, 4, f32, highp> highp_fmat4; - - typedef mat<2, 2, f32, defaultp> fmat2; - typedef mat<3, 3, f32, defaultp> fmat3; - typedef mat<4, 4, f32, defaultp> fmat4; - - typedef mat<2, 2, f32, lowp> lowp_f32mat2; - typedef mat<3, 3, f32, lowp> lowp_f32mat3; - typedef mat<4, 4, f32, lowp> lowp_f32mat4; - - typedef mat<2, 2, f32, mediump> mediump_f32mat2; - typedef mat<3, 3, f32, mediump> mediump_f32mat3; - typedef mat<4, 4, f32, mediump> mediump_f32mat4; - - typedef mat<2, 2, f32, highp> highp_f32mat2; - typedef mat<3, 3, f32, highp> highp_f32mat3; - typedef mat<4, 4, f32, highp> highp_f32mat4; - - typedef mat<2, 2, f32, defaultp> f32mat2; - typedef mat<3, 3, f32, defaultp> f32mat3; - typedef mat<4, 4, f32, defaultp> f32mat4; - - typedef mat<2, 2, f64, lowp> lowp_dmat2; - typedef mat<3, 3, f64, lowp> lowp_dmat3; - typedef mat<4, 4, f64, lowp> lowp_dmat4; - - typedef mat<2, 2, f64, mediump> mediump_dmat2; - typedef mat<3, 3, f64, mediump> mediump_dmat3; - typedef mat<4, 4, f64, mediump> mediump_dmat4; - - typedef mat<2, 2, f64, highp> highp_dmat2; - typedef mat<3, 3, f64, highp> highp_dmat3; - typedef mat<4, 4, f64, highp> highp_dmat4; - - typedef mat<2, 2, f64, defaultp> dmat2; - typedef mat<3, 3, f64, defaultp> dmat3; - typedef mat<4, 4, f64, defaultp> dmat4; - - typedef mat<2, 2, f64, lowp> lowp_f64mat2; - typedef mat<3, 3, f64, lowp> lowp_f64mat3; - typedef mat<4, 4, f64, lowp> lowp_f64mat4; - - typedef mat<2, 2, f64, mediump> mediump_f64mat2; - typedef mat<3, 3, f64, mediump> mediump_f64mat3; - typedef mat<4, 4, f64, mediump> mediump_f64mat4; - - typedef mat<2, 2, f64, highp> highp_f64mat2; - typedef mat<3, 3, f64, highp> highp_f64mat3; - typedef mat<4, 4, f64, highp> highp_f64mat4; - - typedef mat<2, 2, f64, defaultp> f64mat2; - typedef mat<3, 3, f64, defaultp> f64mat3; - typedef mat<4, 4, f64, defaultp> f64mat4; - - // Matrix MxN - - typedef mat<2, 2, f32, lowp> lowp_mat2x2; - typedef mat<2, 3, f32, lowp> lowp_mat2x3; - typedef mat<2, 4, f32, lowp> lowp_mat2x4; - typedef mat<3, 2, f32, lowp> lowp_mat3x2; - typedef mat<3, 3, f32, lowp> lowp_mat3x3; - typedef mat<3, 4, f32, lowp> lowp_mat3x4; - typedef mat<4, 2, f32, lowp> lowp_mat4x2; - typedef mat<4, 3, f32, lowp> lowp_mat4x3; - typedef mat<4, 4, f32, lowp> lowp_mat4x4; - - typedef mat<2, 2, f32, mediump> mediump_mat2x2; - typedef mat<2, 3, f32, mediump> mediump_mat2x3; - typedef mat<2, 4, f32, mediump> mediump_mat2x4; - typedef mat<3, 2, f32, mediump> mediump_mat3x2; - typedef mat<3, 3, f32, mediump> mediump_mat3x3; - typedef mat<3, 4, f32, mediump> mediump_mat3x4; - typedef mat<4, 2, f32, mediump> mediump_mat4x2; - typedef mat<4, 3, f32, mediump> mediump_mat4x3; - typedef mat<4, 4, f32, mediump> mediump_mat4x4; - - typedef mat<2, 2, f32, highp> highp_mat2x2; - typedef mat<2, 3, f32, highp> highp_mat2x3; - typedef mat<2, 4, f32, highp> highp_mat2x4; - typedef 
mat<3, 2, f32, highp> highp_mat3x2; - typedef mat<3, 3, f32, highp> highp_mat3x3; - typedef mat<3, 4, f32, highp> highp_mat3x4; - typedef mat<4, 2, f32, highp> highp_mat4x2; - typedef mat<4, 3, f32, highp> highp_mat4x3; - typedef mat<4, 4, f32, highp> highp_mat4x4; - - typedef mat<2, 2, f32, defaultp> mat2x2; - typedef mat<3, 2, f32, defaultp> mat3x2; - typedef mat<4, 2, f32, defaultp> mat4x2; - typedef mat<2, 3, f32, defaultp> mat2x3; - typedef mat<3, 3, f32, defaultp> mat3x3; - typedef mat<4, 3, f32, defaultp> mat4x3; - typedef mat<2, 4, f32, defaultp> mat2x4; - typedef mat<3, 4, f32, defaultp> mat3x4; - typedef mat<4, 4, f32, defaultp> mat4x4; - - typedef mat<2, 2, f32, lowp> lowp_fmat2x2; - typedef mat<2, 3, f32, lowp> lowp_fmat2x3; - typedef mat<2, 4, f32, lowp> lowp_fmat2x4; - typedef mat<3, 2, f32, lowp> lowp_fmat3x2; - typedef mat<3, 3, f32, lowp> lowp_fmat3x3; - typedef mat<3, 4, f32, lowp> lowp_fmat3x4; - typedef mat<4, 2, f32, lowp> lowp_fmat4x2; - typedef mat<4, 3, f32, lowp> lowp_fmat4x3; - typedef mat<4, 4, f32, lowp> lowp_fmat4x4; - - typedef mat<2, 2, f32, mediump> mediump_fmat2x2; - typedef mat<2, 3, f32, mediump> mediump_fmat2x3; - typedef mat<2, 4, f32, mediump> mediump_fmat2x4; - typedef mat<3, 2, f32, mediump> mediump_fmat3x2; - typedef mat<3, 3, f32, mediump> mediump_fmat3x3; - typedef mat<3, 4, f32, mediump> mediump_fmat3x4; - typedef mat<4, 2, f32, mediump> mediump_fmat4x2; - typedef mat<4, 3, f32, mediump> mediump_fmat4x3; - typedef mat<4, 4, f32, mediump> mediump_fmat4x4; - - typedef mat<2, 2, f32, highp> highp_fmat2x2; - typedef mat<2, 3, f32, highp> highp_fmat2x3; - typedef mat<2, 4, f32, highp> highp_fmat2x4; - typedef mat<3, 2, f32, highp> highp_fmat3x2; - typedef mat<3, 3, f32, highp> highp_fmat3x3; - typedef mat<3, 4, f32, highp> highp_fmat3x4; - typedef mat<4, 2, f32, highp> highp_fmat4x2; - typedef mat<4, 3, f32, highp> highp_fmat4x3; - typedef mat<4, 4, f32, highp> highp_fmat4x4; - - typedef mat<2, 2, f32, defaultp> fmat2x2; - typedef mat<3, 2, f32, defaultp> fmat3x2; - typedef mat<4, 2, f32, defaultp> fmat4x2; - typedef mat<2, 3, f32, defaultp> fmat2x3; - typedef mat<3, 3, f32, defaultp> fmat3x3; - typedef mat<4, 3, f32, defaultp> fmat4x3; - typedef mat<2, 4, f32, defaultp> fmat2x4; - typedef mat<3, 4, f32, defaultp> fmat3x4; - typedef mat<4, 4, f32, defaultp> fmat4x4; - - typedef mat<2, 2, f32, lowp> lowp_f32mat2x2; - typedef mat<2, 3, f32, lowp> lowp_f32mat2x3; - typedef mat<2, 4, f32, lowp> lowp_f32mat2x4; - typedef mat<3, 2, f32, lowp> lowp_f32mat3x2; - typedef mat<3, 3, f32, lowp> lowp_f32mat3x3; - typedef mat<3, 4, f32, lowp> lowp_f32mat3x4; - typedef mat<4, 2, f32, lowp> lowp_f32mat4x2; - typedef mat<4, 3, f32, lowp> lowp_f32mat4x3; - typedef mat<4, 4, f32, lowp> lowp_f32mat4x4; - - typedef mat<2, 2, f32, mediump> mediump_f32mat2x2; - typedef mat<2, 3, f32, mediump> mediump_f32mat2x3; - typedef mat<2, 4, f32, mediump> mediump_f32mat2x4; - typedef mat<3, 2, f32, mediump> mediump_f32mat3x2; - typedef mat<3, 3, f32, mediump> mediump_f32mat3x3; - typedef mat<3, 4, f32, mediump> mediump_f32mat3x4; - typedef mat<4, 2, f32, mediump> mediump_f32mat4x2; - typedef mat<4, 3, f32, mediump> mediump_f32mat4x3; - typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; - - typedef mat<2, 2, f32, highp> highp_f32mat2x2; - typedef mat<2, 3, f32, highp> highp_f32mat2x3; - typedef mat<2, 4, f32, highp> highp_f32mat2x4; - typedef mat<3, 2, f32, highp> highp_f32mat3x2; - typedef mat<3, 3, f32, highp> highp_f32mat3x3; - typedef mat<3, 4, f32, highp> highp_f32mat3x4; - 
typedef mat<4, 2, f32, highp> highp_f32mat4x2; - typedef mat<4, 3, f32, highp> highp_f32mat4x3; - typedef mat<4, 4, f32, highp> highp_f32mat4x4; - - typedef mat<2, 2, f32, defaultp> f32mat2x2; - typedef mat<3, 2, f32, defaultp> f32mat3x2; - typedef mat<4, 2, f32, defaultp> f32mat4x2; - typedef mat<2, 3, f32, defaultp> f32mat2x3; - typedef mat<3, 3, f32, defaultp> f32mat3x3; - typedef mat<4, 3, f32, defaultp> f32mat4x3; - typedef mat<2, 4, f32, defaultp> f32mat2x4; - typedef mat<3, 4, f32, defaultp> f32mat3x4; - typedef mat<4, 4, f32, defaultp> f32mat4x4; - - typedef mat<2, 2, double, lowp> lowp_dmat2x2; - typedef mat<2, 3, double, lowp> lowp_dmat2x3; - typedef mat<2, 4, double, lowp> lowp_dmat2x4; - typedef mat<3, 2, double, lowp> lowp_dmat3x2; - typedef mat<3, 3, double, lowp> lowp_dmat3x3; - typedef mat<3, 4, double, lowp> lowp_dmat3x4; - typedef mat<4, 2, double, lowp> lowp_dmat4x2; - typedef mat<4, 3, double, lowp> lowp_dmat4x3; - typedef mat<4, 4, double, lowp> lowp_dmat4x4; - - typedef mat<2, 2, double, mediump> mediump_dmat2x2; - typedef mat<2, 3, double, mediump> mediump_dmat2x3; - typedef mat<2, 4, double, mediump> mediump_dmat2x4; - typedef mat<3, 2, double, mediump> mediump_dmat3x2; - typedef mat<3, 3, double, mediump> mediump_dmat3x3; - typedef mat<3, 4, double, mediump> mediump_dmat3x4; - typedef mat<4, 2, double, mediump> mediump_dmat4x2; - typedef mat<4, 3, double, mediump> mediump_dmat4x3; - typedef mat<4, 4, double, mediump> mediump_dmat4x4; - - typedef mat<2, 2, double, highp> highp_dmat2x2; - typedef mat<2, 3, double, highp> highp_dmat2x3; - typedef mat<2, 4, double, highp> highp_dmat2x4; - typedef mat<3, 2, double, highp> highp_dmat3x2; - typedef mat<3, 3, double, highp> highp_dmat3x3; - typedef mat<3, 4, double, highp> highp_dmat3x4; - typedef mat<4, 2, double, highp> highp_dmat4x2; - typedef mat<4, 3, double, highp> highp_dmat4x3; - typedef mat<4, 4, double, highp> highp_dmat4x4; - - typedef mat<2, 2, double, defaultp> dmat2x2; - typedef mat<3, 2, double, defaultp> dmat3x2; - typedef mat<4, 2, double, defaultp> dmat4x2; - typedef mat<2, 3, double, defaultp> dmat2x3; - typedef mat<3, 3, double, defaultp> dmat3x3; - typedef mat<4, 3, double, defaultp> dmat4x3; - typedef mat<2, 4, double, defaultp> dmat2x4; - typedef mat<3, 4, double, defaultp> dmat3x4; - typedef mat<4, 4, double, defaultp> dmat4x4; - - typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; - typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; - typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; - typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; - typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; - typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; - typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; - typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; - typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; - - typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; - typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; - typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; - typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; - typedef mat<3, 3, f64, mediump> mediump_f64mat3x3; - typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; - typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; - typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; - typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; - - typedef mat<2, 2, f64, highp> highp_f64mat2x2; - typedef mat<2, 3, f64, highp> highp_f64mat2x3; - typedef mat<2, 4, f64, highp> highp_f64mat2x4; - typedef mat<3, 2, f64, highp> highp_f64mat3x2; - typedef mat<3, 3, f64, highp> highp_f64mat3x3; - typedef mat<3, 4, f64, highp> 
highp_f64mat3x4; - typedef mat<4, 2, f64, highp> highp_f64mat4x2; - typedef mat<4, 3, f64, highp> highp_f64mat4x3; - typedef mat<4, 4, f64, highp> highp_f64mat4x4; - - typedef mat<2, 2, f64, defaultp> f64mat2x2; - typedef mat<3, 2, f64, defaultp> f64mat3x2; - typedef mat<4, 2, f64, defaultp> f64mat4x2; - typedef mat<2, 3, f64, defaultp> f64mat2x3; - typedef mat<3, 3, f64, defaultp> f64mat3x3; - typedef mat<4, 3, f64, defaultp> f64mat4x3; - typedef mat<2, 4, f64, defaultp> f64mat2x4; - typedef mat<3, 4, f64, defaultp> f64mat3x4; - typedef mat<4, 4, f64, defaultp> f64mat4x4; - - // Quaternion - - typedef qua lowp_quat; - typedef qua mediump_quat; - typedef qua highp_quat; - typedef qua quat; - - typedef qua lowp_fquat; - typedef qua mediump_fquat; - typedef qua highp_fquat; - typedef qua fquat; - - typedef qua lowp_f32quat; - typedef qua mediump_f32quat; - typedef qua highp_f32quat; - typedef qua f32quat; - - typedef qua lowp_dquat; - typedef qua mediump_dquat; - typedef qua highp_dquat; - typedef qua dquat; - - typedef qua lowp_f64quat; - typedef qua mediump_f64quat; - typedef qua highp_f64quat; - typedef qua f64quat; -}//namespace glm - - diff --git a/third_party/glm/geometric.hpp b/third_party/glm/geometric.hpp deleted file mode 100755 index c068a3c..0000000 --- a/third_party/glm/geometric.hpp +++ /dev/null @@ -1,116 +0,0 @@ -/// @ref core -/// @file glm/geometric.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions -/// -/// @defgroup core_func_geometric Geometric functions -/// @ingroup core -/// -/// These operate on vectors as vectors, not component-wise. -/// -/// Include to use these core features. - -#pragma once - -#include "detail/type_vec3.hpp" - -namespace glm -{ - /// @addtogroup core_func_geometric - /// @{ - - /// Returns the length of x, i.e., sqrt(x * x). - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL length man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL T length(vec const& x); - - /// Returns the distance betwwen p0 and p1, i.e., length(p0 - p1). - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL distance man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL T distance(vec const& p0, vec const& p1); - - /// Returns the dot product of x and y, i.e., result = x * y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL dot man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL T dot(vec const& x, vec const& y); - - /// Returns the cross product of x and y. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL cross man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL vec<3, T, Q> cross(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - /// Returns a vector in the same direction as x but with length of 1. - /// According to issue 10 GLSL 1.10 specification, if length(x) == 0 then result is undefined and generate an error. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. 
- /// - /// @see GLSL normalize man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL vec normalize(vec const& x); - - /// If dot(Nref, I) < 0.0, return N, otherwise, return -N. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL faceforward man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL vec faceforward( - vec const& N, - vec const& I, - vec const& Nref); - - /// For the incident vector I and surface orientation N, - /// returns the reflection direction : result = I - 2.0 * dot(N, I) * N. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL reflect man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL vec reflect( - vec const& I, - vec const& N); - - /// For the incident vector I and surface normal N, - /// and the ratio of indices of refraction eta, - /// return the refraction vector. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Floating-point scalar types. - /// - /// @see GLSL refract man page - /// @see GLSL 4.20.8 specification, section 8.5 Geometric Functions - template - GLM_FUNC_DECL vec refract( - vec const& I, - vec const& N, - T eta); - - /// @} -}//namespace glm - -#include "detail/func_geometric.inl" diff --git a/third_party/glm/glm.hpp b/third_party/glm/glm.hpp deleted file mode 100755 index 8b61064..0000000 --- a/third_party/glm/glm.hpp +++ /dev/null @@ -1,136 +0,0 @@ -/// @ref core -/// @file glm/glm.hpp -/// -/// @defgroup core Core features -/// -/// @brief Features that implement in C++ the GLSL specification as closely as possible. -/// -/// The GLM core consists of C++ types that mirror GLSL types and -/// C++ functions that mirror the GLSL functions. -/// -/// The best documentation for GLM Core is the current GLSL specification, -/// version 4.2 -/// (pdf file). -/// -/// GLM core functionalities require to be included to be used. -/// -/// -/// @defgroup core_vector Vector types -/// -/// Vector types of two to four components with an exhaustive set of operators. -/// -/// @ingroup core -/// -/// -/// @defgroup core_vector_precision Vector types with precision qualifiers -/// -/// @brief Vector types with precision qualifiers which may result in various precision in term of ULPs -/// -/// GLSL allows defining qualifiers for particular variables. -/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility, -/// with OpenGL ES's GLSL, these qualifiers do have an effect. -/// -/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing: -/// a number of typedefs that use a particular qualifier. -/// -/// None of these types make any guarantees about the actual qualifier used. -/// -/// @ingroup core -/// -/// -/// @defgroup core_matrix Matrix types -/// -/// Matrix types of with C columns and R rows where C and R are values between 2 to 4 included. -/// These types have exhaustive sets of operators. 
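// Illustrative sketch only (generic example, not repository code): the core geometric
// functions whose declarations were removed above.
#include <glm/vec3.hpp>
#include <glm/geometric.hpp>

// Unit-length normal of a triangle; normalize() of a zero-length vector is undefined,
// so callers must reject degenerate triangles first.
inline glm::vec3 triangleNormal(glm::vec3 a, glm::vec3 b, glm::vec3 c)
{
	return glm::normalize(glm::cross(b - a, c - a));
}

// Mirror an incident direction about a unit-length surface normal.
inline glm::vec3 bounce(glm::vec3 incident, glm::vec3 normal)
{
	return glm::reflect(incident, normal);
}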
-/// -/// @ingroup core -/// -/// -/// @defgroup core_matrix_precision Matrix types with precision qualifiers -/// -/// @brief Matrix types with precision qualifiers which may result in various precision in term of ULPs -/// -/// GLSL allows defining qualifiers for particular variables. -/// With OpenGL's GLSL, these qualifiers have no effect; they are there for compatibility, -/// with OpenGL ES's GLSL, these qualifiers do have an effect. -/// -/// C++ has no language equivalent to qualifier qualifiers. So GLM provides the next-best thing: -/// a number of typedefs that use a particular qualifier. -/// -/// None of these types make any guarantees about the actual qualifier used. -/// -/// @ingroup core -/// -/// -/// @defgroup ext Stable extensions -/// -/// @brief Additional features not specified by GLSL specification. -/// -/// EXT extensions are fully tested and documented. -/// -/// Even if it's highly unrecommended, it's possible to include all the extensions at once by -/// including . Otherwise, each extension needs to be included a specific file. -/// -/// -/// @defgroup gtc Recommended extensions -/// -/// @brief Additional features not specified by GLSL specification. -/// -/// GTC extensions aim to be stable with tests and documentation. -/// -/// Even if it's highly unrecommended, it's possible to include all the extensions at once by -/// including . Otherwise, each extension needs to be included a specific file. -/// -/// -/// @defgroup gtx Experimental extensions -/// -/// @brief Experimental features not specified by GLSL specification. -/// -/// Experimental extensions are useful functions and types, but the development of -/// their API and functionality is not necessarily stable. They can change -/// substantially between versions. Backwards compatibility is not much of an issue -/// for them. -/// -/// Even if it's highly unrecommended, it's possible to include all the extensions -/// at once by including . Otherwise, each extension needs to be -/// included a specific file. -/// -/// @mainpage OpenGL Mathematics (GLM) -/// - Website: glm.g-truc.net -/// - GLM API documentation -/// - GLM Manual - -#include "detail/_fixes.hpp" - -#include "detail/setup.hpp" - -#pragma once - -#include -#include -#include -#include -#include -#include "fwd.hpp" - -#include "vec2.hpp" -#include "vec3.hpp" -#include "vec4.hpp" -#include "mat2x2.hpp" -#include "mat2x3.hpp" -#include "mat2x4.hpp" -#include "mat3x2.hpp" -#include "mat3x3.hpp" -#include "mat3x4.hpp" -#include "mat4x2.hpp" -#include "mat4x3.hpp" -#include "mat4x4.hpp" - -#include "trigonometric.hpp" -#include "exponential.hpp" -#include "common.hpp" -#include "packing.hpp" -#include "geometric.hpp" -#include "matrix.hpp" -#include "vector_relational.hpp" -#include "integer.hpp" diff --git a/third_party/glm/gtc/bitfield.hpp b/third_party/glm/gtc/bitfield.hpp deleted file mode 100755 index 084fbe7..0000000 --- a/third_party/glm/gtc/bitfield.hpp +++ /dev/null @@ -1,266 +0,0 @@ -/// @ref gtc_bitfield -/// @file glm/gtc/bitfield.hpp -/// -/// @see core (dependence) -/// @see gtc_bitfield (dependence) -/// -/// @defgroup gtc_bitfield GLM_GTC_bitfield -/// @ingroup gtc -/// -/// Include to use the features of this extension. 
-/// -/// Allow to perform bit operations on integer values - -#include "../detail/setup.hpp" - -#pragma once - -// Dependencies -#include "../ext/scalar_int_sized.hpp" -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "type_precision.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_bitfield extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_bitfield - /// @{ - - /// Build a mask of 'count' bits - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType mask(genIUType Bits); - - /// Build a mask of 'count' bits - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec mask(vec const& v); - - /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldRotateRight(genIUType In, int Shift); - - /// Rotate all bits to the right. All the bits dropped in the right side are inserted back on the left side. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldRotateRight(vec const& In, int Shift); - - /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldRotateLeft(genIUType In, int Shift); - - /// Rotate all bits to the left. All the bits dropped in the left side are inserted back on the right side. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldRotateLeft(vec const& In, int Shift); - - /// Set to 1 a range of bits. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount); - - /// Set to 1 a range of bits. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldFillOne(vec const& Value, int FirstBit, int BitCount); - - /// Set to 0 a range of bits. - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount); - - /// Set to 0 a range of bits. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Signed and unsigned integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_bitfield - template - GLM_FUNC_DECL vec bitfieldFillZero(vec const& Value, int FirstBit, int BitCount); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. 
- /// - /// @see gtc_bitfield - GLM_FUNC_DECL int16 bitfieldInterleave(int8 x, int8 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint16 bitfieldInterleave(uint8 x, uint8 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of v.x followed by the first bit of v.y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint16 bitfieldInterleave(u8vec2 const& v); - - /// Deinterleaves the bits of x. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL glm::u8vec2 bitfieldDeinterleave(glm::uint16 x); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int32 bitfieldInterleave(int16 x, int16 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(uint16 x, uint16 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of v.x followed by the first bit of v.y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(u16vec2 const& v); - - /// Deinterleaves the bits of x. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL glm::u16vec2 bitfieldDeinterleave(glm::uint32 x); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of x followed by the first bit of y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y); - - /// Interleaves the bits of x and y. - /// The first bit is the first bit of v.x followed by the first bit of v.y. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(u32vec2 const& v); - - /// Deinterleaves the bits of x. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL glm::u32vec2 bitfieldDeinterleave(glm::uint64 x); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. 
- /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int32 x, int32 y, int32 z); - - /// Interleaves the bits of x, y and z. - /// The first bit is the first bit of x followed by the first bit of y and the first bit of z. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. - /// - /// @see gtc_bitfield - GLM_FUNC_DECL int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w); - - /// Interleaves the bits of x, y, z and w. - /// The first bit is the first bit of x followed by the first bit of y, the first bit of z and finally the first bit of w. - /// The other bits are interleaved following the previous sequence. 
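The bitfieldInterleave declarations above spread each operand's bits apart and merge them (Morton order). A minimal sketch of the 8-bit/8-bit case, using the same shift-and-mask constants that appear in the removed bitfield.inl further below (interleave8 is an illustrative name, not a GLM symbol):

// Sketch of 2-way bit interleaving for two 8-bit values, mirroring the
// shift-and-mask pattern used by the removed bitfield.inl.
#include <cstdint>
#include <cstdio>

static std::uint16_t interleave8(std::uint8_t x, std::uint8_t y)
{
    std::uint16_t a = x;
    std::uint16_t b = y;

    // Spread each byte so its bits occupy every other position.
    a = (a | (a << 4)) & 0x0F0F;
    b = (b | (b << 4)) & 0x0F0F;
    a = (a | (a << 2)) & 0x3333;
    b = (b | (b << 2)) & 0x3333;
    a = (a | (a << 1)) & 0x5555;
    b = (b | (b << 1)) & 0x5555;

    // x provides the even bits, y the odd bits.
    return static_cast<std::uint16_t>(a | (b << 1));
}

int main()
{
    std::printf("0x%04X\n", interleave8(0xFF, 0x00)); // 0x5555: only x's bits set
    return 0;
}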
- /// - /// @see gtc_bitfield - GLM_FUNC_DECL uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w); - - /// @} -} //namespace glm - -#include "bitfield.inl" diff --git a/third_party/glm/gtc/bitfield.inl b/third_party/glm/gtc/bitfield.inl deleted file mode 100755 index 06cf188..0000000 --- a/third_party/glm/gtc/bitfield.inl +++ /dev/null @@ -1,626 +0,0 @@ -/// @ref gtc_bitfield - -#include "../simd/integer.h" - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y); - - template - GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z); - - template - GLM_FUNC_DECL RET bitfieldInterleave(PARAM x, PARAM y, PARAM z, PARAM w); - - template<> - GLM_FUNC_QUALIFIER glm::uint16 bitfieldInterleave(glm::uint8 x, glm::uint8 y) - { - glm::uint16 REG1(x); - glm::uint16 REG2(y); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x3333); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x3333); - - REG1 = ((REG1 << 1) | REG1) & static_cast(0x5555); - REG2 = ((REG2 << 1) | REG2) & static_cast(0x5555); - - return REG1 | static_cast(REG2 << 1); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint16 x, glm::uint16 y) - { - glm::uint32 REG1(x); - glm::uint32 REG2(y); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0x00FF00FF); - REG2 = ((REG2 << 8) | REG2) & static_cast(0x00FF00FF); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F0F0F); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F0F0F); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x33333333); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x33333333); - - REG1 = ((REG1 << 1) | REG1) & static_cast(0x55555555); - REG2 = ((REG2 << 1) | REG2) & static_cast(0x55555555); - - return REG1 | (REG2 << 1); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0x0000FFFF0000FFFFull); - REG2 = ((REG2 << 16) | REG2) & static_cast(0x0000FFFF0000FFFFull); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0x00FF00FF00FF00FFull); - REG2 = ((REG2 << 8) | REG2) & static_cast(0x00FF00FF00FF00FFull); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x0F0F0F0F0F0F0F0Full); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x0F0F0F0F0F0F0F0Full); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x3333333333333333ull); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x3333333333333333ull); - - REG1 = ((REG1 << 1) | REG1) & static_cast(0x5555555555555555ull); - REG2 = ((REG2 << 1) | REG2) & static_cast(0x5555555555555555ull); - - return REG1 | (REG2 << 1); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z) - { - glm::uint32 REG1(x); - glm::uint32 REG2(y); - glm::uint32 REG3(z); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0xFF0000FFu); - REG2 = ((REG2 << 16) | REG2) & static_cast(0xFF0000FFu); - REG3 = ((REG3 << 16) | REG3) & static_cast(0xFF0000FFu); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0x0F00F00Fu); - REG2 = ((REG2 << 8) | REG2) & static_cast(0x0F00F00Fu); - REG3 = ((REG3 << 8) | REG3) & static_cast(0x0F00F00Fu); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0xC30C30C3u); - REG2 = ((REG2 << 4) | REG2) & static_cast(0xC30C30C3u); - REG3 = ((REG3 << 4) | REG3) & static_cast(0xC30C30C3u); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x49249249u); - REG2 = ((REG2 << 2) | REG2) & 
static_cast(0x49249249u); - REG3 = ((REG3 << 2) | REG3) & static_cast(0x49249249u); - - return REG1 | (REG2 << 1) | (REG3 << 2); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - glm::uint64 REG3(z); - - REG1 = ((REG1 << 32) | REG1) & static_cast(0xFFFF00000000FFFFull); - REG2 = ((REG2 << 32) | REG2) & static_cast(0xFFFF00000000FFFFull); - REG3 = ((REG3 << 32) | REG3) & static_cast(0xFFFF00000000FFFFull); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0x00FF0000FF0000FFull); - REG2 = ((REG2 << 16) | REG2) & static_cast(0x00FF0000FF0000FFull); - REG3 = ((REG3 << 16) | REG3) & static_cast(0x00FF0000FF0000FFull); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0xF00F00F00F00F00Full); - REG2 = ((REG2 << 8) | REG2) & static_cast(0xF00F00F00F00F00Full); - REG3 = ((REG3 << 8) | REG3) & static_cast(0xF00F00F00F00F00Full); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x30C30C30C30C30C3ull); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x30C30C30C30C30C3ull); - REG3 = ((REG3 << 4) | REG3) & static_cast(0x30C30C30C30C30C3ull); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x9249249249249249ull); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x9249249249249249ull); - REG3 = ((REG3 << 2) | REG3) & static_cast(0x9249249249249249ull); - - return REG1 | (REG2 << 1) | (REG3 << 2); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint32 x, glm::uint32 y, glm::uint32 z) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - glm::uint64 REG3(z); - - REG1 = ((REG1 << 32) | REG1) & static_cast(0xFFFF00000000FFFFull); - REG2 = ((REG2 << 32) | REG2) & static_cast(0xFFFF00000000FFFFull); - REG3 = ((REG3 << 32) | REG3) & static_cast(0xFFFF00000000FFFFull); - - REG1 = ((REG1 << 16) | REG1) & static_cast(0x00FF0000FF0000FFull); - REG2 = ((REG2 << 16) | REG2) & static_cast(0x00FF0000FF0000FFull); - REG3 = ((REG3 << 16) | REG3) & static_cast(0x00FF0000FF0000FFull); - - REG1 = ((REG1 << 8) | REG1) & static_cast(0xF00F00F00F00F00Full); - REG2 = ((REG2 << 8) | REG2) & static_cast(0xF00F00F00F00F00Full); - REG3 = ((REG3 << 8) | REG3) & static_cast(0xF00F00F00F00F00Full); - - REG1 = ((REG1 << 4) | REG1) & static_cast(0x30C30C30C30C30C3ull); - REG2 = ((REG2 << 4) | REG2) & static_cast(0x30C30C30C30C30C3ull); - REG3 = ((REG3 << 4) | REG3) & static_cast(0x30C30C30C30C30C3ull); - - REG1 = ((REG1 << 2) | REG1) & static_cast(0x9249249249249249ull); - REG2 = ((REG2 << 2) | REG2) & static_cast(0x9249249249249249ull); - REG3 = ((REG3 << 2) | REG3) & static_cast(0x9249249249249249ull); - - return REG1 | (REG2 << 1) | (REG3 << 2); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(glm::uint8 x, glm::uint8 y, glm::uint8 z, glm::uint8 w) - { - glm::uint32 REG1(x); - glm::uint32 REG2(y); - glm::uint32 REG3(z); - glm::uint32 REG4(w); - - REG1 = ((REG1 << 12) | REG1) & static_cast(0x000F000Fu); - REG2 = ((REG2 << 12) | REG2) & static_cast(0x000F000Fu); - REG3 = ((REG3 << 12) | REG3) & static_cast(0x000F000Fu); - REG4 = ((REG4 << 12) | REG4) & static_cast(0x000F000Fu); - - REG1 = ((REG1 << 6) | REG1) & static_cast(0x03030303u); - REG2 = ((REG2 << 6) | REG2) & static_cast(0x03030303u); - REG3 = ((REG3 << 6) | REG3) & static_cast(0x03030303u); - REG4 = ((REG4 << 6) | REG4) & static_cast(0x03030303u); - - REG1 = ((REG1 << 3) | REG1) & static_cast(0x11111111u); - REG2 = ((REG2 << 3) | REG2) & static_cast(0x11111111u); - REG3 = ((REG3 << 3) | REG3) & static_cast(0x11111111u); - REG4 = 
((REG4 << 3) | REG4) & static_cast(0x11111111u); - - return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3); - } - - template<> - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(glm::uint16 x, glm::uint16 y, glm::uint16 z, glm::uint16 w) - { - glm::uint64 REG1(x); - glm::uint64 REG2(y); - glm::uint64 REG3(z); - glm::uint64 REG4(w); - - REG1 = ((REG1 << 24) | REG1) & static_cast(0x000000FF000000FFull); - REG2 = ((REG2 << 24) | REG2) & static_cast(0x000000FF000000FFull); - REG3 = ((REG3 << 24) | REG3) & static_cast(0x000000FF000000FFull); - REG4 = ((REG4 << 24) | REG4) & static_cast(0x000000FF000000FFull); - - REG1 = ((REG1 << 12) | REG1) & static_cast(0x000F000F000F000Full); - REG2 = ((REG2 << 12) | REG2) & static_cast(0x000F000F000F000Full); - REG3 = ((REG3 << 12) | REG3) & static_cast(0x000F000F000F000Full); - REG4 = ((REG4 << 12) | REG4) & static_cast(0x000F000F000F000Full); - - REG1 = ((REG1 << 6) | REG1) & static_cast(0x0303030303030303ull); - REG2 = ((REG2 << 6) | REG2) & static_cast(0x0303030303030303ull); - REG3 = ((REG3 << 6) | REG3) & static_cast(0x0303030303030303ull); - REG4 = ((REG4 << 6) | REG4) & static_cast(0x0303030303030303ull); - - REG1 = ((REG1 << 3) | REG1) & static_cast(0x1111111111111111ull); - REG2 = ((REG2 << 3) | REG2) & static_cast(0x1111111111111111ull); - REG3 = ((REG3 << 3) | REG3) & static_cast(0x1111111111111111ull); - REG4 = ((REG4 << 3) | REG4) & static_cast(0x1111111111111111ull); - - return REG1 | (REG2 << 1) | (REG3 << 2) | (REG4 << 3); - } -}//namespace detail - - template - GLM_FUNC_QUALIFIER genIUType mask(genIUType Bits) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'mask' accepts only integer values"); - - return Bits >= sizeof(genIUType) * 8 ? ~static_cast(0) : (static_cast(1) << Bits) - static_cast(1); - } - - template - GLM_FUNC_QUALIFIER vec mask(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'mask' accepts only integer values"); - - return detail::functor1::call(mask, v); - } - - template - GLM_FUNC_QUALIFIER genIType bitfieldRotateRight(genIType In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateRight' accepts only integer values"); - - int const BitSize = static_cast(sizeof(genIType) * 8); - return (In << static_cast(Shift)) | (In >> static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldRotateRight(vec const& In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateRight' accepts only integer values"); - - int const BitSize = static_cast(sizeof(T) * 8); - return (In << static_cast(Shift)) | (In >> static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER genIType bitfieldRotateLeft(genIType In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateLeft' accepts only integer values"); - - int const BitSize = static_cast(sizeof(genIType) * 8); - return (In >> static_cast(Shift)) | (In << static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldRotateLeft(vec const& In, int Shift) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "'bitfieldRotateLeft' accepts only integer values"); - - int const BitSize = static_cast(sizeof(T) * 8); - return (In >> static_cast(Shift)) | (In << static_cast(BitSize - Shift)); - } - - template - GLM_FUNC_QUALIFIER genIUType bitfieldFillOne(genIUType Value, int FirstBit, int BitCount) - { - return Value | static_cast(mask(BitCount) << FirstBit); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldFillOne(vec const& 
Value, int FirstBit, int BitCount) - { - return Value | static_cast(mask(BitCount) << FirstBit); - } - - template - GLM_FUNC_QUALIFIER genIUType bitfieldFillZero(genIUType Value, int FirstBit, int BitCount) - { - return Value & static_cast(~(mask(BitCount) << FirstBit)); - } - - template - GLM_FUNC_QUALIFIER vec bitfieldFillZero(vec const& Value, int FirstBit, int BitCount) - { - return Value & static_cast(~(mask(BitCount) << FirstBit)); - } - - GLM_FUNC_QUALIFIER int16 bitfieldInterleave(int8 x, int8 y) - { - union sign8 - { - int8 i; - uint8 u; - } sign_x, sign_y; - - union sign16 - { - int16 i; - uint16 u; - } result; - - sign_x.i = x; - sign_y.i = y; - result.u = bitfieldInterleave(sign_x.u, sign_y.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(uint8 x, uint8 y) - { - return detail::bitfieldInterleave(x, y); - } - - GLM_FUNC_QUALIFIER uint16 bitfieldInterleave(u8vec2 const& v) - { - return detail::bitfieldInterleave(v.x, v.y); - } - - GLM_FUNC_QUALIFIER u8vec2 bitfieldDeinterleave(glm::uint16 x) - { - uint16 REG1(x); - uint16 REG2(x >>= 1); - - REG1 = REG1 & static_cast(0x5555); - REG2 = REG2 & static_cast(0x5555); - - REG1 = ((REG1 >> 1) | REG1) & static_cast(0x3333); - REG2 = ((REG2 >> 1) | REG2) & static_cast(0x3333); - - REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F); - REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F); - - REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF); - REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF); - - REG1 = ((REG1 >> 8) | REG1) & static_cast(0xFFFF); - REG2 = ((REG2 >> 8) | REG2) & static_cast(0xFFFF); - - return glm::u8vec2(REG1, REG2); - } - - GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int16 x, int16 y) - { - union sign16 - { - int16 i; - uint16 u; - } sign_x, sign_y; - - union sign32 - { - int32 i; - uint32 u; - } result; - - sign_x.i = x; - sign_y.i = y; - result.u = bitfieldInterleave(sign_x.u, sign_y.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint16 x, uint16 y) - { - return detail::bitfieldInterleave(x, y); - } - - GLM_FUNC_QUALIFIER glm::uint32 bitfieldInterleave(u16vec2 const& v) - { - return detail::bitfieldInterleave(v.x, v.y); - } - - GLM_FUNC_QUALIFIER glm::u16vec2 bitfieldDeinterleave(glm::uint32 x) - { - glm::uint32 REG1(x); - glm::uint32 REG2(x >>= 1); - - REG1 = REG1 & static_cast(0x55555555); - REG2 = REG2 & static_cast(0x55555555); - - REG1 = ((REG1 >> 1) | REG1) & static_cast(0x33333333); - REG2 = ((REG2 >> 1) | REG2) & static_cast(0x33333333); - - REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F0F0F); - REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F0F0F); - - REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF00FF); - REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF00FF); - - REG1 = ((REG1 >> 8) | REG1) & static_cast(0x0000FFFF); - REG2 = ((REG2 >> 8) | REG2) & static_cast(0x0000FFFF); - - return glm::u16vec2(REG1, REG2); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y) - { - union sign32 - { - int32 i; - uint32 u; - } sign_x, sign_y; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = x; - sign_y.i = y; - result.u = bitfieldInterleave(sign_x.u, sign_y.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y) - { - return detail::bitfieldInterleave(x, y); - } - - GLM_FUNC_QUALIFIER glm::uint64 bitfieldInterleave(u32vec2 const& v) - { - return detail::bitfieldInterleave(v.x, v.y); - } - - GLM_FUNC_QUALIFIER glm::u32vec2 bitfieldDeinterleave(glm::uint64 x) - { - glm::uint64 REG1(x); - 
glm::uint64 REG2(x >>= 1); - - REG1 = REG1 & static_cast(0x5555555555555555ull); - REG2 = REG2 & static_cast(0x5555555555555555ull); - - REG1 = ((REG1 >> 1) | REG1) & static_cast(0x3333333333333333ull); - REG2 = ((REG2 >> 1) | REG2) & static_cast(0x3333333333333333ull); - - REG1 = ((REG1 >> 2) | REG1) & static_cast(0x0F0F0F0F0F0F0F0Full); - REG2 = ((REG2 >> 2) | REG2) & static_cast(0x0F0F0F0F0F0F0F0Full); - - REG1 = ((REG1 >> 4) | REG1) & static_cast(0x00FF00FF00FF00FFull); - REG2 = ((REG2 >> 4) | REG2) & static_cast(0x00FF00FF00FF00FFull); - - REG1 = ((REG1 >> 8) | REG1) & static_cast(0x0000FFFF0000FFFFull); - REG2 = ((REG2 >> 8) | REG2) & static_cast(0x0000FFFF0000FFFFull); - - REG1 = ((REG1 >> 16) | REG1) & static_cast(0x00000000FFFFFFFFull); - REG2 = ((REG2 >> 16) | REG2) & static_cast(0x00000000FFFFFFFFull); - - return glm::u32vec2(REG1, REG2); - } - - GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z) - { - union sign8 - { - int8 i; - uint8 u; - } sign_x, sign_y, sign_z; - - union sign32 - { - int32 i; - uint32 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z) - { - return detail::bitfieldInterleave(x, y, z); - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec3 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z) - { - union sign16 - { - int16 i; - uint16 u; - } sign_x, sign_y, sign_z; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z) - { - return detail::bitfieldInterleave(x, y, z); - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec3 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int32 x, int32 y, int32 z) - { - union sign16 - { - int32 i; - uint32 u; - } sign_x, sign_y, sign_z; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint32 x, uint32 y, uint32 z) - { - return detail::bitfieldInterleave(x, y, z); - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u32vec3 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z); - } - - GLM_FUNC_QUALIFIER int32 bitfieldInterleave(int8 x, int8 y, int8 z, int8 w) - { - union sign8 - { - int8 i; - uint8 u; - } sign_x, sign_y, sign_z, sign_w; - - union sign32 - { - int32 i; - uint32 u; - } result; - - sign_x.i = x; - sign_y.i = y; - sign_z.i = z; - sign_w.i = w; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(uint8 x, uint8 y, uint8 z, uint8 w) - { - return detail::bitfieldInterleave(x, y, z, w); - } - - GLM_FUNC_QUALIFIER uint32 bitfieldInterleave(u8vec4 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z, v.w); - } - - GLM_FUNC_QUALIFIER int64 bitfieldInterleave(int16 x, int16 y, int16 z, int16 w) - { - union sign16 - { - int16 i; - uint16 u; - } sign_x, sign_y, sign_z, sign_w; - - union sign64 - { - int64 i; - uint64 u; - } result; - - sign_x.i = 
x; - sign_y.i = y; - sign_z.i = z; - sign_w.i = w; - result.u = bitfieldInterleave(sign_x.u, sign_y.u, sign_z.u, sign_w.u); - - return result.i; - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(uint16 x, uint16 y, uint16 z, uint16 w) - { - return detail::bitfieldInterleave(x, y, z, w); - } - - GLM_FUNC_QUALIFIER uint64 bitfieldInterleave(u16vec4 const& v) - { - return detail::bitfieldInterleave(v.x, v.y, v.z, v.w); - } -}//namespace glm diff --git a/third_party/glm/gtc/color_space.hpp b/third_party/glm/gtc/color_space.hpp deleted file mode 100755 index cffd9f0..0000000 --- a/third_party/glm/gtc/color_space.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/// @ref gtc_color_space -/// @file glm/gtc/color_space.hpp -/// -/// @see core (dependence) -/// @see gtc_color_space (dependence) -/// -/// @defgroup gtc_color_space GLM_GTC_color_space -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../exponential.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_color_space extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_color_space - /// @{ - - /// Convert a linear color to sRGB color using a standard gamma correction. - /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertLinearToSRGB(vec const& ColorLinear); - - /// Convert a linear color to sRGB color using a custom gamma correction. - /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertLinearToSRGB(vec const& ColorLinear, T Gamma); - - /// Convert a sRGB color to linear color using a standard gamma correction. - /// IEC 61966-2-1:1999 / Rec. 709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertSRGBToLinear(vec const& ColorSRGB); - - /// Convert a sRGB color to linear color using a custom gamma correction. - // IEC 61966-2-1:1999 / Rec. 
709 specification https://www.w3.org/Graphics/Color/srgb - template - GLM_FUNC_DECL vec convertSRGBToLinear(vec const& ColorSRGB, T Gamma); - - /// @} -} //namespace glm - -#include "color_space.inl" diff --git a/third_party/glm/gtc/color_space.inl b/third_party/glm/gtc/color_space.inl deleted file mode 100755 index 2a90004..0000000 --- a/third_party/glm/gtc/color_space.inl +++ /dev/null @@ -1,84 +0,0 @@ -/// @ref gtc_color_space - -namespace glm{ -namespace detail -{ - template - struct compute_rgbToSrgb - { - GLM_FUNC_QUALIFIER static vec call(vec const& ColorRGB, T GammaCorrection) - { - vec const ClampedColor(clamp(ColorRGB, static_cast(0), static_cast(1))); - - return mix( - pow(ClampedColor, vec(GammaCorrection)) * static_cast(1.055) - static_cast(0.055), - ClampedColor * static_cast(12.92), - lessThan(ClampedColor, vec(static_cast(0.0031308)))); - } - }; - - template - struct compute_rgbToSrgb<4, T, Q> - { - GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorRGB, T GammaCorrection) - { - return vec<4, T, Q>(compute_rgbToSrgb<3, T, Q>::call(vec<3, T, Q>(ColorRGB), GammaCorrection), ColorRGB.w); - } - }; - - template - struct compute_srgbToRgb - { - GLM_FUNC_QUALIFIER static vec call(vec const& ColorSRGB, T Gamma) - { - return mix( - pow((ColorSRGB + static_cast(0.055)) * static_cast(0.94786729857819905213270142180095), vec(Gamma)), - ColorSRGB * static_cast(0.07739938080495356037151702786378), - lessThanEqual(ColorSRGB, vec(static_cast(0.04045)))); - } - }; - - template - struct compute_srgbToRgb<4, T, Q> - { - GLM_FUNC_QUALIFIER static vec<4, T, Q> call(vec<4, T, Q> const& ColorSRGB, T Gamma) - { - return vec<4, T, Q>(compute_srgbToRgb<3, T, Q>::call(vec<3, T, Q>(ColorSRGB), Gamma), ColorSRGB.w); - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER vec convertLinearToSRGB(vec const& ColorLinear) - { - return detail::compute_rgbToSrgb::call(ColorLinear, static_cast(0.41666)); - } - - // Based on Ian Taylor http://chilliant.blogspot.fr/2012/08/srgb-approximations-for-hlsl.html - template<> - GLM_FUNC_QUALIFIER vec<3, float, lowp> convertLinearToSRGB(vec<3, float, lowp> const& ColorLinear) - { - vec<3, float, lowp> S1 = sqrt(ColorLinear); - vec<3, float, lowp> S2 = sqrt(S1); - vec<3, float, lowp> S3 = sqrt(S2); - return 0.662002687f * S1 + 0.684122060f * S2 - 0.323583601f * S3 - 0.0225411470f * ColorLinear; - } - - template - GLM_FUNC_QUALIFIER vec convertLinearToSRGB(vec const& ColorLinear, T Gamma) - { - return detail::compute_rgbToSrgb::call(ColorLinear, static_cast(1) / Gamma); - } - - template - GLM_FUNC_QUALIFIER vec convertSRGBToLinear(vec const& ColorSRGB) - { - return detail::compute_srgbToRgb::call(ColorSRGB, static_cast(2.4)); - } - - template - GLM_FUNC_QUALIFIER vec convertSRGBToLinear(vec const& ColorSRGB, T Gamma) - { - return detail::compute_srgbToRgb::call(ColorSRGB, Gamma); - } -}//namespace glm diff --git a/third_party/glm/gtc/constants.hpp b/third_party/glm/gtc/constants.hpp deleted file mode 100755 index 99f2128..0000000 --- a/third_party/glm/gtc/constants.hpp +++ /dev/null @@ -1,165 +0,0 @@ -/// @ref gtc_constants -/// @file glm/gtc/constants.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_constants GLM_GTC_constants -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Provide a list of constants and precomputed useful values. 
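The removed gtc/color_space implementation above encodes linear values piecewise: a 12.92 linear segment below 0.0031308 and a 1.055 * pow(c, 0.41666) - 0.055 curve elsewhere. A single-channel sketch under those constants (linearToSrgb is an illustrative helper, not part of GLM):

// Scalar sketch of the linear -> sRGB transfer function used by the removed
// gtc/color_space files (IEC 61966-2-1 constants).
#include <cmath>
#include <cstdio>

static float linearToSrgb(float c)
{
    // Clamp to [0, 1] first, as the removed implementation does.
    c = c < 0.0f ? 0.0f : (c > 1.0f ? 1.0f : c);

    // Linear segment near black, power curve elsewhere.
    if (c < 0.0031308f)
        return c * 12.92f;
    return 1.055f * std::pow(c, 0.41666f) - 0.055f; // exponent ~= 1/2.4
}

int main()
{
    std::printf("%.4f\n", linearToSrgb(0.5f)); // ~0.7354
    return 0;
}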
- -#pragma once - -// Dependencies -#include "../ext/scalar_constants.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_constants extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_constants - /// @{ - - /// Return 0. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType zero(); - - /// Return 1. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one(); - - /// Return pi * 2. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_pi(); - - /// Return square root of pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_pi(); - - /// Return pi / 2. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType half_pi(); - - /// Return pi / 2 * 3. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType three_over_two_pi(); - - /// Return pi / 4. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType quarter_pi(); - - /// Return 1 / pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_pi(); - - /// Return 1 / (pi * 2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_two_pi(); - - /// Return 2 / pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_pi(); - - /// Return 4 / pi. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType four_over_pi(); - - /// Return 2 / sqrt(pi). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_over_root_pi(); - - /// Return 1 / sqrt(2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType one_over_root_two(); - - /// Return sqrt(pi / 2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_half_pi(); - - /// Return sqrt(2 * pi). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_two_pi(); - - /// Return sqrt(ln(4)). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_ln_four(); - - /// Return e constant. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType e(); - - /// Return Euler's constant. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType euler(); - - /// Return sqrt(2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_two(); - - /// Return sqrt(3). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_three(); - - /// Return sqrt(5). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType root_five(); - - /// Return ln(2). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType ln_two(); - - /// Return ln(10). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ten(); - - /// Return ln(ln(2)). - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType ln_ln_two(); - - /// Return 1 / 3. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType third(); - - /// Return 2 / 3. - /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType two_thirds(); - - /// Return the golden ratio constant. 
- /// @see gtc_constants - template - GLM_FUNC_DECL GLM_CONSTEXPR genType golden_ratio(); - - /// @} -} //namespace glm - -#include "constants.inl" diff --git a/third_party/glm/gtc/constants.inl b/third_party/glm/gtc/constants.inl deleted file mode 100755 index bb98c6b..0000000 --- a/third_party/glm/gtc/constants.inl +++ /dev/null @@ -1,167 +0,0 @@ -/// @ref gtc_constants - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType zero() - { - return genType(0); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one() - { - return genType(1); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_pi() - { - return genType(6.28318530717958647692528676655900576); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_pi() - { - return genType(1.772453850905516027); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType half_pi() - { - return genType(1.57079632679489661923132169163975144); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType three_over_two_pi() - { - return genType(4.71238898038468985769396507491925432); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType quarter_pi() - { - return genType(0.785398163397448309615660845819875721); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_pi() - { - return genType(0.318309886183790671537767526745028724); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_two_pi() - { - return genType(0.159154943091895335768883763372514362); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_pi() - { - return genType(0.636619772367581343075535053490057448); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType four_over_pi() - { - return genType(1.273239544735162686151070106980114898); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_over_root_pi() - { - return genType(1.12837916709551257389615890312154517); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType one_over_root_two() - { - return genType(0.707106781186547524400844362104849039); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_half_pi() - { - return genType(1.253314137315500251); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two_pi() - { - return genType(2.506628274631000502); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_ln_four() - { - return genType(1.17741002251547469); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType e() - { - return genType(2.71828182845904523536); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType euler() - { - return genType(0.577215664901532860606); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_two() - { - return genType(1.41421356237309504880168872420969808); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_three() - { - return genType(1.73205080756887729352744634150587236); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType root_five() - { - return genType(2.23606797749978969640917366873127623); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_two() - { - return genType(0.693147180559945309417232121458176568); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ten() - { - return genType(2.30258509299404568401799145468436421); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType ln_ln_two() - { - return genType(-0.3665129205816643); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType third() - { - return genType(0.3333333333333333333333333333333333333333); - } - - 
template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType two_thirds() - { - return genType(0.666666666666666666666666666666666666667); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR genType golden_ratio() - { - return genType(1.61803398874989484820458683436563811); - } - -} //namespace glm diff --git a/third_party/glm/gtc/epsilon.hpp b/third_party/glm/gtc/epsilon.hpp deleted file mode 100755 index 640439b..0000000 --- a/third_party/glm/gtc/epsilon.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtc_epsilon -/// @file glm/gtc/epsilon.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtc_epsilon GLM_GTC_epsilon -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Comparison functions for a user defined epsilon values. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_epsilon extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_epsilon - /// @{ - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL vec epsilonEqual(vec const& x, vec const& y, T const& epsilon); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is satisfied. - /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL bool epsilonEqual(genType const& x, genType const& y, genType const& epsilon); - - /// Returns the component-wise comparison of |x - y| < epsilon. - /// True if this expression is not satisfied. - /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL vec epsilonNotEqual(vec const& x, vec const& y, T const& epsilon); - - /// Returns the component-wise comparison of |x - y| >= epsilon. - /// True if this expression is not satisfied. 
- /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL bool epsilonNotEqual(genType const& x, genType const& y, genType const& epsilon); - - /// @} -}//namespace glm - -#include "epsilon.inl" diff --git a/third_party/glm/gtc/epsilon.inl b/third_party/glm/gtc/epsilon.inl deleted file mode 100755 index 508b9f8..0000000 --- a/third_party/glm/gtc/epsilon.inl +++ /dev/null @@ -1,80 +0,0 @@ -/// @ref gtc_epsilon - -// Dependency: -#include "../vector_relational.hpp" -#include "../common.hpp" - -namespace glm -{ - template<> - GLM_FUNC_QUALIFIER bool epsilonEqual - ( - float const& x, - float const& y, - float const& epsilon - ) - { - return abs(x - y) < epsilon; - } - - template<> - GLM_FUNC_QUALIFIER bool epsilonEqual - ( - double const& x, - double const& y, - double const& epsilon - ) - { - return abs(x - y) < epsilon; - } - - template - GLM_FUNC_QUALIFIER vec epsilonEqual(vec const& x, vec const& y, T const& epsilon) - { - return lessThan(abs(x - y), vec(epsilon)); - } - - template - GLM_FUNC_QUALIFIER vec epsilonEqual(vec const& x, vec const& y, vec const& epsilon) - { - return lessThan(abs(x - y), vec(epsilon)); - } - - template<> - GLM_FUNC_QUALIFIER bool epsilonNotEqual(float const& x, float const& y, float const& epsilon) - { - return abs(x - y) >= epsilon; - } - - template<> - GLM_FUNC_QUALIFIER bool epsilonNotEqual(double const& x, double const& y, double const& epsilon) - { - return abs(x - y) >= epsilon; - } - - template - GLM_FUNC_QUALIFIER vec epsilonNotEqual(vec const& x, vec const& y, T const& epsilon) - { - return greaterThanEqual(abs(x - y), vec(epsilon)); - } - - template - GLM_FUNC_QUALIFIER vec epsilonNotEqual(vec const& x, vec const& y, vec const& epsilon) - { - return greaterThanEqual(abs(x - y), vec(epsilon)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonEqual(qua const& x, qua const& y, T const& epsilon) - { - vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); - return lessThan(abs(v), vec<4, T, Q>(epsilon)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> epsilonNotEqual(qua const& x, qua const& y, T const& epsilon) - { - vec<4, T, Q> v(x.x - y.x, x.y - y.y, x.z - y.z, x.w - y.w); - return greaterThanEqual(abs(v), vec<4, T, Q>(epsilon)); - } -}//namespace glm diff --git a/third_party/glm/gtc/integer.hpp b/third_party/glm/gtc/integer.hpp deleted file mode 100755 index 64ce10b..0000000 --- a/third_party/glm/gtc/integer.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/// @ref gtc_integer -/// @file glm/gtc/integer.hpp -/// -/// @see core (dependence) -/// @see gtc_integer (dependence) -/// -/// @defgroup gtc_integer GLM_GTC_integer -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// @brief Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../common.hpp" -#include "../integer.hpp" -#include "../exponential.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_integer extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_integer - /// @{ - - /// Returns the log2 of x for integer values. Usefull to compute mipmap count from the texture size. - /// @see gtc_integer - template - GLM_FUNC_DECL genIUType log2(genIUType x); - - /// Returns a value equal to the nearest integer to x. - /// The fraction 0.5 will round in a direction chosen by the - /// implementation, presumably the direction that is fastest. 
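The removed gtc/epsilon and gtc/integer helpers documented above reduce to an absolute-difference test (|x - y| < epsilon) and rounding of non-negative values by adding 0.5 before truncation. A scalar sketch of both (nearlyEqual and roundNonNegative are illustrative names, not GLM symbols):

// Scalar sketches of the epsilon comparison and non-negative rounding helpers
// documented in the removed gtc/epsilon and gtc/integer headers.
#include <cassert>
#include <cmath>
#include <cstdio>

// |x - y| < epsilon, the test performed by epsilonEqual().
static bool nearlyEqual(float x, float y, float epsilon)
{
    return std::fabs(x - y) < epsilon;
}

// Nearest integer for non-negative inputs, as iround() does: add 0.5, truncate.
static int roundNonNegative(float x)
{
    assert(x >= 0.0f);
    return static_cast<int>(x + 0.5f);
}

int main()
{
    std::printf("%d\n", nearlyEqual(0.1f + 0.2f, 0.3f, 1e-6f)); // 1
    std::printf("%d\n", roundNonNegative(2.5f));                // 3
    return 0;
}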
- /// - /// @param x The values of the argument must be greater or equal to zero. - /// @tparam T floating point scalar types. - /// - /// @see GLSL round man page - /// @see gtc_integer - template - GLM_FUNC_DECL vec iround(vec const& x); - - /// Returns a value equal to the nearest integer to x. - /// The fraction 0.5 will round in a direction chosen by the - /// implementation, presumably the direction that is fastest. - /// - /// @param x The values of the argument must be greater or equal to zero. - /// @tparam T floating point scalar types. - /// - /// @see GLSL round man page - /// @see gtc_integer - template - GLM_FUNC_DECL vec uround(vec const& x); - - /// @} -} //namespace glm - -#include "integer.inl" diff --git a/third_party/glm/gtc/integer.inl b/third_party/glm/gtc/integer.inl deleted file mode 100755 index f0a8b4f..0000000 --- a/third_party/glm/gtc/integer.inl +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtc_integer - -namespace glm{ -namespace detail -{ - template - struct compute_log2 - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - //Equivalent to return findMSB(vec); but save one function call in ASM with VC - //return findMSB(vec); - return vec(detail::compute_findMSB_vec::call(v)); - } - }; - -# if GLM_HAS_BITSCAN_WINDOWS - template - struct compute_log2<4, int, Q, false, Aligned> - { - GLM_FUNC_QUALIFIER static vec<4, int, Q> call(vec<4, int, Q> const& v) - { - vec<4, int, Q> Result; - _BitScanReverse(reinterpret_cast(&Result.x), v.x); - _BitScanReverse(reinterpret_cast(&Result.y), v.y); - _BitScanReverse(reinterpret_cast(&Result.z), v.z); - _BitScanReverse(reinterpret_cast(&Result.w), v.w); - return Result; - } - }; -# endif//GLM_HAS_BITSCAN_WINDOWS -}//namespace detail - template - GLM_FUNC_QUALIFIER int iround(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'iround' only accept floating-point inputs"); - assert(static_cast(0.0) <= x); - - return static_cast(x + static_cast(0.5)); - } - - template - GLM_FUNC_QUALIFIER vec iround(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'iround' only accept floating-point inputs"); - assert(all(lessThanEqual(vec(0), x))); - - return vec(x + static_cast(0.5)); - } - - template - GLM_FUNC_QUALIFIER uint uround(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'uround' only accept floating-point inputs"); - assert(static_cast(0.0) <= x); - - return static_cast(x + static_cast(0.5)); - } - - template - GLM_FUNC_QUALIFIER vec uround(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'uround' only accept floating-point inputs"); - assert(all(lessThanEqual(vec(0), x))); - - return vec(x + static_cast(0.5)); - } -}//namespace glm diff --git a/third_party/glm/gtc/matrix_access.hpp b/third_party/glm/gtc/matrix_access.hpp deleted file mode 100755 index 4935ba7..0000000 --- a/third_party/glm/gtc/matrix_access.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtc_matrix_access -/// @file glm/gtc/matrix_access.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_matrix_access GLM_GTC_matrix_access -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines functions to access rows or columns of a matrix easily. - -#pragma once - -// Dependency: -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_matrix_access extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_matrix_access - /// @{ - - /// Get a specific row of a matrix. 
- /// @see gtc_matrix_access - template - GLM_FUNC_DECL typename genType::row_type row( - genType const& m, - length_t index); - - /// Set a specific row to a matrix. - /// @see gtc_matrix_access - template - GLM_FUNC_DECL genType row( - genType const& m, - length_t index, - typename genType::row_type const& x); - - /// Get a specific column of a matrix. - /// @see gtc_matrix_access - template - GLM_FUNC_DECL typename genType::col_type column( - genType const& m, - length_t index); - - /// Set a specific column to a matrix. - /// @see gtc_matrix_access - template - GLM_FUNC_DECL genType column( - genType const& m, - length_t index, - typename genType::col_type const& x); - - /// @} -}//namespace glm - -#include "matrix_access.inl" diff --git a/third_party/glm/gtc/matrix_access.inl b/third_party/glm/gtc/matrix_access.inl deleted file mode 100755 index 09fcc10..0000000 --- a/third_party/glm/gtc/matrix_access.inl +++ /dev/null @@ -1,62 +0,0 @@ -/// @ref gtc_matrix_access - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType row - ( - genType const& m, - length_t index, - typename genType::row_type const& x - ) - { - assert(index >= 0 && index < m[0].length()); - - genType Result = m; - for(length_t i = 0; i < m.length(); ++i) - Result[i][index] = x[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER typename genType::row_type row - ( - genType const& m, - length_t index - ) - { - assert(index >= 0 && index < m[0].length()); - - typename genType::row_type Result(0); - for(length_t i = 0; i < m.length(); ++i) - Result[i] = m[i][index]; - return Result; - } - - template - GLM_FUNC_QUALIFIER genType column - ( - genType const& m, - length_t index, - typename genType::col_type const& x - ) - { - assert(index >= 0 && index < m.length()); - - genType Result = m; - Result[index] = x; - return Result; - } - - template - GLM_FUNC_QUALIFIER typename genType::col_type column - ( - genType const& m, - length_t index - ) - { - assert(index >= 0 && index < m.length()); - - return m[index]; - } -}//namespace glm diff --git a/third_party/glm/gtc/matrix_integer.hpp b/third_party/glm/gtc/matrix_integer.hpp deleted file mode 100755 index 557a977..0000000 --- a/third_party/glm/gtc/matrix_integer.hpp +++ /dev/null @@ -1,487 +0,0 @@ -/// @ref gtc_matrix_integer -/// @file glm/gtc/matrix_integer.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_matrix_integer GLM_GTC_matrix_integer -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines a number of matrices with integer types. - -#pragma once - -// Dependency: -#include "../mat2x2.hpp" -#include "../mat2x3.hpp" -#include "../mat2x4.hpp" -#include "../mat3x2.hpp" -#include "../mat3x3.hpp" -#include "../mat3x4.hpp" -#include "../mat4x2.hpp" -#include "../mat4x3.hpp" -#include "../mat4x4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_matrix_integer extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_matrix_integer - /// @{ - - /// High-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, highp> highp_imat2; - - /// High-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, highp> highp_imat3; - - /// High-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, highp> highp_imat4; - - /// High-qualifier signed integer 2x2 matrix. 
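The removed gtc/matrix_access functions above work against GLM's column-major storage: column(m, index) returns m[index] directly, while row(m, index) gathers m[c][index] from every column c. A standalone sketch with a plain column-major array (Mat3 and row here are illustrative, not the GLM types):

// Sketch of row extraction from a column-major 3x3 matrix, matching what the
// removed gtc/matrix_access row() does: element c of the row is m[c][index].
#include <array>
#include <cstdio>

using Mat3 = std::array<std::array<float, 3>, 3>; // m[column][row]

static std::array<float, 3> row(const Mat3& m, int index)
{
    return { m[0][index], m[1][index], m[2][index] };
}

int main()
{
    // Three columns, stored column-major.
    const Mat3 m = {{ {0, 1, 2}, {3, 4, 5}, {6, 7, 8} }};
    const auto r = row(m, 1);
    std::printf("%.0f %.0f %.0f\n", r[0], r[1], r[2]); // 1 4 7
    return 0;
}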
- /// @see gtc_matrix_integer - typedef mat<2, 2, int, highp> highp_imat2x2; - - /// High-qualifier signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, int, highp> highp_imat2x3; - - /// High-qualifier signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, int, highp> highp_imat2x4; - - /// High-qualifier signed integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, int, highp> highp_imat3x2; - - /// High-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, highp> highp_imat3x3; - - /// High-qualifier signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, int, highp> highp_imat3x4; - - /// High-qualifier signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, int, highp> highp_imat4x2; - - /// High-qualifier signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, int, highp> highp_imat4x3; - - /// High-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, highp> highp_imat4x4; - - - /// Medium-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, mediump> mediump_imat2; - - /// Medium-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, mediump> mediump_imat3; - - /// Medium-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, mediump> mediump_imat4; - - - /// Medium-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, mediump> mediump_imat2x2; - - /// Medium-qualifier signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, int, mediump> mediump_imat2x3; - - /// Medium-qualifier signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, int, mediump> mediump_imat2x4; - - /// Medium-qualifier signed integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, int, mediump> mediump_imat3x2; - - /// Medium-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, mediump> mediump_imat3x3; - - /// Medium-qualifier signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, int, mediump> mediump_imat3x4; - - /// Medium-qualifier signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, int, mediump> mediump_imat4x2; - - /// Medium-qualifier signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, int, mediump> mediump_imat4x3; - - /// Medium-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, mediump> mediump_imat4x4; - - - /// Low-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, lowp> lowp_imat2; - - /// Low-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, lowp> lowp_imat3; - - /// Low-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, lowp> lowp_imat4; - - - /// Low-qualifier signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, int, lowp> lowp_imat2x2; - - /// Low-qualifier signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, int, lowp> lowp_imat2x3; - - /// Low-qualifier signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, int, lowp> lowp_imat2x4; - - /// Low-qualifier signed integer 3x2 matrix. 
- /// @see gtc_matrix_integer - typedef mat<3, 2, int, lowp> lowp_imat3x2; - - /// Low-qualifier signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, int, lowp> lowp_imat3x3; - - /// Low-qualifier signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, int, lowp> lowp_imat3x4; - - /// Low-qualifier signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, int, lowp> lowp_imat4x2; - - /// Low-qualifier signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, int, lowp> lowp_imat4x3; - - /// Low-qualifier signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, int, lowp> lowp_imat4x4; - - - /// High-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, highp> highp_umat2; - - /// High-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, highp> highp_umat3; - - /// High-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, highp> highp_umat4; - - /// High-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, highp> highp_umat2x2; - - /// High-qualifier unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, uint, highp> highp_umat2x3; - - /// High-qualifier unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, uint, highp> highp_umat2x4; - - /// High-qualifier unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, uint, highp> highp_umat3x2; - - /// High-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, highp> highp_umat3x3; - - /// High-qualifier unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, uint, highp> highp_umat3x4; - - /// High-qualifier unsigned integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, uint, highp> highp_umat4x2; - - /// High-qualifier unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, uint, highp> highp_umat4x3; - - /// High-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, highp> highp_umat4x4; - - - /// Medium-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, mediump> mediump_umat2; - - /// Medium-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, mediump> mediump_umat3; - - /// Medium-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, mediump> mediump_umat4; - - - /// Medium-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, mediump> mediump_umat2x2; - - /// Medium-qualifier unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, uint, mediump> mediump_umat2x3; - - /// Medium-qualifier unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, uint, mediump> mediump_umat2x4; - - /// Medium-qualifier unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, uint, mediump> mediump_umat3x2; - - /// Medium-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, mediump> mediump_umat3x3; - - /// Medium-qualifier unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, uint, mediump> mediump_umat3x4; - - /// Medium-qualifier unsigned integer 4x2 matrix. 
- /// @see gtc_matrix_integer - typedef mat<4, 2, uint, mediump> mediump_umat4x2; - - /// Medium-qualifier unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, uint, mediump> mediump_umat4x3; - - /// Medium-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, mediump> mediump_umat4x4; - - - /// Low-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, lowp> lowp_umat2; - - /// Low-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, lowp> lowp_umat3; - - /// Low-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, lowp> lowp_umat4; - - - /// Low-qualifier unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 2, uint, lowp> lowp_umat2x2; - - /// Low-qualifier unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 3, uint, lowp> lowp_umat2x3; - - /// Low-qualifier unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mat<2, 4, uint, lowp> lowp_umat2x4; - - /// Low-qualifier unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 2, uint, lowp> lowp_umat3x2; - - /// Low-qualifier unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 3, uint, lowp> lowp_umat3x3; - - /// Low-qualifier unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mat<3, 4, uint, lowp> lowp_umat3x4; - - /// Low-qualifier unsigned integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 2, uint, lowp> lowp_umat4x2; - - /// Low-qualifier unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 3, uint, lowp> lowp_umat4x3; - - /// Low-qualifier unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mat<4, 4, uint, lowp> lowp_umat4x4; - -#if(defined(GLM_PRECISION_HIGHP_INT)) - typedef highp_imat2 imat2; - typedef highp_imat3 imat3; - typedef highp_imat4 imat4; - typedef highp_imat2x2 imat2x2; - typedef highp_imat2x3 imat2x3; - typedef highp_imat2x4 imat2x4; - typedef highp_imat3x2 imat3x2; - typedef highp_imat3x3 imat3x3; - typedef highp_imat3x4 imat3x4; - typedef highp_imat4x2 imat4x2; - typedef highp_imat4x3 imat4x3; - typedef highp_imat4x4 imat4x4; -#elif(defined(GLM_PRECISION_LOWP_INT)) - typedef lowp_imat2 imat2; - typedef lowp_imat3 imat3; - typedef lowp_imat4 imat4; - typedef lowp_imat2x2 imat2x2; - typedef lowp_imat2x3 imat2x3; - typedef lowp_imat2x4 imat2x4; - typedef lowp_imat3x2 imat3x2; - typedef lowp_imat3x3 imat3x3; - typedef lowp_imat3x4 imat3x4; - typedef lowp_imat4x2 imat4x2; - typedef lowp_imat4x3 imat4x3; - typedef lowp_imat4x4 imat4x4; -#else //if(defined(GLM_PRECISION_MEDIUMP_INT)) - - /// Signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2 imat2; - - /// Signed integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat3 imat3; - - /// Signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4 imat4; - - /// Signed integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2x2 imat2x2; - - /// Signed integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2x3 imat2x3; - - /// Signed integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat2x4 imat2x4; - - /// Signed integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat3x2 imat3x2; - - /// Signed integer 3x3 matrix. 
- /// @see gtc_matrix_integer - typedef mediump_imat3x3 imat3x3; - - /// Signed integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat3x4 imat3x4; - - /// Signed integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4x2 imat4x2; - - /// Signed integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4x3 imat4x3; - - /// Signed integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_imat4x4 imat4x4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_HIGHP_UINT)) - typedef highp_umat2 umat2; - typedef highp_umat3 umat3; - typedef highp_umat4 umat4; - typedef highp_umat2x2 umat2x2; - typedef highp_umat2x3 umat2x3; - typedef highp_umat2x4 umat2x4; - typedef highp_umat3x2 umat3x2; - typedef highp_umat3x3 umat3x3; - typedef highp_umat3x4 umat3x4; - typedef highp_umat4x2 umat4x2; - typedef highp_umat4x3 umat4x3; - typedef highp_umat4x4 umat4x4; -#elif(defined(GLM_PRECISION_LOWP_UINT)) - typedef lowp_umat2 umat2; - typedef lowp_umat3 umat3; - typedef lowp_umat4 umat4; - typedef lowp_umat2x2 umat2x2; - typedef lowp_umat2x3 umat2x3; - typedef lowp_umat2x4 umat2x4; - typedef lowp_umat3x2 umat3x2; - typedef lowp_umat3x3 umat3x3; - typedef lowp_umat3x4 umat3x4; - typedef lowp_umat4x2 umat4x2; - typedef lowp_umat4x3 umat4x3; - typedef lowp_umat4x4 umat4x4; -#else //if(defined(GLM_PRECISION_MEDIUMP_UINT)) - - /// Unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2 umat2; - - /// Unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3 umat3; - - /// Unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4 umat4; - - /// Unsigned integer 2x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2x2 umat2x2; - - /// Unsigned integer 2x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2x3 umat2x3; - - /// Unsigned integer 2x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat2x4 umat2x4; - - /// Unsigned integer 3x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3x2 umat3x2; - - /// Unsigned integer 3x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3x3 umat3x3; - - /// Unsigned integer 3x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat3x4 umat3x4; - - /// Unsigned integer 4x2 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4x2 umat4x2; - - /// Unsigned integer 4x3 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4x3 umat4x3; - - /// Unsigned integer 4x4 matrix. - /// @see gtc_matrix_integer - typedef mediump_umat4x4 umat4x4; -#endif//GLM_PRECISION - - /// @} -}//namespace glm diff --git a/third_party/glm/gtc/matrix_inverse.hpp b/third_party/glm/gtc/matrix_inverse.hpp deleted file mode 100755 index a1900ad..0000000 --- a/third_party/glm/gtc/matrix_inverse.hpp +++ /dev/null @@ -1,50 +0,0 @@ -/// @ref gtc_matrix_inverse -/// @file glm/gtc/matrix_inverse.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_matrix_inverse GLM_GTC_matrix_inverse -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines additional matrix inverting functions. 
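As context for the declarations that follow: affineInverse is only meaningful for affine matrices (no projective row), and inverseTranspose is the usual way to build a normal matrix under non-uniform scale. A sketch of typical call sites, assuming only the API documented in this removed header; the helper names and the idea of applying them to a view/model matrix are illustrative:

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_inverse.hpp>

    // 'view' must be affine (rotation/translation/scale, no projection) for affineInverse.
    glm::mat4 WorldFromView(const glm::mat4& view)
    {
        return glm::affineInverse(view);   // cheaper than a general glm::inverse for affine input
    }

    // Transforms normals correctly even under non-uniform scale.
    glm::mat3 NormalMatrix(const glm::mat4& model)
    {
        return glm::inverseTranspose(glm::mat3(model));
    }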
- -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../matrix.hpp" -#include "../mat2x2.hpp" -#include "../mat3x3.hpp" -#include "../mat4x4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_matrix_inverse extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_matrix_inverse - /// @{ - - /// Fast matrix inverse for affine matrix. - /// - /// @param m Input matrix to invert. - /// @tparam genType Squared floating-point matrix: half, float or double. Inverse of matrix based of half-qualifier floating point value is highly innacurate. - /// @see gtc_matrix_inverse - template - GLM_FUNC_DECL genType affineInverse(genType const& m); - - /// Compute the inverse transpose of a matrix. - /// - /// @param m Input matrix to invert transpose. - /// @tparam genType Squared floating-point matrix: half, float or double. Inverse of matrix based of half-qualifier floating point value is highly innacurate. - /// @see gtc_matrix_inverse - template - GLM_FUNC_DECL genType inverseTranspose(genType const& m); - - /// @} -}//namespace glm - -#include "matrix_inverse.inl" diff --git a/third_party/glm/gtc/matrix_inverse.inl b/third_party/glm/gtc/matrix_inverse.inl deleted file mode 100755 index c004b9e..0000000 --- a/third_party/glm/gtc/matrix_inverse.inl +++ /dev/null @@ -1,118 +0,0 @@ -/// @ref gtc_matrix_inverse - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> affineInverse(mat<3, 3, T, Q> const& m) - { - mat<2, 2, T, Q> const Inv(inverse(mat<2, 2, T, Q>(m))); - - return mat<3, 3, T, Q>( - vec<3, T, Q>(Inv[0], static_cast(0)), - vec<3, T, Q>(Inv[1], static_cast(0)), - vec<3, T, Q>(-Inv * vec<2, T, Q>(m[2]), static_cast(1))); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> affineInverse(mat<4, 4, T, Q> const& m) - { - mat<3, 3, T, Q> const Inv(inverse(mat<3, 3, T, Q>(m))); - - return mat<4, 4, T, Q>( - vec<4, T, Q>(Inv[0], static_cast(0)), - vec<4, T, Q>(Inv[1], static_cast(0)), - vec<4, T, Q>(Inv[2], static_cast(0)), - vec<4, T, Q>(-Inv * vec<3, T, Q>(m[3]), static_cast(1))); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> inverseTranspose(mat<2, 2, T, Q> const& m) - { - T Determinant = m[0][0] * m[1][1] - m[1][0] * m[0][1]; - - mat<2, 2, T, Q> Inverse( - + m[1][1] / Determinant, - - m[0][1] / Determinant, - - m[1][0] / Determinant, - + m[0][0] / Determinant); - - return Inverse; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> inverseTranspose(mat<3, 3, T, Q> const& m) - { - T Determinant = - + m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1]) - - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0]) - + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0]); - - mat<3, 3, T, Q> Inverse; - Inverse[0][0] = + (m[1][1] * m[2][2] - m[2][1] * m[1][2]); - Inverse[0][1] = - (m[1][0] * m[2][2] - m[2][0] * m[1][2]); - Inverse[0][2] = + (m[1][0] * m[2][1] - m[2][0] * m[1][1]); - Inverse[1][0] = - (m[0][1] * m[2][2] - m[2][1] * m[0][2]); - Inverse[1][1] = + (m[0][0] * m[2][2] - m[2][0] * m[0][2]); - Inverse[1][2] = - (m[0][0] * m[2][1] - m[2][0] * m[0][1]); - Inverse[2][0] = + (m[0][1] * m[1][2] - m[1][1] * m[0][2]); - Inverse[2][1] = - (m[0][0] * m[1][2] - m[1][0] * m[0][2]); - Inverse[2][2] = + (m[0][0] * m[1][1] - m[1][0] * m[0][1]); - Inverse /= Determinant; - - return Inverse; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> inverseTranspose(mat<4, 4, T, Q> const& m) - { - T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - T 
SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - T SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - T SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - T SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - T SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - T SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - T SubFactor11 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - T SubFactor12 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - T SubFactor13 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - T SubFactor14 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - T SubFactor15 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - T SubFactor16 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - T SubFactor17 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - mat<4, 4, T, Q> Inverse; - Inverse[0][0] = + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02); - Inverse[0][1] = - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04); - Inverse[0][2] = + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05); - Inverse[0][3] = - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05); - - Inverse[1][0] = - (m[0][1] * SubFactor00 - m[0][2] * SubFactor01 + m[0][3] * SubFactor02); - Inverse[1][1] = + (m[0][0] * SubFactor00 - m[0][2] * SubFactor03 + m[0][3] * SubFactor04); - Inverse[1][2] = - (m[0][0] * SubFactor01 - m[0][1] * SubFactor03 + m[0][3] * SubFactor05); - Inverse[1][3] = + (m[0][0] * SubFactor02 - m[0][1] * SubFactor04 + m[0][2] * SubFactor05); - - Inverse[2][0] = + (m[0][1] * SubFactor06 - m[0][2] * SubFactor07 + m[0][3] * SubFactor08); - Inverse[2][1] = - (m[0][0] * SubFactor06 - m[0][2] * SubFactor09 + m[0][3] * SubFactor10); - Inverse[2][2] = + (m[0][0] * SubFactor07 - m[0][1] * SubFactor09 + m[0][3] * SubFactor11); - Inverse[2][3] = - (m[0][0] * SubFactor08 - m[0][1] * SubFactor10 + m[0][2] * SubFactor11); - - Inverse[3][0] = - (m[0][1] * SubFactor12 - m[0][2] * SubFactor13 + m[0][3] * SubFactor14); - Inverse[3][1] = + (m[0][0] * SubFactor12 - m[0][2] * SubFactor15 + m[0][3] * SubFactor16); - Inverse[3][2] = - (m[0][0] * SubFactor13 - m[0][1] * SubFactor15 + m[0][3] * SubFactor17); - Inverse[3][3] = + (m[0][0] * SubFactor14 - m[0][1] * SubFactor16 + m[0][2] * SubFactor17); - - T Determinant = - + m[0][0] * Inverse[0][0] - + m[0][1] * Inverse[0][1] - + m[0][2] * Inverse[0][2] - + m[0][3] * Inverse[0][3]; - - Inverse /= Determinant; - - return Inverse; - } -}//namespace glm diff --git a/third_party/glm/gtc/matrix_transform.hpp b/third_party/glm/gtc/matrix_transform.hpp deleted file mode 100755 index 612418f..0000000 --- a/third_party/glm/gtc/matrix_transform.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtc_matrix_transform -/// @file glm/gtc/matrix_transform.hpp -/// -/// @see core (dependence) -/// @see gtx_transform -/// @see gtx_transform2 -/// -/// @defgroup gtc_matrix_transform GLM_GTC_matrix_transform -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines functions that generate common transformation matrices. -/// -/// The matrices generated by this extension use standard OpenGL fixed-function -/// conventions. For example, the lookAt function generates a transform from world -/// space into the specific eye space that the projective matrix functions -/// (perspective, ortho, etc) are designed to expect. 
The OpenGL compatibility -/// specifications defines the particular layout of this eye space. - -#pragma once - -// Dependencies -#include "../mat4x4.hpp" -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../ext/matrix_projection.hpp" -#include "../ext/matrix_clip_space.hpp" -#include "../ext/matrix_transform.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_matrix_transform extension included") -#endif - -#include "matrix_transform.inl" diff --git a/third_party/glm/gtc/matrix_transform.inl b/third_party/glm/gtc/matrix_transform.inl deleted file mode 100755 index 15b46bc..0000000 --- a/third_party/glm/gtc/matrix_transform.inl +++ /dev/null @@ -1,3 +0,0 @@ -#include "../geometric.hpp" -#include "../trigonometric.hpp" -#include "../matrix.hpp" diff --git a/third_party/glm/gtc/noise.hpp b/third_party/glm/gtc/noise.hpp deleted file mode 100755 index ab1772e..0000000 --- a/third_party/glm/gtc/noise.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/// @ref gtc_noise -/// @file glm/gtc/noise.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_noise GLM_GTC_noise -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines 2D, 3D and 4D procedural noise functions -/// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise": -/// https://github.com/ashima/webgl-noise -/// Following Stefan Gustavson's paper "Simplex noise demystified": -/// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_noise.hpp" -#include "../geometric.hpp" -#include "../common.hpp" -#include "../vector_relational.hpp" -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_noise extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_noise - /// @{ - - /// Classic perlin noise. - /// @see gtc_noise - template - GLM_FUNC_DECL T perlin( - vec const& p); - - /// Periodic perlin noise. - /// @see gtc_noise - template - GLM_FUNC_DECL T perlin( - vec const& p, - vec const& rep); - - /// Simplex noise. 
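The fixed-function convention described in the removed matrix_transform.hpp above means a projection built with perspective() expects points already in the eye space produced by lookAt(). A small illustrative sketch; the camera values are arbitrary, and perspective/lookAt come from the ext headers that file includes:

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_transform.hpp>

    // lookAt produces the world-to-eye transform that perspective() is designed to consume.
    glm::mat4 MakeViewProjection(float aspect)
    {
        glm::mat4 proj = glm::perspective(glm::radians(60.0f), aspect, 0.1f, 100.0f);
        glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 2.0f, 5.0f),   // eye
                                     glm::vec3(0.0f),               // center
                                     glm::vec3(0.0f, 1.0f, 0.0f));  // up
        return proj * view;
    }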
- /// @see gtc_noise - template - GLM_FUNC_DECL T simplex( - vec const& p); - - /// @} -}//namespace glm - -#include "noise.inl" diff --git a/third_party/glm/gtc/noise.inl b/third_party/glm/gtc/noise.inl deleted file mode 100755 index 30d0b27..0000000 --- a/third_party/glm/gtc/noise.inl +++ /dev/null @@ -1,807 +0,0 @@ -/// @ref gtc_noise -/// -// Based on the work of Stefan Gustavson and Ashima Arts on "webgl-noise": -// https://github.com/ashima/webgl-noise -// Following Stefan Gustavson's paper "Simplex noise demystified": -// http://www.itn.liu.se/~stegu/simplexnoise/simplexnoise.pdf - -namespace glm{ -namespace gtc -{ - template - GLM_FUNC_QUALIFIER vec<4, T, Q> grad4(T const& j, vec<4, T, Q> const& ip) - { - vec<3, T, Q> pXYZ = floor(fract(vec<3, T, Q>(j) * vec<3, T, Q>(ip)) * T(7)) * ip[2] - T(1); - T pW = static_cast(1.5) - dot(abs(pXYZ), vec<3, T, Q>(1)); - vec<4, T, Q> s = vec<4, T, Q>(lessThan(vec<4, T, Q>(pXYZ, pW), vec<4, T, Q>(0.0))); - pXYZ = pXYZ + (vec<3, T, Q>(s) * T(2) - T(1)) * s.w; - return vec<4, T, Q>(pXYZ, pW); - } -}//namespace gtc - - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position) - { - vec<4, T, Q> Pi = glm::floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - vec<4, T, Q> Pf = glm::fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation - vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); - vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); - vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); - vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); - - vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); - - vec<4, T, Q> gx = static_cast(2) * glm::fract(i / T(41)) - T(1); - vec<4, T, Q> gy = glm::abs(gx) - T(0.5); - vec<4, T, Q> tx = glm::floor(gx + T(0.5)); - gx = gx - tx; - - vec<2, T, Q> g00(gx.x, gy.x); - vec<2, T, Q> g10(gx.y, gy.y); - vec<2, T, Q> g01(gx.z, gy.z); - vec<2, T, Q> g11(gx.w, gy.w); - - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); - g00 *= norm.x; - g01 *= norm.y; - g10 *= norm.z; - g11 *= norm.w; - - T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); - T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); - T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); - T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); - - vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); - vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); - T n_xy = mix(n_x.x, n_x.y, fade_xy.y); - return T(2.3) * n_xy; - } - - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position) - { - vec<3, T, Q> Pi0 = floor(Position); // Integer part for indexing - vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 - Pi0 = detail::mod289(Pi0); - Pi1 = detail::mod289(Pi1); - vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy = vec<4, T, Q>(vec<2, T, Q>(Pi0.y), vec<2, T, Q>(Pi1.y)); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - - vec<4, T, Q> gx0 = ixy0 * T(1.0 / 7.0); - vec<4, T, Q> gy0 = fract(floor(gx0) * T(1.0 / 7.0)) - T(0.5); - gx0 = fract(gx0); - vec<4, T, Q> gz0 = 
vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); - vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); - gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); - gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); - - vec<4, T, Q> gx1 = ixy1 * T(1.0 / 7.0); - vec<4, T, Q> gy1 = fract(floor(gx1) * T(1.0 / 7.0)) - T(0.5); - gx1 = fract(gx1); - vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); - vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); - gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); - gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); - - vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); - vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); - vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); - vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); - vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x); - vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); - vec<3, T, Q> g011(gx1.z, gy1.z, gz1.z); - vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); - - vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); - g000 *= norm0.x; - g010 *= norm0.y; - g100 *= norm0.z; - g110 *= norm0.w; - vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); - g001 *= norm1.x; - g011 *= norm1.y; - g101 *= norm1.z; - g111 *= norm1.w; - - T n000 = dot(g000, Pf0); - T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); - T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); - T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); - T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); - T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); - T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); - T n111 = dot(g111, Pf1); - - vec<3, T, Q> fade_xyz = detail::fade(Pf0); - vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); - vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); - T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); - return T(2.2) * n_xyz; - } - /* - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& P) - { - vec<3, T, Q> Pi0 = floor(P); // Integer part for indexing - vec<3, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 - Pi0 = mod(Pi0, T(289)); - Pi1 = mod(Pi1, T(289)); - vec<3, T, Q> Pf0 = fract(P); // Fractional part for interpolation - vec<3, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - - vec<4, T, Q> ixy = permute(permute(ix) + iy); - vec<4, T, Q> ixy0 = permute(ixy + iz0); - vec<4, T, Q> ixy1 = permute(ixy + iz1); - - vec<4, T, Q> gx0 = ixy0 / T(7); - vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); - gx0 = fract(gx0); - vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); - vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0.0)); - gx0 -= sz0 * (step(0.0, gx0) - T(0.5)); - gy0 -= sz0 * (step(0.0, gy0) - T(0.5)); - - vec<4, T, Q> gx1 = ixy1 / T(7); - vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); - gx1 = fract(gx1); - vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); - vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(0.0)); - gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); - gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); - - vec<3, T, Q> g000(gx0.x, gy0.x, gz0.x); - vec<3, T, Q> g100(gx0.y, gy0.y, gz0.y); - vec<3, T, Q> g010(gx0.z, gy0.z, gz0.z); - vec<3, T, Q> g110(gx0.w, gy0.w, gz0.w); - vec<3, T, Q> g001(gx1.x, gy1.x, gz1.x); - vec<3, T, Q> g101(gx1.y, gy1.y, gz1.y); - vec<3, T, Q> g011(gx1.z, 
gy1.z, gz1.z); - vec<3, T, Q> g111(gx1.w, gy1.w, gz1.w); - - vec<4, T, Q> norm0 = taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); - g000 *= norm0.x; - g010 *= norm0.y; - g100 *= norm0.z; - g110 *= norm0.w; - vec<4, T, Q> norm1 = taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); - g001 *= norm1.x; - g011 *= norm1.y; - g101 *= norm1.z; - g111 *= norm1.w; - - T n000 = dot(g000, Pf0); - T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); - T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); - T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); - T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); - T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); - T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); - T n111 = dot(g111, Pf1); - - vec<3, T, Q> fade_xyz = fade(Pf0); - vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); - vec<2, T, Q> n_yz = mix( - vec<2, T, Q>(n_z.x, n_z.y), - vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); - T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); - return T(2.2) * n_xyz; - } - */ - // Classic Perlin noise - template - GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position) - { - vec<4, T, Q> Pi0 = floor(Position); // Integer part for indexing - vec<4, T, Q> Pi1 = Pi0 + T(1); // Integer part + 1 - Pi0 = mod(Pi0, vec<4, T, Q>(289)); - Pi1 = mod(Pi1, vec<4, T, Q>(289)); - vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - vec<4, T, Q> iw0(Pi0.w); - vec<4, T, Q> iw1(Pi1.w); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); - vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); - vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); - vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); - - vec<4, T, Q> gx00 = ixy00 / T(7); - vec<4, T, Q> gy00 = floor(gx00) / T(7); - vec<4, T, Q> gz00 = floor(gy00) / T(6); - gx00 = fract(gx00) - T(0.5); - gy00 = fract(gy00) - T(0.5); - gz00 = fract(gz00) - T(0.5); - vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); - vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0.0)); - gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); - gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); - - vec<4, T, Q> gx01 = ixy01 / T(7); - vec<4, T, Q> gy01 = floor(gx01) / T(7); - vec<4, T, Q> gz01 = floor(gy01) / T(6); - gx01 = fract(gx01) - T(0.5); - gy01 = fract(gy01) - T(0.5); - gz01 = fract(gz01) - T(0.5); - vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); - vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); - gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); - gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); - - vec<4, T, Q> gx10 = ixy10 / T(7); - vec<4, T, Q> gy10 = floor(gx10) / T(7); - vec<4, T, Q> gz10 = floor(gy10) / T(6); - gx10 = fract(gx10) - T(0.5); - gy10 = fract(gy10) - T(0.5); - gz10 = fract(gz10) - T(0.5); - vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); - vec<4, T, Q> sw10 = step(gw10, vec<4, T, Q>(0)); - gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); - gy10 -= sw10 * (step(T(0), gy10) - T(0.5)); - - vec<4, T, Q> gx11 = ixy11 / T(7); - vec<4, T, Q> gy11 
= floor(gx11) / T(7); - vec<4, T, Q> gz11 = floor(gy11) / T(6); - gx11 = fract(gx11) - T(0.5); - gy11 = fract(gy11) - T(0.5); - gz11 = fract(gz11) - T(0.5); - vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); - vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(0.0)); - gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); - gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); - - vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); - vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); - vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); - vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); - vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); - vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); - vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); - vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); - vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); - vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); - vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); - vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); - vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); - vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); - vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); - vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); - - vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); - g0000 *= norm00.x; - g0100 *= norm00.y; - g1000 *= norm00.z; - g1100 *= norm00.w; - - vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); - g0001 *= norm01.x; - g0101 *= norm01.y; - g1001 *= norm01.z; - g1101 *= norm01.w; - - vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); - g0010 *= norm10.x; - g0110 *= norm10.y; - g1010 *= norm10.z; - g1110 *= norm10.w; - - vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); - g0011 *= norm11.x; - g0111 *= norm11.y; - g1011 *= norm11.z; - g1111 *= norm11.w; - - T n0000 = dot(g0000, Pf0); - T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); - T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); - T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); - T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); - T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); - T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); - T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); - T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); - T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); - T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); - T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); - T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); - T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); - T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); - T n1111 = dot(g1111, Pf1); - - vec<4, T, Q> fade_xyzw = detail::fade(Pf0); - vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w); - vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); - vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z); - vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, 
n_zw.w), fade_xyzw.y); - T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); - return T(2.2) * n_xyzw; - } - - // Classic Perlin noise, periodic variant - template - GLM_FUNC_QUALIFIER T perlin(vec<2, T, Q> const& Position, vec<2, T, Q> const& rep) - { - vec<4, T, Q> Pi = floor(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) + vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - vec<4, T, Q> Pf = fract(vec<4, T, Q>(Position.x, Position.y, Position.x, Position.y)) - vec<4, T, Q>(0.0, 0.0, 1.0, 1.0); - Pi = mod(Pi, vec<4, T, Q>(rep.x, rep.y, rep.x, rep.y)); // To create noise with explicit period - Pi = mod(Pi, vec<4, T, Q>(289)); // To avoid truncation effects in permutation - vec<4, T, Q> ix(Pi.x, Pi.z, Pi.x, Pi.z); - vec<4, T, Q> iy(Pi.y, Pi.y, Pi.w, Pi.w); - vec<4, T, Q> fx(Pf.x, Pf.z, Pf.x, Pf.z); - vec<4, T, Q> fy(Pf.y, Pf.y, Pf.w, Pf.w); - - vec<4, T, Q> i = detail::permute(detail::permute(ix) + iy); - - vec<4, T, Q> gx = static_cast(2) * fract(i / T(41)) - T(1); - vec<4, T, Q> gy = abs(gx) - T(0.5); - vec<4, T, Q> tx = floor(gx + T(0.5)); - gx = gx - tx; - - vec<2, T, Q> g00(gx.x, gy.x); - vec<2, T, Q> g10(gx.y, gy.y); - vec<2, T, Q> g01(gx.z, gy.z); - vec<2, T, Q> g11(gx.w, gy.w); - - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11))); - g00 *= norm.x; - g01 *= norm.y; - g10 *= norm.z; - g11 *= norm.w; - - T n00 = dot(g00, vec<2, T, Q>(fx.x, fy.x)); - T n10 = dot(g10, vec<2, T, Q>(fx.y, fy.y)); - T n01 = dot(g01, vec<2, T, Q>(fx.z, fy.z)); - T n11 = dot(g11, vec<2, T, Q>(fx.w, fy.w)); - - vec<2, T, Q> fade_xy = detail::fade(vec<2, T, Q>(Pf.x, Pf.y)); - vec<2, T, Q> n_x = mix(vec<2, T, Q>(n00, n01), vec<2, T, Q>(n10, n11), fade_xy.x); - T n_xy = mix(n_x.x, n_x.y, fade_xy.y); - return T(2.3) * n_xy; - } - - // Classic Perlin noise, periodic variant - template - GLM_FUNC_QUALIFIER T perlin(vec<3, T, Q> const& Position, vec<3, T, Q> const& rep) - { - vec<3, T, Q> Pi0 = mod(floor(Position), rep); // Integer part, modulo period - vec<3, T, Q> Pi1 = mod(Pi0 + vec<3, T, Q>(T(1)), rep); // Integer part + 1, mod period - Pi0 = mod(Pi0, vec<3, T, Q>(289)); - Pi1 = mod(Pi1, vec<3, T, Q>(289)); - vec<3, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<3, T, Q> Pf1 = Pf0 - vec<3, T, Q>(T(1)); // Fractional part - 1.0 - vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - - vec<4, T, Q> gx0 = ixy0 / T(7); - vec<4, T, Q> gy0 = fract(floor(gx0) / T(7)) - T(0.5); - gx0 = fract(gx0); - vec<4, T, Q> gz0 = vec<4, T, Q>(0.5) - abs(gx0) - abs(gy0); - vec<4, T, Q> sz0 = step(gz0, vec<4, T, Q>(0)); - gx0 -= sz0 * (step(T(0), gx0) - T(0.5)); - gy0 -= sz0 * (step(T(0), gy0) - T(0.5)); - - vec<4, T, Q> gx1 = ixy1 / T(7); - vec<4, T, Q> gy1 = fract(floor(gx1) / T(7)) - T(0.5); - gx1 = fract(gx1); - vec<4, T, Q> gz1 = vec<4, T, Q>(0.5) - abs(gx1) - abs(gy1); - vec<4, T, Q> sz1 = step(gz1, vec<4, T, Q>(T(0))); - gx1 -= sz1 * (step(T(0), gx1) - T(0.5)); - gy1 -= sz1 * (step(T(0), gy1) - T(0.5)); - - vec<3, T, Q> g000 = vec<3, T, Q>(gx0.x, gy0.x, gz0.x); - vec<3, T, Q> g100 = vec<3, T, Q>(gx0.y, gy0.y, gz0.y); - vec<3, T, Q> g010 = vec<3, T, Q>(gx0.z, gy0.z, gz0.z); - vec<3, T, Q> g110 = vec<3, T, Q>(gx0.w, gy0.w, gz0.w); - vec<3, T, Q> g001 = vec<3, 
T, Q>(gx1.x, gy1.x, gz1.x); - vec<3, T, Q> g101 = vec<3, T, Q>(gx1.y, gy1.y, gz1.y); - vec<3, T, Q> g011 = vec<3, T, Q>(gx1.z, gy1.z, gz1.z); - vec<3, T, Q> g111 = vec<3, T, Q>(gx1.w, gy1.w, gz1.w); - - vec<4, T, Q> norm0 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g000, g000), dot(g010, g010), dot(g100, g100), dot(g110, g110))); - g000 *= norm0.x; - g010 *= norm0.y; - g100 *= norm0.z; - g110 *= norm0.w; - vec<4, T, Q> norm1 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g001, g001), dot(g011, g011), dot(g101, g101), dot(g111, g111))); - g001 *= norm1.x; - g011 *= norm1.y; - g101 *= norm1.z; - g111 *= norm1.w; - - T n000 = dot(g000, Pf0); - T n100 = dot(g100, vec<3, T, Q>(Pf1.x, Pf0.y, Pf0.z)); - T n010 = dot(g010, vec<3, T, Q>(Pf0.x, Pf1.y, Pf0.z)); - T n110 = dot(g110, vec<3, T, Q>(Pf1.x, Pf1.y, Pf0.z)); - T n001 = dot(g001, vec<3, T, Q>(Pf0.x, Pf0.y, Pf1.z)); - T n101 = dot(g101, vec<3, T, Q>(Pf1.x, Pf0.y, Pf1.z)); - T n011 = dot(g011, vec<3, T, Q>(Pf0.x, Pf1.y, Pf1.z)); - T n111 = dot(g111, Pf1); - - vec<3, T, Q> fade_xyz = detail::fade(Pf0); - vec<4, T, Q> n_z = mix(vec<4, T, Q>(n000, n100, n010, n110), vec<4, T, Q>(n001, n101, n011, n111), fade_xyz.z); - vec<2, T, Q> n_yz = mix(vec<2, T, Q>(n_z.x, n_z.y), vec<2, T, Q>(n_z.z, n_z.w), fade_xyz.y); - T n_xyz = mix(n_yz.x, n_yz.y, fade_xyz.x); - return T(2.2) * n_xyz; - } - - // Classic Perlin noise, periodic version - template - GLM_FUNC_QUALIFIER T perlin(vec<4, T, Q> const& Position, vec<4, T, Q> const& rep) - { - vec<4, T, Q> Pi0 = mod(floor(Position), rep); // Integer part modulo rep - vec<4, T, Q> Pi1 = mod(Pi0 + T(1), rep); // Integer part + 1 mod rep - vec<4, T, Q> Pf0 = fract(Position); // Fractional part for interpolation - vec<4, T, Q> Pf1 = Pf0 - T(1); // Fractional part - 1.0 - vec<4, T, Q> ix = vec<4, T, Q>(Pi0.x, Pi1.x, Pi0.x, Pi1.x); - vec<4, T, Q> iy = vec<4, T, Q>(Pi0.y, Pi0.y, Pi1.y, Pi1.y); - vec<4, T, Q> iz0(Pi0.z); - vec<4, T, Q> iz1(Pi1.z); - vec<4, T, Q> iw0(Pi0.w); - vec<4, T, Q> iw1(Pi1.w); - - vec<4, T, Q> ixy = detail::permute(detail::permute(ix) + iy); - vec<4, T, Q> ixy0 = detail::permute(ixy + iz0); - vec<4, T, Q> ixy1 = detail::permute(ixy + iz1); - vec<4, T, Q> ixy00 = detail::permute(ixy0 + iw0); - vec<4, T, Q> ixy01 = detail::permute(ixy0 + iw1); - vec<4, T, Q> ixy10 = detail::permute(ixy1 + iw0); - vec<4, T, Q> ixy11 = detail::permute(ixy1 + iw1); - - vec<4, T, Q> gx00 = ixy00 / T(7); - vec<4, T, Q> gy00 = floor(gx00) / T(7); - vec<4, T, Q> gz00 = floor(gy00) / T(6); - gx00 = fract(gx00) - T(0.5); - gy00 = fract(gy00) - T(0.5); - gz00 = fract(gz00) - T(0.5); - vec<4, T, Q> gw00 = vec<4, T, Q>(0.75) - abs(gx00) - abs(gy00) - abs(gz00); - vec<4, T, Q> sw00 = step(gw00, vec<4, T, Q>(0)); - gx00 -= sw00 * (step(T(0), gx00) - T(0.5)); - gy00 -= sw00 * (step(T(0), gy00) - T(0.5)); - - vec<4, T, Q> gx01 = ixy01 / T(7); - vec<4, T, Q> gy01 = floor(gx01) / T(7); - vec<4, T, Q> gz01 = floor(gy01) / T(6); - gx01 = fract(gx01) - T(0.5); - gy01 = fract(gy01) - T(0.5); - gz01 = fract(gz01) - T(0.5); - vec<4, T, Q> gw01 = vec<4, T, Q>(0.75) - abs(gx01) - abs(gy01) - abs(gz01); - vec<4, T, Q> sw01 = step(gw01, vec<4, T, Q>(0.0)); - gx01 -= sw01 * (step(T(0), gx01) - T(0.5)); - gy01 -= sw01 * (step(T(0), gy01) - T(0.5)); - - vec<4, T, Q> gx10 = ixy10 / T(7); - vec<4, T, Q> gy10 = floor(gx10) / T(7); - vec<4, T, Q> gz10 = floor(gy10) / T(6); - gx10 = fract(gx10) - T(0.5); - gy10 = fract(gy10) - T(0.5); - gz10 = fract(gz10) - T(0.5); - vec<4, T, Q> gw10 = vec<4, T, Q>(0.75) - abs(gx10) - abs(gy10) - abs(gz10); - vec<4, T, Q> sw10 
= step(gw10, vec<4, T, Q>(0.0)); - gx10 -= sw10 * (step(T(0), gx10) - T(0.5)); - gy10 -= sw10 * (step(T(0), gy10) - T(0.5)); - - vec<4, T, Q> gx11 = ixy11 / T(7); - vec<4, T, Q> gy11 = floor(gx11) / T(7); - vec<4, T, Q> gz11 = floor(gy11) / T(6); - gx11 = fract(gx11) - T(0.5); - gy11 = fract(gy11) - T(0.5); - gz11 = fract(gz11) - T(0.5); - vec<4, T, Q> gw11 = vec<4, T, Q>(0.75) - abs(gx11) - abs(gy11) - abs(gz11); - vec<4, T, Q> sw11 = step(gw11, vec<4, T, Q>(T(0))); - gx11 -= sw11 * (step(T(0), gx11) - T(0.5)); - gy11 -= sw11 * (step(T(0), gy11) - T(0.5)); - - vec<4, T, Q> g0000(gx00.x, gy00.x, gz00.x, gw00.x); - vec<4, T, Q> g1000(gx00.y, gy00.y, gz00.y, gw00.y); - vec<4, T, Q> g0100(gx00.z, gy00.z, gz00.z, gw00.z); - vec<4, T, Q> g1100(gx00.w, gy00.w, gz00.w, gw00.w); - vec<4, T, Q> g0010(gx10.x, gy10.x, gz10.x, gw10.x); - vec<4, T, Q> g1010(gx10.y, gy10.y, gz10.y, gw10.y); - vec<4, T, Q> g0110(gx10.z, gy10.z, gz10.z, gw10.z); - vec<4, T, Q> g1110(gx10.w, gy10.w, gz10.w, gw10.w); - vec<4, T, Q> g0001(gx01.x, gy01.x, gz01.x, gw01.x); - vec<4, T, Q> g1001(gx01.y, gy01.y, gz01.y, gw01.y); - vec<4, T, Q> g0101(gx01.z, gy01.z, gz01.z, gw01.z); - vec<4, T, Q> g1101(gx01.w, gy01.w, gz01.w, gw01.w); - vec<4, T, Q> g0011(gx11.x, gy11.x, gz11.x, gw11.x); - vec<4, T, Q> g1011(gx11.y, gy11.y, gz11.y, gw11.y); - vec<4, T, Q> g0111(gx11.z, gy11.z, gz11.z, gw11.z); - vec<4, T, Q> g1111(gx11.w, gy11.w, gz11.w, gw11.w); - - vec<4, T, Q> norm00 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0000, g0000), dot(g0100, g0100), dot(g1000, g1000), dot(g1100, g1100))); - g0000 *= norm00.x; - g0100 *= norm00.y; - g1000 *= norm00.z; - g1100 *= norm00.w; - - vec<4, T, Q> norm01 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0001, g0001), dot(g0101, g0101), dot(g1001, g1001), dot(g1101, g1101))); - g0001 *= norm01.x; - g0101 *= norm01.y; - g1001 *= norm01.z; - g1101 *= norm01.w; - - vec<4, T, Q> norm10 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0010, g0010), dot(g0110, g0110), dot(g1010, g1010), dot(g1110, g1110))); - g0010 *= norm10.x; - g0110 *= norm10.y; - g1010 *= norm10.z; - g1110 *= norm10.w; - - vec<4, T, Q> norm11 = detail::taylorInvSqrt(vec<4, T, Q>(dot(g0011, g0011), dot(g0111, g0111), dot(g1011, g1011), dot(g1111, g1111))); - g0011 *= norm11.x; - g0111 *= norm11.y; - g1011 *= norm11.z; - g1111 *= norm11.w; - - T n0000 = dot(g0000, Pf0); - T n1000 = dot(g1000, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf0.w)); - T n0100 = dot(g0100, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf0.w)); - T n1100 = dot(g1100, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf0.w)); - T n0010 = dot(g0010, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf0.w)); - T n1010 = dot(g1010, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf0.w)); - T n0110 = dot(g0110, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf0.w)); - T n1110 = dot(g1110, vec<4, T, Q>(Pf1.x, Pf1.y, Pf1.z, Pf0.w)); - T n0001 = dot(g0001, vec<4, T, Q>(Pf0.x, Pf0.y, Pf0.z, Pf1.w)); - T n1001 = dot(g1001, vec<4, T, Q>(Pf1.x, Pf0.y, Pf0.z, Pf1.w)); - T n0101 = dot(g0101, vec<4, T, Q>(Pf0.x, Pf1.y, Pf0.z, Pf1.w)); - T n1101 = dot(g1101, vec<4, T, Q>(Pf1.x, Pf1.y, Pf0.z, Pf1.w)); - T n0011 = dot(g0011, vec<4, T, Q>(Pf0.x, Pf0.y, Pf1.z, Pf1.w)); - T n1011 = dot(g1011, vec<4, T, Q>(Pf1.x, Pf0.y, Pf1.z, Pf1.w)); - T n0111 = dot(g0111, vec<4, T, Q>(Pf0.x, Pf1.y, Pf1.z, Pf1.w)); - T n1111 = dot(g1111, Pf1); - - vec<4, T, Q> fade_xyzw = detail::fade(Pf0); - vec<4, T, Q> n_0w = mix(vec<4, T, Q>(n0000, n1000, n0100, n1100), vec<4, T, Q>(n0001, n1001, n0101, n1101), fade_xyzw.w); - vec<4, T, Q> n_1w = mix(vec<4, T, Q>(n0010, n1010, n0110, n1110), vec<4, 
T, Q>(n0011, n1011, n0111, n1111), fade_xyzw.w); - vec<4, T, Q> n_zw = mix(n_0w, n_1w, fade_xyzw.z); - vec<2, T, Q> n_yzw = mix(vec<2, T, Q>(n_zw.x, n_zw.y), vec<2, T, Q>(n_zw.z, n_zw.w), fade_xyzw.y); - T n_xyzw = mix(n_yzw.x, n_yzw.y, fade_xyzw.x); - return T(2.2) * n_xyzw; - } - - template - GLM_FUNC_QUALIFIER T simplex(glm::vec<2, T, Q> const& v) - { - vec<4, T, Q> const C = vec<4, T, Q>( - T( 0.211324865405187), // (3.0 - sqrt(3.0)) / 6.0 - T( 0.366025403784439), // 0.5 * (sqrt(3.0) - 1.0) - T(-0.577350269189626), // -1.0 + 2.0 * C.x - T( 0.024390243902439)); // 1.0 / 41.0 - - // First corner - vec<2, T, Q> i = floor(v + dot(v, vec<2, T, Q>(C[1]))); - vec<2, T, Q> x0 = v - i + dot(i, vec<2, T, Q>(C[0])); - - // Other corners - //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0 - //i1.y = 1.0 - i1.x; - vec<2, T, Q> i1 = (x0.x > x0.y) ? vec<2, T, Q>(1, 0) : vec<2, T, Q>(0, 1); - // x0 = x0 - 0.0 + 0.0 * C.xx ; - // x1 = x0 - i1 + 1.0 * C.xx ; - // x2 = x0 - 1.0 + 2.0 * C.xx ; - vec<4, T, Q> x12 = vec<4, T, Q>(x0.x, x0.y, x0.x, x0.y) + vec<4, T, Q>(C.x, C.x, C.z, C.z); - x12 = vec<4, T, Q>(vec<2, T, Q>(x12) - i1, x12.z, x12.w); - - // Permutations - i = mod(i, vec<2, T, Q>(289)); // Avoid truncation effects in permutation - vec<3, T, Q> p = detail::permute( - detail::permute(i.y + vec<3, T, Q>(T(0), i1.y, T(1))) - + i.x + vec<3, T, Q>(T(0), i1.x, T(1))); - - vec<3, T, Q> m = max(vec<3, T, Q>(0.5) - vec<3, T, Q>( - dot(x0, x0), - dot(vec<2, T, Q>(x12.x, x12.y), vec<2, T, Q>(x12.x, x12.y)), - dot(vec<2, T, Q>(x12.z, x12.w), vec<2, T, Q>(x12.z, x12.w))), vec<3, T, Q>(0)); - m = m * m ; - m = m * m ; - - // Gradients: 41 points uniformly over a line, mapped onto a diamond. - // The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287) - - vec<3, T, Q> x = static_cast(2) * fract(p * C.w) - T(1); - vec<3, T, Q> h = abs(x) - T(0.5); - vec<3, T, Q> ox = floor(x + T(0.5)); - vec<3, T, Q> a0 = x - ox; - - // Normalise gradients implicitly by scaling m - // Inlined for speed: m *= taylorInvSqrt( a0*a0 + h*h ); - m *= static_cast(1.79284291400159) - T(0.85373472095314) * (a0 * a0 + h * h); - - // Compute final noise value at P - vec<3, T, Q> g; - g.x = a0.x * x0.x + h.x * x0.y; - //g.yz = a0.yz * x12.xz + h.yz * x12.yw; - g.y = a0.y * x12.x + h.y * x12.y; - g.z = a0.z * x12.z + h.z * x12.w; - return T(130) * dot(m, g); - } - - template - GLM_FUNC_QUALIFIER T simplex(vec<3, T, Q> const& v) - { - vec<2, T, Q> const C(1.0 / 6.0, 1.0 / 3.0); - vec<4, T, Q> const D(0.0, 0.5, 1.0, 2.0); - - // First corner - vec<3, T, Q> i(floor(v + dot(v, vec<3, T, Q>(C.y)))); - vec<3, T, Q> x0(v - i + dot(i, vec<3, T, Q>(C.x))); - - // Other corners - vec<3, T, Q> g(step(vec<3, T, Q>(x0.y, x0.z, x0.x), x0)); - vec<3, T, Q> l(T(1) - g); - vec<3, T, Q> i1(min(g, vec<3, T, Q>(l.z, l.x, l.y))); - vec<3, T, Q> i2(max(g, vec<3, T, Q>(l.z, l.x, l.y))); - - // x0 = x0 - 0.0 + 0.0 * C.xxx; - // x1 = x0 - i1 + 1.0 * C.xxx; - // x2 = x0 - i2 + 2.0 * C.xxx; - // x3 = x0 - 1.0 + 3.0 * C.xxx; - vec<3, T, Q> x1(x0 - i1 + C.x); - vec<3, T, Q> x2(x0 - i2 + C.y); // 2.0*C.x = 1/3 = C.y - vec<3, T, Q> x3(x0 - D.y); // -1.0+3.0*C.x = -0.5 = -D.y - - // Permutations - i = detail::mod289(i); - vec<4, T, Q> p(detail::permute(detail::permute(detail::permute( - i.z + vec<4, T, Q>(T(0), i1.z, i2.z, T(1))) + - i.y + vec<4, T, Q>(T(0), i1.y, i2.y, T(1))) + - i.x + vec<4, T, Q>(T(0), i1.x, i2.x, T(1)))); - - // Gradients: 7x7 points over a square, mapped onto an octahedron. 
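The perlin/simplex overloads implemented in this removed noise.inl are plain scalar-valued functions of a point. An illustrative call site blending the variants declared in the deleted noise.hpp; the helper name, coordinates, period, and weights are arbitrary:

    #include <glm/glm.hpp>
    #include <glm/gtc/noise.hpp>

    // Both noise families return values roughly in [-1, 1]; remap for e.g. a height map.
    float SampleHeight(float x, float y, float t)
    {
        float p     = glm::perlin(glm::vec3(x, y, t));                    // classic Perlin
        float s     = glm::simplex(glm::vec2(x, y));                      // simplex variant
        float tiled = glm::perlin(glm::vec2(x, y), glm::vec2(16.0f));     // periodic, repeats every 16 units
        return 0.5f + 0.25f * p + 0.125f * s + 0.125f * tiled;
    }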
- // The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294) - T n_ = static_cast(0.142857142857); // 1.0/7.0 - vec<3, T, Q> ns(n_ * vec<3, T, Q>(D.w, D.y, D.z) - vec<3, T, Q>(D.x, D.z, D.x)); - - vec<4, T, Q> j(p - T(49) * floor(p * ns.z * ns.z)); // mod(p,7*7) - - vec<4, T, Q> x_(floor(j * ns.z)); - vec<4, T, Q> y_(floor(j - T(7) * x_)); // mod(j,N) - - vec<4, T, Q> x(x_ * ns.x + ns.y); - vec<4, T, Q> y(y_ * ns.x + ns.y); - vec<4, T, Q> h(T(1) - abs(x) - abs(y)); - - vec<4, T, Q> b0(x.x, x.y, y.x, y.y); - vec<4, T, Q> b1(x.z, x.w, y.z, y.w); - - // vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0; - // vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0; - vec<4, T, Q> s0(floor(b0) * T(2) + T(1)); - vec<4, T, Q> s1(floor(b1) * T(2) + T(1)); - vec<4, T, Q> sh(-step(h, vec<4, T, Q>(0.0))); - - vec<4, T, Q> a0 = vec<4, T, Q>(b0.x, b0.z, b0.y, b0.w) + vec<4, T, Q>(s0.x, s0.z, s0.y, s0.w) * vec<4, T, Q>(sh.x, sh.x, sh.y, sh.y); - vec<4, T, Q> a1 = vec<4, T, Q>(b1.x, b1.z, b1.y, b1.w) + vec<4, T, Q>(s1.x, s1.z, s1.y, s1.w) * vec<4, T, Q>(sh.z, sh.z, sh.w, sh.w); - - vec<3, T, Q> p0(a0.x, a0.y, h.x); - vec<3, T, Q> p1(a0.z, a0.w, h.y); - vec<3, T, Q> p2(a1.x, a1.y, h.z); - vec<3, T, Q> p3(a1.z, a1.w, h.w); - - // Normalise gradients - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3))); - p0 *= norm.x; - p1 *= norm.y; - p2 *= norm.z; - p3 *= norm.w; - - // Mix final noise value - vec<4, T, Q> m = max(T(0.6) - vec<4, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2), dot(x3, x3)), vec<4, T, Q>(0)); - m = m * m; - return T(42) * dot(m * m, vec<4, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2), dot(p3, x3))); - } - - template - GLM_FUNC_QUALIFIER T simplex(vec<4, T, Q> const& v) - { - vec<4, T, Q> const C( - 0.138196601125011, // (5 - sqrt(5))/20 G4 - 0.276393202250021, // 2 * G4 - 0.414589803375032, // 3 * G4 - -0.447213595499958); // -1 + 4 * G4 - - // (sqrt(5) - 1)/4 = F4, used once below - T const F4 = static_cast(0.309016994374947451); - - // First corner - vec<4, T, Q> i = floor(v + dot(v, vec<4, T, Q>(F4))); - vec<4, T, Q> x0 = v - i + dot(i, vec<4, T, Q>(C.x)); - - // Other corners - - // Rank sorting originally contributed by Bill Licea-Kane, AMD (formerly ATI) - vec<4, T, Q> i0; - vec<3, T, Q> isX = step(vec<3, T, Q>(x0.y, x0.z, x0.w), vec<3, T, Q>(x0.x)); - vec<3, T, Q> isYZ = step(vec<3, T, Q>(x0.z, x0.w, x0.w), vec<3, T, Q>(x0.y, x0.y, x0.z)); - // i0.x = dot(isX, vec3(1.0)); - //i0.x = isX.x + isX.y + isX.z; - //i0.yzw = static_cast(1) - isX; - i0 = vec<4, T, Q>(isX.x + isX.y + isX.z, T(1) - isX); - // i0.y += dot(isYZ.xy, vec2(1.0)); - i0.y += isYZ.x + isYZ.y; - //i0.zw += 1.0 - vec<2, T, Q>(isYZ.x, isYZ.y); - i0.z += static_cast(1) - isYZ.x; - i0.w += static_cast(1) - isYZ.y; - i0.z += isYZ.z; - i0.w += static_cast(1) - isYZ.z; - - // i0 now contains the unique values 0,1,2,3 in each channel - vec<4, T, Q> i3 = clamp(i0, T(0), T(1)); - vec<4, T, Q> i2 = clamp(i0 - T(1), T(0), T(1)); - vec<4, T, Q> i1 = clamp(i0 - T(2), T(0), T(1)); - - // x0 = x0 - 0.0 + 0.0 * C.xxxx - // x1 = x0 - i1 + 0.0 * C.xxxx - // x2 = x0 - i2 + 0.0 * C.xxxx - // x3 = x0 - i3 + 0.0 * C.xxxx - // x4 = x0 - 1.0 + 4.0 * C.xxxx - vec<4, T, Q> x1 = x0 - i1 + C.x; - vec<4, T, Q> x2 = x0 - i2 + C.y; - vec<4, T, Q> x3 = x0 - i3 + C.z; - vec<4, T, Q> x4 = x0 + C.w; - - // Permutations - i = mod(i, vec<4, T, Q>(289)); - T j0 = detail::permute(detail::permute(detail::permute(detail::permute(i.w) + i.z) + i.y) + i.x); - vec<4, T, Q> j1 = 
detail::permute(detail::permute(detail::permute(detail::permute( - i.w + vec<4, T, Q>(i1.w, i2.w, i3.w, T(1))) + - i.z + vec<4, T, Q>(i1.z, i2.z, i3.z, T(1))) + - i.y + vec<4, T, Q>(i1.y, i2.y, i3.y, T(1))) + - i.x + vec<4, T, Q>(i1.x, i2.x, i3.x, T(1))); - - // Gradients: 7x7x6 points over a cube, mapped onto a 4-cross polytope - // 7*7*6 = 294, which is close to the ring size 17*17 = 289. - vec<4, T, Q> ip = vec<4, T, Q>(T(1) / T(294), T(1) / T(49), T(1) / T(7), T(0)); - - vec<4, T, Q> p0 = gtc::grad4(j0, ip); - vec<4, T, Q> p1 = gtc::grad4(j1.x, ip); - vec<4, T, Q> p2 = gtc::grad4(j1.y, ip); - vec<4, T, Q> p3 = gtc::grad4(j1.z, ip); - vec<4, T, Q> p4 = gtc::grad4(j1.w, ip); - - // Normalise gradients - vec<4, T, Q> norm = detail::taylorInvSqrt(vec<4, T, Q>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3))); - p0 *= norm.x; - p1 *= norm.y; - p2 *= norm.z; - p3 *= norm.w; - p4 *= detail::taylorInvSqrt(dot(p4, p4)); - - // Mix contributions from the five corners - vec<3, T, Q> m0 = max(T(0.6) - vec<3, T, Q>(dot(x0, x0), dot(x1, x1), dot(x2, x2)), vec<3, T, Q>(0)); - vec<2, T, Q> m1 = max(T(0.6) - vec<2, T, Q>(dot(x3, x3), dot(x4, x4) ), vec<2, T, Q>(0)); - m0 = m0 * m0; - m1 = m1 * m1; - return T(49) * - (dot(m0 * m0, vec<3, T, Q>(dot(p0, x0), dot(p1, x1), dot(p2, x2))) + - dot(m1 * m1, vec<2, T, Q>(dot(p3, x3), dot(p4, x4)))); - } -}//namespace glm diff --git a/third_party/glm/gtc/packing.hpp b/third_party/glm/gtc/packing.hpp deleted file mode 100755 index 7c64aba..0000000 --- a/third_party/glm/gtc/packing.hpp +++ /dev/null @@ -1,728 +0,0 @@ -/// @ref gtc_packing -/// @file glm/gtc/packing.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_packing GLM_GTC_packing -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// This extension provides a set of function to convert vertors to packed -/// formats. - -#pragma once - -// Dependency: -#include "type_precision.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_packing extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_packing - /// @{ - - /// First, converts the normalized floating-point value v into a 8-bit integer value. - /// Then, the results are packed into the returned 8-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm1x8: round(clamp(c, 0, +1) * 255.0) - /// - /// @see gtc_packing - /// @see uint16 packUnorm2x8(vec2 const& v) - /// @see uint32 packUnorm4x8(vec4 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint8 packUnorm1x8(float v); - - /// Convert a single 8-bit integer to a normalized floating-point value. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm4x8: f / 255.0 - /// - /// @see gtc_packing - /// @see vec2 unpackUnorm2x8(uint16 p) - /// @see vec4 unpackUnorm4x8(uint32 p) - /// @see GLSL unpackUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackUnorm1x8(uint8 p); - - /// First, converts each component of the normalized floating-point value v into 8-bit integer values. - /// Then, the results are packed into the returned 16-bit unsigned integer. 
- /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm2x8: round(clamp(c, 0, +1) * 255.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint8 packUnorm1x8(float const& v) - /// @see uint32 packUnorm4x8(vec4 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packUnorm2x8(vec2 const& v); - - /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit unsigned integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm4x8: f / 255.0 - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see float unpackUnorm1x8(uint8 v) - /// @see vec4 unpackUnorm4x8(uint32 p) - /// @see GLSL unpackUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec2 unpackUnorm2x8(uint16 p); - - /// First, converts the normalized floating-point value v into 8-bit integer value. - /// Then, the results are packed into the returned 8-bit unsigned integer. - /// - /// The conversion to fixed point is done as follows: - /// packSnorm1x8: round(clamp(s, -1, +1) * 127.0) - /// - /// @see gtc_packing - /// @see uint16 packSnorm2x8(vec2 const& v) - /// @see uint32 packSnorm4x8(vec4 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint8 packSnorm1x8(float s); - - /// First, unpacks a single 8-bit unsigned integer p into a single 8-bit signed integers. - /// Then, the value is converted to a normalized floating-point value to generate the returned scalar. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm1x8: clamp(f / 127.0, -1, +1) - /// - /// @see gtc_packing - /// @see vec2 unpackSnorm2x8(uint16 p) - /// @see vec4 unpackSnorm4x8(uint32 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackSnorm1x8(uint8 p); - - /// First, converts each component of the normalized floating-point value v into 8-bit integer values. - /// Then, the results are packed into the returned 16-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm2x8: round(clamp(c, -1, +1) * 127.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint8 packSnorm1x8(float const& v) - /// @see uint32 packSnorm4x8(vec4 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packSnorm2x8(vec2 const& v); - - /// First, unpacks a single 16-bit unsigned integer p into a pair of 8-bit signed integers. 
- /// Then, each component is converted to a normalized floating-point value to generate the returned two-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm2x8: clamp(f / 127.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see float unpackSnorm1x8(uint8 p) - /// @see vec4 unpackSnorm4x8(uint32 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec2 unpackSnorm2x8(uint16 p); - - /// First, converts the normalized floating-point value v into a 16-bit integer value. - /// Then, the results are packed into the returned 16-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm1x16: round(clamp(c, 0, +1) * 65535.0) - /// - /// @see gtc_packing - /// @see uint16 packSnorm1x16(float const& v) - /// @see uint64 packSnorm4x16(vec4 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packUnorm1x16(float v); - - /// First, unpacks a single 16-bit unsigned integer p into a of 16-bit unsigned integers. - /// Then, the value is converted to a normalized floating-point value to generate the returned scalar. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm1x16: f / 65535.0 - /// - /// @see gtc_packing - /// @see vec2 unpackUnorm2x16(uint32 p) - /// @see vec4 unpackUnorm4x16(uint64 p) - /// @see GLSL unpackUnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackUnorm1x16(uint16 p); - - /// First, converts each component of the normalized floating-point value v into 16-bit integer values. - /// Then, the results are packed into the returned 64-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm4x16: round(clamp(c, 0, +1) * 65535.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint16 packUnorm1x16(float const& v) - /// @see uint32 packUnorm2x16(vec2 const& v) - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint64 packUnorm4x16(vec4 const& v); - - /// First, unpacks a single 64-bit unsigned integer p into four 16-bit unsigned integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnormx4x16: f / 65535.0 - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. 
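Typical usage of the 16-bit unorm helpers described above, assuming the extension is still pulled in through <glm/gtc/packing.hpp>; the colour value is illustrative.

#include <glm/gtc/packing.hpp>
#include <glm/vec4.hpp>

void unorm16_roundtrip()
{
    glm::vec4 color(0.25f, 0.5f, 0.75f, 1.0f);
    glm::uint64 packed = glm::packUnorm4x16(color);    // each channel -> round(clamp(c, 0, 1) * 65535)
    glm::vec4 restored = glm::unpackUnorm4x16(packed); // each channel -> f / 65535
    (void)restored;
}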
- /// - /// @see gtc_packing - /// @see float unpackUnorm1x16(uint16 p) - /// @see vec2 unpackUnorm2x16(uint32 p) - /// @see GLSL unpackUnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackUnorm4x16(uint64 p); - - /// First, converts the normalized floating-point value v into 16-bit integer value. - /// Then, the results are packed into the returned 16-bit unsigned integer. - /// - /// The conversion to fixed point is done as follows: - /// packSnorm1x8: round(clamp(s, -1, +1) * 32767.0) - /// - /// @see gtc_packing - /// @see uint32 packSnorm2x16(vec2 const& v) - /// @see uint64 packSnorm4x16(vec4 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packSnorm1x16(float v); - - /// First, unpacks a single 16-bit unsigned integer p into a single 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned scalar. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm1x16: clamp(f / 32767.0, -1, +1) - /// - /// @see gtc_packing - /// @see vec2 unpackSnorm2x16(uint32 p) - /// @see vec4 unpackSnorm4x16(uint64 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackSnorm1x16(uint16 p); - - /// First, converts each component of the normalized floating-point value v into 16-bit integer values. - /// Then, the results are packed into the returned 64-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm2x8: round(clamp(c, -1, +1) * 32767.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see gtc_packing - /// @see uint16 packSnorm1x16(float const& v) - /// @see uint32 packSnorm2x16(vec2 const& v) - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint64 packSnorm4x16(vec4 const& v); - - /// First, unpacks a single 64-bit unsigned integer p into four 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm4x16: clamp(f / 32767.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see float unpackSnorm1x16(uint16 p) - /// @see vec2 unpackSnorm2x16(uint32 p) - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackSnorm4x16(uint64 p); - - /// Returns an unsigned integer obtained by converting the components of a floating-point scalar - /// to the 16-bit floating-point representation found in the OpenGL Specification, - /// and then packing this 16-bit value into a 16-bit unsigned integer. 
- /// - /// @see gtc_packing - /// @see uint32 packHalf2x16(vec2 const& v) - /// @see uint64 packHalf4x16(vec4 const& v) - /// @see GLSL packHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint16 packHalf1x16(float v); - - /// Returns a floating-point scalar with components obtained by unpacking a 16-bit unsigned integer into a 16-bit value, - /// interpreted as a 16-bit floating-point number according to the OpenGL Specification, - /// and converting it to 32-bit floating-point values. - /// - /// @see gtc_packing - /// @see vec2 unpackHalf2x16(uint32 const& v) - /// @see vec4 unpackHalf4x16(uint64 const& v) - /// @see GLSL unpackHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL float unpackHalf1x16(uint16 v); - - /// Returns an unsigned integer obtained by converting the components of a four-component floating-point vector - /// to the 16-bit floating-point representation found in the OpenGL Specification, - /// and then packing these four 16-bit values into a 64-bit unsigned integer. - /// The first vector component specifies the 16 least-significant bits of the result; - /// the forth component specifies the 16 most-significant bits. - /// - /// @see gtc_packing - /// @see uint16 packHalf1x16(float const& v) - /// @see uint32 packHalf2x16(vec2 const& v) - /// @see GLSL packHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint64 packHalf4x16(vec4 const& v); - - /// Returns a four-component floating-point vector with components obtained by unpacking a 64-bit unsigned integer into four 16-bit values, - /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification, - /// and converting them to 32-bit floating-point values. - /// The first component of the vector is obtained from the 16 least-significant bits of v; - /// the forth component is obtained from the 16 most-significant bits of v. - /// - /// @see gtc_packing - /// @see float unpackHalf1x16(uint16 const& v) - /// @see vec2 unpackHalf2x16(uint32 const& v) - /// @see GLSL unpackHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackHalf4x16(uint64 p); - - /// Returns an unsigned integer obtained by converting the components of a four-component signed integer vector - /// to the 10-10-10-2-bit signed integer representation found in the OpenGL Specification, - /// and then packing these four values into a 32-bit unsigned integer. - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see uint32 packI3x10_1x2(uvec4 const& v) - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see ivec4 unpackI3x10_1x2(uint32 const& p) - GLM_FUNC_DECL uint32 packI3x10_1x2(ivec4 const& v); - - /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit signed integers. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. 
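A short usage sketch for the scalar half-float pair declared earlier in this header; the constant is illustrative and loses a little precision, as any 16-bit float would.

#include <glm/gtc/packing.hpp>

void half_roundtrip()
{
    glm::uint16 h = glm::packHalf1x16(3.14159f); // 32-bit float -> 16-bit half representation
    float back = glm::unpackHalf1x16(h);         // 16-bit half -> 32-bit float, slightly rounded
    (void)back;
}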
- /// - /// @see gtc_packing - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p); - /// @see uvec4 unpackI3x10_1x2(uint32 const& p); - GLM_FUNC_DECL ivec4 unpackI3x10_1x2(uint32 p); - - /// Returns an unsigned integer obtained by converting the components of a four-component unsigned integer vector - /// to the 10-10-10-2-bit unsigned integer representation found in the OpenGL Specification, - /// and then packing these four values into a 32-bit unsigned integer. - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see uint32 packI3x10_1x2(ivec4 const& v) - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see ivec4 unpackU3x10_1x2(uint32 const& p) - GLM_FUNC_DECL uint32 packU3x10_1x2(uvec4 const& v); - - /// Unpacks a single 32-bit unsigned integer p into three 10-bit and one 2-bit unsigned integers. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p); - /// @see uvec4 unpackI3x10_1x2(uint32 const& p); - GLM_FUNC_DECL uvec4 unpackU3x10_1x2(uint32 p); - - /// First, converts the first three components of the normalized floating-point value v into 10-bit signed integer values. - /// Then, converts the forth component of the normalized floating-point value v into 2-bit signed integer values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm3x10_1x2(xyz): round(clamp(c, -1, +1) * 511.0) - /// packSnorm3x10_1x2(w): round(clamp(c, -1, +1) * 1.0) - /// - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see vec4 unpackSnorm3x10_1x2(uint32 const& p) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see uint32 packI3x10_1x2(ivec4 const& v) - GLM_FUNC_DECL uint32 packSnorm3x10_1x2(vec4 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into four 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm3x10_1x2(xyz): clamp(f / 511.0, -1, +1) - /// unpackSnorm3x10_1x2(w): clamp(f / 511.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p)) - /// @see uvec4 unpackI3x10_1x2(uint32 const& p) - /// @see uvec4 unpackU3x10_1x2(uint32 const& p) - GLM_FUNC_DECL vec4 unpackSnorm3x10_1x2(uint32 p); - - /// First, converts the first three components of the normalized floating-point value v into 10-bit unsigned integer values. 
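The signed 10/10/10/2 layout above is commonly used to store vertex normals. A usage sketch under that assumption; what goes into the 2-bit w component is up to the caller.

#include <glm/gtc/packing.hpp>
#include <glm/vec4.hpp>

// xyz = unit normal scaled by 511, w scaled by 1, packed least-significant-first.
glm::uint32 pack_normal(const glm::vec4& n)
{
    return glm::packSnorm3x10_1x2(n);
}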
- /// Then, converts the forth component of the normalized floating-point value v into 2-bit signed uninteger values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm3x10_1x2(xyz): round(clamp(c, 0, +1) * 1023.0) - /// packUnorm3x10_1x2(w): round(clamp(c, 0, +1) * 3.0) - /// - /// The first vector component specifies the 10 least-significant bits of the result; - /// the forth component specifies the 2 most-significant bits. - /// - /// @see gtc_packing - /// @see vec4 unpackUnorm3x10_1x2(uint32 const& p) - /// @see uint32 packUnorm3x10_1x2(vec4 const& v) - /// @see uint32 packU3x10_1x2(uvec4 const& v) - /// @see uint32 packI3x10_1x2(ivec4 const& v) - GLM_FUNC_DECL uint32 packUnorm3x10_1x2(vec4 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into four 16-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm3x10_1x2(xyz): clamp(f / 1023.0, 0, +1) - /// unpackSnorm3x10_1x2(w): clamp(f / 3.0, 0, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packSnorm3x10_1x2(vec4 const& v) - /// @see vec4 unpackInorm3x10_1x2(uint32 const& p)) - /// @see uvec4 unpackI3x10_1x2(uint32 const& p) - /// @see uvec4 unpackU3x10_1x2(uint32 const& p) - GLM_FUNC_DECL vec4 unpackUnorm3x10_1x2(uint32 p); - - /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values. - /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The first vector component specifies the 11 least-significant bits of the result; - /// the last component specifies the 10 most-significant bits. - /// - /// @see gtc_packing - /// @see vec3 unpackF2x11_1x10(uint32 const& p) - GLM_FUNC_DECL uint32 packF2x11_1x10(vec3 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value . - /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see gtc_packing - /// @see uint32 packF2x11_1x10(vec3 const& v) - GLM_FUNC_DECL vec3 unpackF2x11_1x10(uint32 p); - - - /// First, converts the first two components of the normalized floating-point value v into 11-bit signless floating-point values. - /// Then, converts the third component of the normalized floating-point value v into a 10-bit signless floating-point value. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The first vector component specifies the 11 least-significant bits of the result; - /// the last component specifies the 10 most-significant bits. 
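Usage sketch for the 11/11/10 float packing declared above, which matches the R11F_G11F_B10F-style layouts used for compact HDR colour; values are illustrative and must be non-negative ("signless").

#include <glm/gtc/packing.hpp>
#include <glm/vec3.hpp>

void hdr_pack()
{
    glm::vec3 hdr(1.5f, 0.25f, 8.0f);
    glm::uint32 packed = glm::packF2x11_1x10(hdr); // x -> 11 LSBs, y -> next 11 bits, z -> 10 MSBs
    glm::vec3 back = glm::unpackF2x11_1x10(packed);
    (void)back;
}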
- /// - /// packF3x9_E1x5 allows encoding into RGBE / RGB9E5 format - /// - /// @see gtc_packing - /// @see vec3 unpackF3x9_E1x5(uint32 const& p) - GLM_FUNC_DECL uint32 packF3x9_E1x5(vec3 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into two 11-bit signless floating-point values and one 10-bit signless floating-point value . - /// Then, each component is converted to a normalized floating-point value to generate the returned three-component vector. - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// unpackF3x9_E1x5 allows decoding RGBE / RGB9E5 data - /// - /// @see gtc_packing - /// @see uint32 packF3x9_E1x5(vec3 const& v) - GLM_FUNC_DECL vec3 unpackF3x9_E1x5(uint32 p); - - /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector - /// to the 16-bit floating-point representation found in the OpenGL Specification. - /// The first vector component specifies the 16 least-significant bits of the result; - /// the forth component specifies the 16 most-significant bits. - /// - /// @see gtc_packing - /// @see vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& p) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb); - - /// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers and converting them to 32-bit floating-point values. - /// The first component of the vector is obtained from the 16 least-significant bits of v; - /// the forth component is obtained from the 16 most-significant bits of v. - /// - /// @see gtc_packing - /// @see vec<4, T, Q> packRGBM(vec<3, float, Q> const& v) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm); - - /// Returns an unsigned integer vector obtained by converting the components of a floating-point vector - /// to the 16-bit floating-point representation found in the OpenGL Specification. - /// The first vector component specifies the 16 least-significant bits of the result; - /// the forth component specifies the 16 most-significant bits. - /// - /// @see gtc_packing - /// @see vec unpackHalf(vec const& p) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec packHalf(vec const& v); - - /// Returns a floating-point vector with components obtained by reinterpreting an integer vector as 16-bit floating-point numbers and converting them to 32-bit floating-point values. - /// The first component of the vector is obtained from the 16 least-significant bits of v; - /// the forth component is obtained from the 16 most-significant bits of v. - /// - /// @see gtc_packing - /// @see vec packHalf(vec const& v) - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - template - GLM_FUNC_DECL vec unpackHalf(vec const& p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec unpackUnorm(vec const& p); - template - GLM_FUNC_DECL vec packUnorm(vec const& v); - - /// Convert a packed integer to a normalized floating-point vector. 
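The RGBM pair above stores an HDR colour as a scaled rgb triple plus a shared multiplier kept in the fourth channel. A usage sketch; the colour is illustrative and the round trip is intentionally lossy.

#include <glm/gtc/packing.hpp>
#include <glm/vec3.hpp>
#include <glm/vec4.hpp>

void rgbm_roundtrip()
{
    glm::vec3 hdr(2.0f, 0.5f, 4.5f);
    glm::vec4 rgbm = glm::packRGBM(hdr);    // rgb scaled down, multiplier stored in .w
    glm::vec3 back = glm::unpackRGBM(rgbm); // roughly the original colour
    (void)back;
}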
- /// - /// @see gtc_packing - /// @see vec packUnorm(vec const& v) - template - GLM_FUNC_DECL vec unpackUnorm(vec const& v); - - /// Convert each component of the normalized floating-point vector into signed integer values. - /// - /// @see gtc_packing - /// @see vec unpackSnorm(vec const& p); - template - GLM_FUNC_DECL vec packSnorm(vec const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see vec packSnorm(vec const& v) - template - GLM_FUNC_DECL vec unpackSnorm(vec const& v); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec2 unpackUnorm2x4(uint8 p) - GLM_FUNC_DECL uint8 packUnorm2x4(vec2 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint8 packUnorm2x4(vec2 const& v) - GLM_FUNC_DECL vec2 unpackUnorm2x4(uint8 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec4 unpackUnorm4x4(uint16 p) - GLM_FUNC_DECL uint16 packUnorm4x4(vec4 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint16 packUnorm4x4(vec4 const& v) - GLM_FUNC_DECL vec4 unpackUnorm4x4(uint16 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec3 unpackUnorm1x5_1x6_1x5(uint16 p) - GLM_FUNC_DECL uint16 packUnorm1x5_1x6_1x5(vec3 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint16 packUnorm1x5_1x6_1x5(vec3 const& v) - GLM_FUNC_DECL vec3 unpackUnorm1x5_1x6_1x5(uint16 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec4 unpackUnorm3x5_1x1(uint16 p) - GLM_FUNC_DECL uint16 packUnorm3x5_1x1(vec4 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint16 packUnorm3x5_1x1(vec4 const& v) - GLM_FUNC_DECL vec4 unpackUnorm3x5_1x1(uint16 p); - - /// Convert each component of the normalized floating-point vector into unsigned integer values. - /// - /// @see gtc_packing - /// @see vec3 unpackUnorm2x3_1x2(uint8 p) - GLM_FUNC_DECL uint8 packUnorm2x3_1x2(vec3 const& v); - - /// Convert a packed integer to a normalized floating-point vector. - /// - /// @see gtc_packing - /// @see uint8 packUnorm2x3_1x2(vec3 const& v) - GLM_FUNC_DECL vec3 unpackUnorm2x3_1x2(uint8 p); - - - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i8vec2 unpackInt2x8(int16 p) - GLM_FUNC_DECL int16 packInt2x8(i8vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int16 packInt2x8(i8vec2 const& v) - GLM_FUNC_DECL i8vec2 unpackInt2x8(int16 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u8vec2 unpackInt2x8(uint16 p) - GLM_FUNC_DECL uint16 packUint2x8(u8vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint16 packInt2x8(u8vec2 const& v) - GLM_FUNC_DECL u8vec2 unpackUint2x8(uint16 p); - - /// Convert each component from an integer vector into a packed integer. 
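packUnorm1x5_1x6_1x5 above is the classic RGB565 layout. A usage sketch; the input is expected to already be in [0, 1].

#include <glm/gtc/packing.hpp>
#include <glm/vec3.hpp>

// r -> 5 bits, g -> 6 bits, b -> 5 bits (scaled by 31, 63 and 31 respectively).
glm::uint16 to_rgb565(const glm::vec3& rgb)
{
    return glm::packUnorm1x5_1x6_1x5(rgb);
}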
- /// - /// @see gtc_packing - /// @see i8vec4 unpackInt4x8(int32 p) - GLM_FUNC_DECL int32 packInt4x8(i8vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int32 packInt2x8(i8vec4 const& v) - GLM_FUNC_DECL i8vec4 unpackInt4x8(int32 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u8vec4 unpackUint4x8(uint32 p) - GLM_FUNC_DECL uint32 packUint4x8(u8vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint32 packUint4x8(u8vec2 const& v) - GLM_FUNC_DECL u8vec4 unpackUint4x8(uint32 p); - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i16vec2 unpackInt2x16(int p) - GLM_FUNC_DECL int packInt2x16(i16vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int packInt2x16(i16vec2 const& v) - GLM_FUNC_DECL i16vec2 unpackInt2x16(int p); - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i16vec4 unpackInt4x16(int64 p) - GLM_FUNC_DECL int64 packInt4x16(i16vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int64 packInt4x16(i16vec4 const& v) - GLM_FUNC_DECL i16vec4 unpackInt4x16(int64 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u16vec2 unpackUint2x16(uint p) - GLM_FUNC_DECL uint packUint2x16(u16vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint packUint2x16(u16vec2 const& v) - GLM_FUNC_DECL u16vec2 unpackUint2x16(uint p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u16vec4 unpackUint4x16(uint64 p) - GLM_FUNC_DECL uint64 packUint4x16(u16vec4 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see uint64 packUint4x16(u16vec4 const& v) - GLM_FUNC_DECL u16vec4 unpackUint4x16(uint64 p); - - /// Convert each component from an integer vector into a packed integer. - /// - /// @see gtc_packing - /// @see i32vec2 unpackInt2x32(int p) - GLM_FUNC_DECL int64 packInt2x32(i32vec2 const& v); - - /// Convert a packed integer into an integer vector. - /// - /// @see gtc_packing - /// @see int packInt2x16(i32vec2 const& v) - GLM_FUNC_DECL i32vec2 unpackInt2x32(int64 p); - - /// Convert each component from an integer vector into a packed unsigned integer. - /// - /// @see gtc_packing - /// @see u32vec2 unpackUint2x32(int p) - GLM_FUNC_DECL uint64 packUint2x32(u32vec2 const& v); - - /// Convert a packed integer into an integer vector. 
- /// - /// @see gtc_packing - /// @see int packUint2x16(u32vec2 const& v) - GLM_FUNC_DECL u32vec2 unpackUint2x32(uint64 p); - - - /// @} -}// namespace glm - -#include "packing.inl" diff --git a/third_party/glm/gtc/packing.inl b/third_party/glm/gtc/packing.inl deleted file mode 100755 index 8c906e1..0000000 --- a/third_party/glm/gtc/packing.inl +++ /dev/null @@ -1,938 +0,0 @@ -/// @ref gtc_packing - -#include "../ext/scalar_relational.hpp" -#include "../ext/vector_relational.hpp" -#include "../common.hpp" -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../detail/type_half.hpp" -#include -#include - -namespace glm{ -namespace detail -{ - GLM_FUNC_QUALIFIER glm::uint16 float2half(glm::uint32 f) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x00007c00 => 00000000 00000000 01111100 00000000 - // 0x000003ff => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((f >> 16) & 0x8000) | // sign - ((((f & 0x7f800000) - 0x38000000) >> 13) & 0x7c00) | // exponential - ((f >> 13) & 0x03ff); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 float2packed11(glm::uint32 f) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x000007c0 => 00000000 00000000 00000111 11000000 - // 0x00007c00 => 00000000 00000000 01111100 00000000 - // 0x000003ff => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((f & 0x7f800000) - 0x38000000) >> 17) & 0x07c0) | // exponential - ((f >> 17) & 0x003f); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 packed11ToFloat(glm::uint32 p) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x000007c0 => 00000000 00000000 00000111 11000000 - // 0x00007c00 => 00000000 00000000 01111100 00000000 - // 0x000003ff => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((p & 0x07c0) << 17) + 0x38000000) & 0x7f800000) | // exponential - ((p & 0x003f) << 17); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 float2packed10(glm::uint32 f) - { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x0000001F => 00000000 00000000 00000000 00011111 - // 0x0000003F => 00000000 00000000 00000000 00111111 - // 0x000003E0 => 00000000 00000000 00000011 11100000 - // 0x000007C0 => 00000000 00000000 00000111 11000000 - // 0x00007C00 => 00000000 00000000 01111100 00000000 - // 0x000003FF => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((f & 0x7f800000) - 0x38000000) >> 18) & 0x03E0) | // exponential - ((f >> 18) & 0x001f); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint32 packed10ToFloat(glm::uint32 p) 
- { - // 10 bits => EE EEEFFFFF - // 11 bits => EEE EEFFFFFF - // Half bits => SEEEEEFF FFFFFFFF - // Float bits => SEEEEEEE EFFFFFFF FFFFFFFF FFFFFFFF - - // 0x0000001F => 00000000 00000000 00000000 00011111 - // 0x0000003F => 00000000 00000000 00000000 00111111 - // 0x000003E0 => 00000000 00000000 00000011 11100000 - // 0x000007C0 => 00000000 00000000 00000111 11000000 - // 0x00007C00 => 00000000 00000000 01111100 00000000 - // 0x000003FF => 00000000 00000000 00000011 11111111 - // 0x38000000 => 00111000 00000000 00000000 00000000 - // 0x7f800000 => 01111111 10000000 00000000 00000000 - // 0x00008000 => 00000000 00000000 10000000 00000000 - return - ((((p & 0x03E0) << 18) + 0x38000000) & 0x7f800000) | // exponential - ((p & 0x001f) << 18); // Mantissa - } - - GLM_FUNC_QUALIFIER glm::uint half2float(glm::uint h) - { - return ((h & 0x8000) << 16) | ((( h & 0x7c00) + 0x1C000) << 13) | ((h & 0x03FF) << 13); - } - - GLM_FUNC_QUALIFIER glm::uint floatTo11bit(float x) - { - if(x == 0.0f) - return 0u; - else if(glm::isnan(x)) - return ~0u; - else if(glm::isinf(x)) - return 0x1Fu << 6u; - - uint Pack = 0u; - memcpy(&Pack, &x, sizeof(Pack)); - return float2packed11(Pack); - } - - GLM_FUNC_QUALIFIER float packed11bitToFloat(glm::uint x) - { - if(x == 0) - return 0.0f; - else if(x == ((1 << 11) - 1)) - return ~0;//NaN - else if(x == (0x1f << 6)) - return ~0;//Inf - - uint Result = packed11ToFloat(x); - - float Temp = 0; - memcpy(&Temp, &Result, sizeof(Temp)); - return Temp; - } - - GLM_FUNC_QUALIFIER glm::uint floatTo10bit(float x) - { - if(x == 0.0f) - return 0u; - else if(glm::isnan(x)) - return ~0u; - else if(glm::isinf(x)) - return 0x1Fu << 5u; - - uint Pack = 0; - memcpy(&Pack, &x, sizeof(Pack)); - return float2packed10(Pack); - } - - GLM_FUNC_QUALIFIER float packed10bitToFloat(glm::uint x) - { - if(x == 0) - return 0.0f; - else if(x == ((1 << 10) - 1)) - return ~0;//NaN - else if(x == (0x1f << 5)) - return ~0;//Inf - - uint Result = packed10ToFloat(x); - - float Temp = 0; - memcpy(&Temp, &Result, sizeof(Temp)); - return Temp; - } - -// GLM_FUNC_QUALIFIER glm::uint f11_f11_f10(float x, float y, float z) -// { -// return ((floatTo11bit(x) & ((1 << 11) - 1)) << 0) | ((floatTo11bit(y) & ((1 << 11) - 1)) << 11) | ((floatTo10bit(z) & ((1 << 10) - 1)) << 22); -// } - - union u3u3u2 - { - struct - { - uint x : 3; - uint y : 3; - uint z : 2; - } data; - uint8 pack; - }; - - union u4u4 - { - struct - { - uint x : 4; - uint y : 4; - } data; - uint8 pack; - }; - - union u4u4u4u4 - { - struct - { - uint x : 4; - uint y : 4; - uint z : 4; - uint w : 4; - } data; - uint16 pack; - }; - - union u5u6u5 - { - struct - { - uint x : 5; - uint y : 6; - uint z : 5; - } data; - uint16 pack; - }; - - union u5u5u5u1 - { - struct - { - uint x : 5; - uint y : 5; - uint z : 5; - uint w : 1; - } data; - uint16 pack; - }; - - union u10u10u10u2 - { - struct - { - uint x : 10; - uint y : 10; - uint z : 10; - uint w : 2; - } data; - uint32 pack; - }; - - union i10i10i10i2 - { - struct - { - int x : 10; - int y : 10; - int z : 10; - int w : 2; - } data; - uint32 pack; - }; - - union u9u9u9e5 - { - struct - { - uint x : 9; - uint y : 9; - uint z : 9; - uint w : 5; - } data; - uint32 pack; - }; - - template - struct compute_half - {}; - - template - struct compute_half<1, Q> - { - GLM_FUNC_QUALIFIER static vec<1, uint16, Q> pack(vec<1, float, Q> const& v) - { - int16 const Unpack(detail::toFloat16(v.x)); - u16vec1 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<1, float, 
Q> unpack(vec<1, uint16, Q> const& v) - { - i16vec1 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<1, float, Q>(detail::toFloat32(v.x)); - } - }; - - template - struct compute_half<2, Q> - { - GLM_FUNC_QUALIFIER static vec<2, uint16, Q> pack(vec<2, float, Q> const& v) - { - vec<2, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y)); - u16vec2 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<2, float, Q> unpack(vec<2, uint16, Q> const& v) - { - i16vec2 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<2, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y)); - } - }; - - template - struct compute_half<3, Q> - { - GLM_FUNC_QUALIFIER static vec<3, uint16, Q> pack(vec<3, float, Q> const& v) - { - vec<3, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z)); - u16vec3 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<3, float, Q> unpack(vec<3, uint16, Q> const& v) - { - i16vec3 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<3, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z)); - } - }; - - template - struct compute_half<4, Q> - { - GLM_FUNC_QUALIFIER static vec<4, uint16, Q> pack(vec<4, float, Q> const& v) - { - vec<4, int16, Q> const Unpack(detail::toFloat16(v.x), detail::toFloat16(v.y), detail::toFloat16(v.z), detail::toFloat16(v.w)); - u16vec4 Packed; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER static vec<4, float, Q> unpack(vec<4, uint16, Q> const& v) - { - i16vec4 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec<4, float, Q>(detail::toFloat32(v.x), detail::toFloat32(v.y), detail::toFloat32(v.z), detail::toFloat32(v.w)); - } - }; -}//namespace detail - - GLM_FUNC_QUALIFIER uint8 packUnorm1x8(float v) - { - return static_cast(round(clamp(v, 0.0f, 1.0f) * 255.0f)); - } - - GLM_FUNC_QUALIFIER float unpackUnorm1x8(uint8 p) - { - float const Unpack(p); - return Unpack * static_cast(0.0039215686274509803921568627451); // 1 / 255 - } - - GLM_FUNC_QUALIFIER uint16 packUnorm2x8(vec2 const& v) - { - u8vec2 const Topack(round(clamp(v, 0.0f, 1.0f) * 255.0f)); - - uint16 Unpack = 0; - memcpy(&Unpack, &Topack, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER vec2 unpackUnorm2x8(uint16 p) - { - u8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return vec2(Unpack) * float(0.0039215686274509803921568627451); // 1 / 255 - } - - GLM_FUNC_QUALIFIER uint8 packSnorm1x8(float v) - { - int8 const Topack(static_cast(round(clamp(v ,-1.0f, 1.0f) * 127.0f))); - uint8 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER float unpackSnorm1x8(uint8 p) - { - int8 Unpack = 0; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - static_cast(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint16 packSnorm2x8(vec2 const& v) - { - i8vec2 const Topack(round(clamp(v, -1.0f, 1.0f) * 127.0f)); - uint16 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER vec2 unpackSnorm2x8(uint16 p) - { - i8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - vec2(Unpack) * 0.00787401574803149606299212598425f, // 1.0f / 127.0f - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint16 packUnorm1x16(float s) - { - return static_cast(round(clamp(s, 0.0f, 1.0f) * 
65535.0f)); - } - - GLM_FUNC_QUALIFIER float unpackUnorm1x16(uint16 p) - { - float const Unpack(p); - return Unpack * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 - } - - GLM_FUNC_QUALIFIER uint64 packUnorm4x16(vec4 const& v) - { - u16vec4 const Topack(round(clamp(v , 0.0f, 1.0f) * 65535.0f)); - uint64 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm4x16(uint64 p) - { - u16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return vec4(Unpack) * 1.5259021896696421759365224689097e-5f; // 1.0 / 65535.0 - } - - GLM_FUNC_QUALIFIER uint16 packSnorm1x16(float v) - { - int16 const Topack = static_cast(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); - uint16 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER float unpackSnorm1x16(uint16 p) - { - int16 Unpack = 0; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - static_cast(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint64 packSnorm4x16(vec4 const& v) - { - i16vec4 const Topack(round(clamp(v ,-1.0f, 1.0f) * 32767.0f)); - uint64 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER vec4 unpackSnorm4x16(uint64 p) - { - i16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return clamp( - vec4(Unpack) * 3.0518509475997192297128208258309e-5f, //1.0f / 32767.0f, - -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint16 packHalf1x16(float v) - { - int16 const Topack(detail::toFloat16(v)); - uint16 Packed = 0; - memcpy(&Packed, &Topack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER float unpackHalf1x16(uint16 v) - { - int16 Unpack = 0; - memcpy(&Unpack, &v, sizeof(Unpack)); - return detail::toFloat32(Unpack); - } - - GLM_FUNC_QUALIFIER uint64 packHalf4x16(glm::vec4 const& v) - { - i16vec4 const Unpack( - detail::toFloat16(v.x), - detail::toFloat16(v.y), - detail::toFloat16(v.z), - detail::toFloat16(v.w)); - uint64 Packed = 0; - memcpy(&Packed, &Unpack, sizeof(Packed)); - return Packed; - } - - GLM_FUNC_QUALIFIER glm::vec4 unpackHalf4x16(uint64 v) - { - i16vec4 Unpack; - memcpy(&Unpack, &v, sizeof(Unpack)); - return vec4( - detail::toFloat32(Unpack.x), - detail::toFloat32(Unpack.y), - detail::toFloat32(Unpack.z), - detail::toFloat32(Unpack.w)); - } - - GLM_FUNC_QUALIFIER uint32 packI3x10_1x2(ivec4 const& v) - { - detail::i10i10i10i2 Result; - Result.data.x = v.x; - Result.data.y = v.y; - Result.data.z = v.z; - Result.data.w = v.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER ivec4 unpackI3x10_1x2(uint32 v) - { - detail::i10i10i10i2 Unpack; - Unpack.pack = v; - return ivec4( - Unpack.data.x, - Unpack.data.y, - Unpack.data.z, - Unpack.data.w); - } - - GLM_FUNC_QUALIFIER uint32 packU3x10_1x2(uvec4 const& v) - { - detail::u10u10u10u2 Result; - Result.data.x = v.x; - Result.data.y = v.y; - Result.data.z = v.z; - Result.data.w = v.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER uvec4 unpackU3x10_1x2(uint32 v) - { - detail::u10u10u10u2 Unpack; - Unpack.pack = v; - return uvec4( - Unpack.data.x, - Unpack.data.y, - Unpack.data.z, - Unpack.data.w); - } - - GLM_FUNC_QUALIFIER uint32 packSnorm3x10_1x2(vec4 const& v) - { - ivec4 const Pack(round(clamp(v,-1.0f, 1.0f) * vec4(511.f, 511.f, 511.f, 1.f))); - - detail::i10i10i10i2 Result; - Result.data.x = Pack.x; - Result.data.y = Pack.y; - Result.data.z = Pack.z; - Result.data.w = Pack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 
unpackSnorm3x10_1x2(uint32 v) - { - detail::i10i10i10i2 Unpack; - Unpack.pack = v; - - vec4 const Result(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w); - - return clamp(Result * vec4(1.f / 511.f, 1.f / 511.f, 1.f / 511.f, 1.f), -1.0f, 1.0f); - } - - GLM_FUNC_QUALIFIER uint32 packUnorm3x10_1x2(vec4 const& v) - { - uvec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(1023.f, 1023.f, 1023.f, 3.f))); - - detail::u10u10u10u2 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - Result.data.w = Unpack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm3x10_1x2(uint32 v) - { - vec4 const ScaleFactors(1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 1023.f, 1.0f / 3.f); - - detail::u10u10u10u2 Unpack; - Unpack.pack = v; - return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactors; - } - - GLM_FUNC_QUALIFIER uint32 packF2x11_1x10(vec3 const& v) - { - return - ((detail::floatTo11bit(v.x) & ((1 << 11) - 1)) << 0) | - ((detail::floatTo11bit(v.y) & ((1 << 11) - 1)) << 11) | - ((detail::floatTo10bit(v.z) & ((1 << 10) - 1)) << 22); - } - - GLM_FUNC_QUALIFIER vec3 unpackF2x11_1x10(uint32 v) - { - return vec3( - detail::packed11bitToFloat(v >> 0), - detail::packed11bitToFloat(v >> 11), - detail::packed10bitToFloat(v >> 22)); - } - - GLM_FUNC_QUALIFIER uint32 packF3x9_E1x5(vec3 const& v) - { - float const SharedExpMax = (pow(2.0f, 9.0f - 1.0f) / pow(2.0f, 9.0f)) * pow(2.0f, 31.f - 15.f); - vec3 const Color = clamp(v, 0.0f, SharedExpMax); - float const MaxColor = max(Color.x, max(Color.y, Color.z)); - - float const ExpSharedP = max(-15.f - 1.f, floor(log2(MaxColor))) + 1.0f + 15.f; - float const MaxShared = floor(MaxColor / pow(2.0f, (ExpSharedP - 15.f - 9.f)) + 0.5f); - float const ExpShared = equal(MaxShared, pow(2.0f, 9.0f), epsilon()) ? 
ExpSharedP + 1.0f : ExpSharedP; - - uvec3 const ColorComp(floor(Color / pow(2.f, (ExpShared - 15.f - 9.f)) + 0.5f)); - - detail::u9u9u9e5 Unpack; - Unpack.data.x = ColorComp.x; - Unpack.data.y = ColorComp.y; - Unpack.data.z = ColorComp.z; - Unpack.data.w = uint(ExpShared); - return Unpack.pack; - } - - GLM_FUNC_QUALIFIER vec3 unpackF3x9_E1x5(uint32 v) - { - detail::u9u9u9e5 Unpack; - Unpack.pack = v; - - return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * pow(2.0f, Unpack.data.w - 15.f - 9.f); - } - - // Based on Brian Karis http://graphicrants.blogspot.fr/2009/04/rgbm-color-encoding.html - template - GLM_FUNC_QUALIFIER vec<4, T, Q> packRGBM(vec<3, T, Q> const& rgb) - { - vec<3, T, Q> const Color(rgb * static_cast(1.0 / 6.0)); - T Alpha = clamp(max(max(Color.x, Color.y), max(Color.z, static_cast(1e-6))), static_cast(0), static_cast(1)); - Alpha = ceil(Alpha * static_cast(255.0)) / static_cast(255.0); - return vec<4, T, Q>(Color / Alpha, Alpha); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> unpackRGBM(vec<4, T, Q> const& rgbm) - { - return vec<3, T, Q>(rgbm.x, rgbm.y, rgbm.z) * rgbm.w * static_cast(6); - } - - template - GLM_FUNC_QUALIFIER vec packHalf(vec const& v) - { - return detail::compute_half::pack(v); - } - - template - GLM_FUNC_QUALIFIER vec unpackHalf(vec const& v) - { - return detail::compute_half::unpack(v); - } - - template - GLM_FUNC_QUALIFIER vec packUnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return vec(round(clamp(v, static_cast(0), static_cast(1)) * static_cast(std::numeric_limits::max()))); - } - - template - GLM_FUNC_QUALIFIER vec unpackUnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())); - } - - template - GLM_FUNC_QUALIFIER vec packSnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return vec(round(clamp(v , static_cast(-1), static_cast(1)) * static_cast(std::numeric_limits::max()))); - } - - template - GLM_FUNC_QUALIFIER vec unpackSnorm(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_integer, "uintType must be an integer type"); - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "floatType must be a floating point type"); - - return clamp(vec(v) * (static_cast(1) / static_cast(std::numeric_limits::max())), static_cast(-1), static_cast(1)); - } - - GLM_FUNC_QUALIFIER uint8 packUnorm2x4(vec2 const& v) - { - u32vec2 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); - detail::u4u4 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec2 unpackUnorm2x4(uint8 v) - { - float const ScaleFactor(1.f / 15.f); - detail::u4u4 Unpack; - Unpack.pack = v; - return vec2(Unpack.data.x, Unpack.data.y) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint16 packUnorm4x4(vec4 const& v) - { - u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * 15.0f)); - detail::u4u4u4u4 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - Result.data.w = Unpack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 
unpackUnorm4x4(uint16 v) - { - float const ScaleFactor(1.f / 15.f); - detail::u4u4u4u4 Unpack; - Unpack.pack = v; - return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint16 packUnorm1x5_1x6_1x5(vec3 const& v) - { - u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(31.f, 63.f, 31.f))); - detail::u5u6u5 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec3 unpackUnorm1x5_1x6_1x5(uint16 v) - { - vec3 const ScaleFactor(1.f / 31.f, 1.f / 63.f, 1.f / 31.f); - detail::u5u6u5 Unpack; - Unpack.pack = v; - return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint16 packUnorm3x5_1x1(vec4 const& v) - { - u32vec4 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec4(31.f, 31.f, 31.f, 1.f))); - detail::u5u5u5u1 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - Result.data.w = Unpack.w; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec4 unpackUnorm3x5_1x1(uint16 v) - { - vec4 const ScaleFactor(1.f / 31.f, 1.f / 31.f, 1.f / 31.f, 1.f); - detail::u5u5u5u1 Unpack; - Unpack.pack = v; - return vec4(Unpack.data.x, Unpack.data.y, Unpack.data.z, Unpack.data.w) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER uint8 packUnorm2x3_1x2(vec3 const& v) - { - u32vec3 const Unpack(round(clamp(v, 0.0f, 1.0f) * vec3(7.f, 7.f, 3.f))); - detail::u3u3u2 Result; - Result.data.x = Unpack.x; - Result.data.y = Unpack.y; - Result.data.z = Unpack.z; - return Result.pack; - } - - GLM_FUNC_QUALIFIER vec3 unpackUnorm2x3_1x2(uint8 v) - { - vec3 const ScaleFactor(1.f / 7.f, 1.f / 7.f, 1.f / 3.f); - detail::u3u3u2 Unpack; - Unpack.pack = v; - return vec3(Unpack.data.x, Unpack.data.y, Unpack.data.z) * ScaleFactor; - } - - GLM_FUNC_QUALIFIER int16 packInt2x8(i8vec2 const& v) - { - int16 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i8vec2 unpackInt2x8(int16 p) - { - i8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint16 packUint2x8(u8vec2 const& v) - { - uint16 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u8vec2 unpackUint2x8(uint16 p) - { - u8vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int32 packInt4x8(i8vec4 const& v) - { - int32 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i8vec4 unpackInt4x8(int32 p) - { - i8vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint32 packUint4x8(u8vec4 const& v) - { - uint32 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u8vec4 unpackUint4x8(uint32 p) - { - u8vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int packInt2x16(i16vec2 const& v) - { - int Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i16vec2 unpackInt2x16(int p) - { - i16vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int64 packInt4x16(i16vec4 const& v) - { - int64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i16vec4 unpackInt4x16(int64 p) - { - i16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint packUint2x16(u16vec2 const& v) - { - uint Pack = 0; - memcpy(&Pack, &v, 
sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u16vec2 unpackUint2x16(uint p) - { - u16vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint64 packUint4x16(u16vec4 const& v) - { - uint64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u16vec4 unpackUint4x16(uint64 p) - { - u16vec4 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER int64 packInt2x32(i32vec2 const& v) - { - int64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER i32vec2 unpackInt2x32(int64 p) - { - i32vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } - - GLM_FUNC_QUALIFIER uint64 packUint2x32(u32vec2 const& v) - { - uint64 Pack = 0; - memcpy(&Pack, &v, sizeof(Pack)); - return Pack; - } - - GLM_FUNC_QUALIFIER u32vec2 unpackUint2x32(uint64 p) - { - u32vec2 Unpack; - memcpy(&Unpack, &p, sizeof(Unpack)); - return Unpack; - } -}//namespace glm - diff --git a/third_party/glm/gtc/quaternion.hpp b/third_party/glm/gtc/quaternion.hpp deleted file mode 100755 index 359e072..0000000 --- a/third_party/glm/gtc/quaternion.hpp +++ /dev/null @@ -1,173 +0,0 @@ -/// @ref gtc_quaternion -/// @file glm/gtc/quaternion.hpp -/// -/// @see core (dependence) -/// @see gtc_constants (dependence) -/// -/// @defgroup gtc_quaternion GLM_GTC_quaternion -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines a templated quaternion type and several quaternion operations. - -#pragma once - -// Dependency: -#include "../gtc/constants.hpp" -#include "../gtc/matrix_transform.hpp" -#include "../ext/vector_relational.hpp" -#include "../ext/quaternion_common.hpp" -#include "../ext/quaternion_float.hpp" -#include "../ext/quaternion_float_precision.hpp" -#include "../ext/quaternion_double.hpp" -#include "../ext/quaternion_double_precision.hpp" -#include "../ext/quaternion_relational.hpp" -#include "../ext/quaternion_geometric.hpp" -#include "../ext/quaternion_trigonometric.hpp" -#include "../ext/quaternion_transform.hpp" -#include "../detail/type_mat3x3.hpp" -#include "../detail/type_mat4x4.hpp" -#include "../detail/type_vec3.hpp" -#include "../detail/type_vec4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_quaternion extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_quaternion - /// @{ - - /// Returns euler angles, pitch as x, yaw as y, roll as z. - /// The result is expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL vec<3, T, Q> eulerAngles(qua const& x); - - /// Returns roll value of euler angles expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL T roll(qua const& x); - - /// Returns pitch value of euler angles expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL T pitch(qua const& x); - - /// Returns yaw value of euler angles expressed in radians. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL T yaw(qua const& x); - - /// Converts a quaternion to a 3 * 3 matrix. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL mat<3, 3, T, Q> mat3_cast(qua const& x); - - /// Converts a quaternion to a 4 * 4 matrix. 
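Usage sketch for the conversions declared above: build a rotation, read its Euler angles, and expand it to a matrix. angleAxis comes from the ext quaternion headers this file includes; the 90-degree rotation is illustrative.

#include <glm/gtc/constants.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/mat4x4.hpp>
#include <glm/vec3.hpp>

void quat_conversions()
{
    glm::quat q = glm::angleAxis(glm::half_pi<float>(), glm::vec3(0.0f, 1.0f, 0.0f));
    glm::vec3 angles = glm::eulerAngles(q); // (pitch, yaw, roll) in radians
    glm::mat4 m = glm::mat4_cast(q);        // the same rotation as a 4x4 matrix
    (void)angles; (void)m;
}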
- /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL mat<4, 4, T, Q> mat4_cast(qua const& x); - - /// Converts a pure rotation 3 * 3 matrix to a quaternion. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL qua quat_cast(mat<3, 3, T, Q> const& x); - - /// Converts a pure rotation 4 * 4 matrix to a quaternion. - /// - /// @tparam T Floating-point scalar types. - /// - /// @see gtc_quaternion - template - GLM_FUNC_DECL qua quat_cast(mat<4, 4, T, Q> const& x); - - /// Returns the component-wise comparison result of x < y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> lessThan(qua const& x, qua const& y); - - /// Returns the component-wise comparison of result x <= y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> lessThanEqual(qua const& x, qua const& y); - - /// Returns the component-wise comparison of result x > y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> greaterThan(qua const& x, qua const& y); - - /// Returns the component-wise comparison of result x >= y. - /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_quaternion_relational - template - GLM_FUNC_DECL vec<4, bool, Q> greaterThanEqual(qua const& x, qua const& y); - - /// Build a look at quaternion based on the default handedness. - /// - /// @param direction Desired forward direction. Needs to be normalized. - /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). - template - GLM_FUNC_DECL qua quatLookAt( - vec<3, T, Q> const& direction, - vec<3, T, Q> const& up); - - /// Build a right-handed look at quaternion. - /// - /// @param direction Desired forward direction onto which the -z-axis gets mapped. Needs to be normalized. - /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). - template - GLM_FUNC_DECL qua quatLookAtRH( - vec<3, T, Q> const& direction, - vec<3, T, Q> const& up); - - /// Build a left-handed look at quaternion. - /// - /// @param direction Desired forward direction onto which the +z-axis gets mapped. Needs to be normalized. - /// @param up Up vector, how the camera is oriented. Typically (0, 1, 0). 
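Usage sketch for quatLookAt declared above: derive a camera orientation from a position and a target (values illustrative). The unqualified quatLookAt picks the right-handed or left-handed variant according to GLM's configured handedness.

#include <glm/geometric.hpp>
#include <glm/gtc/quaternion.hpp>
#include <glm/vec3.hpp>

glm::quat orient_towards(const glm::vec3& eye, const glm::vec3& target)
{
    glm::vec3 dir = glm::normalize(target - eye);             // direction must be normalized
    return glm::quatLookAt(dir, glm::vec3(0.0f, 1.0f, 0.0f)); // (0, 1, 0) as the up vector
}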
- template - GLM_FUNC_DECL qua quatLookAtLH( - vec<3, T, Q> const& direction, - vec<3, T, Q> const& up); - /// @} -} //namespace glm - -#include "quaternion.inl" diff --git a/third_party/glm/gtc/quaternion.inl b/third_party/glm/gtc/quaternion.inl deleted file mode 100755 index 9dd037e..0000000 --- a/third_party/glm/gtc/quaternion.inl +++ /dev/null @@ -1,200 +0,0 @@ -#include "../trigonometric.hpp" -#include "../geometric.hpp" -#include "../exponential.hpp" -#include "epsilon.hpp" -#include - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> eulerAngles(qua const& x) - { - return vec<3, T, Q>(pitch(x), yaw(x), roll(x)); - } - - template - GLM_FUNC_QUALIFIER T roll(qua const& q) - { - return static_cast(atan(static_cast(2) * (q.x * q.y + q.w * q.z), q.w * q.w + q.x * q.x - q.y * q.y - q.z * q.z)); - } - - template - GLM_FUNC_QUALIFIER T pitch(qua const& q) - { - //return T(atan(T(2) * (q.y * q.z + q.w * q.x), q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z)); - T const y = static_cast(2) * (q.y * q.z + q.w * q.x); - T const x = q.w * q.w - q.x * q.x - q.y * q.y + q.z * q.z; - - if(all(equal(vec<2, T, Q>(x, y), vec<2, T, Q>(0), epsilon()))) //avoid atan2(0,0) - handle singularity - Matiis - return static_cast(static_cast(2) * atan(q.x, q.w)); - - return static_cast(atan(y, x)); - } - - template - GLM_FUNC_QUALIFIER T yaw(qua const& q) - { - return asin(clamp(static_cast(-2) * (q.x * q.z - q.w * q.y), static_cast(-1), static_cast(1))); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> mat3_cast(qua const& q) - { - mat<3, 3, T, Q> Result(T(1)); - T qxx(q.x * q.x); - T qyy(q.y * q.y); - T qzz(q.z * q.z); - T qxz(q.x * q.z); - T qxy(q.x * q.y); - T qyz(q.y * q.z); - T qwx(q.w * q.x); - T qwy(q.w * q.y); - T qwz(q.w * q.z); - - Result[0][0] = T(1) - T(2) * (qyy + qzz); - Result[0][1] = T(2) * (qxy + qwz); - Result[0][2] = T(2) * (qxz - qwy); - - Result[1][0] = T(2) * (qxy - qwz); - Result[1][1] = T(1) - T(2) * (qxx + qzz); - Result[1][2] = T(2) * (qyz + qwx); - - Result[2][0] = T(2) * (qxz + qwy); - Result[2][1] = T(2) * (qyz - qwx); - Result[2][2] = T(1) - T(2) * (qxx + qyy); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> mat4_cast(qua const& q) - { - return mat<4, 4, T, Q>(mat3_cast(q)); - } - - template - GLM_FUNC_QUALIFIER qua quat_cast(mat<3, 3, T, Q> const& m) - { - T fourXSquaredMinus1 = m[0][0] - m[1][1] - m[2][2]; - T fourYSquaredMinus1 = m[1][1] - m[0][0] - m[2][2]; - T fourZSquaredMinus1 = m[2][2] - m[0][0] - m[1][1]; - T fourWSquaredMinus1 = m[0][0] + m[1][1] + m[2][2]; - - int biggestIndex = 0; - T fourBiggestSquaredMinus1 = fourWSquaredMinus1; - if(fourXSquaredMinus1 > fourBiggestSquaredMinus1) - { - fourBiggestSquaredMinus1 = fourXSquaredMinus1; - biggestIndex = 1; - } - if(fourYSquaredMinus1 > fourBiggestSquaredMinus1) - { - fourBiggestSquaredMinus1 = fourYSquaredMinus1; - biggestIndex = 2; - } - if(fourZSquaredMinus1 > fourBiggestSquaredMinus1) - { - fourBiggestSquaredMinus1 = fourZSquaredMinus1; - biggestIndex = 3; - } - - T biggestVal = sqrt(fourBiggestSquaredMinus1 + static_cast(1)) * static_cast(0.5); - T mult = static_cast(0.25) / biggestVal; - - switch(biggestIndex) - { - case 0: - return qua(biggestVal, (m[1][2] - m[2][1]) * mult, (m[2][0] - m[0][2]) * mult, (m[0][1] - m[1][0]) * mult); - case 1: - return qua((m[1][2] - m[2][1]) * mult, biggestVal, (m[0][1] + m[1][0]) * mult, (m[2][0] + m[0][2]) * mult); - case 2: - return qua((m[2][0] - m[0][2]) * mult, (m[0][1] + m[1][0]) * mult, biggestVal, (m[1][2] + m[2][1]) * mult); - case 3: - 
return qua((m[0][1] - m[1][0]) * mult, (m[2][0] + m[0][2]) * mult, (m[1][2] + m[2][1]) * mult, biggestVal); - default: // Silence a -Wswitch-default warning in GCC. Should never actually get here. Assert is just for sanity. - assert(false); - return qua(1, 0, 0, 0); - } - } - - template - GLM_FUNC_QUALIFIER qua quat_cast(mat<4, 4, T, Q> const& m4) - { - return quat_cast(mat<3, 3, T, Q>(m4)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> lessThan(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] < y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> lessThanEqual(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] <= y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> greaterThan(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] > y[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> greaterThanEqual(qua const& x, qua const& y) - { - vec<4, bool, Q> Result; - for(length_t i = 0; i < x.length(); ++i) - Result[i] = x[i] >= y[i]; - return Result; - } - - - template - GLM_FUNC_QUALIFIER qua quatLookAt(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) - { -# if GLM_CONFIG_CLIP_CONTROL & GLM_CLIP_CONTROL_LH_BIT - return quatLookAtLH(direction, up); -# else - return quatLookAtRH(direction, up); -# endif - } - - template - GLM_FUNC_QUALIFIER qua quatLookAtRH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) - { - mat<3, 3, T, Q> Result; - - Result[2] = -direction; - Result[0] = normalize(cross(up, Result[2])); - Result[1] = cross(Result[2], Result[0]); - - return quat_cast(Result); - } - - template - GLM_FUNC_QUALIFIER qua quatLookAtLH(vec<3, T, Q> const& direction, vec<3, T, Q> const& up) - { - mat<3, 3, T, Q> Result; - - Result[2] = direction; - Result[0] = normalize(cross(up, Result[2])); - Result[1] = cross(Result[2], Result[0]); - - return quat_cast(Result); - } -}//namespace glm - -#if GLM_CONFIG_SIMD == GLM_ENABLE -# include "quaternion_simd.inl" -#endif - diff --git a/third_party/glm/gtc/quaternion_simd.inl b/third_party/glm/gtc/quaternion_simd.inl deleted file mode 100755 index e69de29..0000000 diff --git a/third_party/glm/gtc/random.hpp b/third_party/glm/gtc/random.hpp deleted file mode 100755 index 9a85958..0000000 --- a/third_party/glm/gtc/random.hpp +++ /dev/null @@ -1,82 +0,0 @@ -/// @ref gtc_random -/// @file glm/gtc/random.hpp -/// -/// @see core (dependence) -/// @see gtx_random (extended) -/// -/// @defgroup gtc_random GLM_GTC_random -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Generate random number from various distribution methods. - -#pragma once - -// Dependency: -#include "../ext/scalar_int_sized.hpp" -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_random extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_random - /// @{ - - /// Generate random numbers in the interval [Min, Max], according a linear distribution - /// - /// @param Min Minimum value included in the sampling - /// @param Max Maximum value included in the sampling - /// @tparam genType Value type. Currently supported: float or double scalars. 
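// Illustrative sketch (not part of the diff): typical use of the GLM_GTC_quaternion
// helpers whose declarations and definitions are removed above. The function names come
// from those declarations; the scene values are made up for the example.
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

int main()
{
    // Build an orientation looking from the origin toward +X/-Z, with +Y up.
    glm::quat look = glm::quatLookAt(glm::normalize(glm::vec3(1.0f, 0.0f, -1.0f)),
                                     glm::vec3(0.0f, 1.0f, 0.0f));

    // Round-trip through a rotation matrix: mat4_cast and quat_cast are inverses
    // for pure rotations.
    glm::mat4 m    = glm::mat4_cast(look);
    glm::quat back = glm::quat_cast(m);

    // Decompose into Euler angles (pitch, yaw, roll), in radians.
    glm::vec3 euler = glm::eulerAngles(back);
    (void)euler;
    return 0;
}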
- /// @see gtc_random - template - GLM_FUNC_DECL genType linearRand(genType Min, genType Max); - - /// Generate random numbers in the interval [Min, Max], according a linear distribution - /// - /// @param Min Minimum value included in the sampling - /// @param Max Maximum value included in the sampling - /// @tparam T Value type. Currently supported: float or double. - /// - /// @see gtc_random - template - GLM_FUNC_DECL vec linearRand(vec const& Min, vec const& Max); - - /// Generate random numbers in the interval [Min, Max], according a gaussian distribution - /// - /// @see gtc_random - template - GLM_FUNC_DECL genType gaussRand(genType Mean, genType Deviation); - - /// Generate a random 2D vector which coordinates are regulary distributed on a circle of a given radius - /// - /// @see gtc_random - template - GLM_FUNC_DECL vec<2, T, defaultp> circularRand(T Radius); - - /// Generate a random 3D vector which coordinates are regulary distributed on a sphere of a given radius - /// - /// @see gtc_random - template - GLM_FUNC_DECL vec<3, T, defaultp> sphericalRand(T Radius); - - /// Generate a random 2D vector which coordinates are regulary distributed within the area of a disk of a given radius - /// - /// @see gtc_random - template - GLM_FUNC_DECL vec<2, T, defaultp> diskRand(T Radius); - - /// Generate a random 3D vector which coordinates are regulary distributed within the volume of a ball of a given radius - /// - /// @see gtc_random - template - GLM_FUNC_DECL vec<3, T, defaultp> ballRand(T Radius); - - /// @} -}//namespace glm - -#include "random.inl" diff --git a/third_party/glm/gtc/random.inl b/third_party/glm/gtc/random.inl deleted file mode 100755 index 7048509..0000000 --- a/third_party/glm/gtc/random.inl +++ /dev/null @@ -1,303 +0,0 @@ -#include "../geometric.hpp" -#include "../exponential.hpp" -#include "../trigonometric.hpp" -#include "../detail/type_vec1.hpp" -#include -#include -#include -#include - -namespace glm{ -namespace detail -{ - template - struct compute_rand - { - GLM_FUNC_QUALIFIER static vec call(); - }; - - template - struct compute_rand<1, uint8, P> - { - GLM_FUNC_QUALIFIER static vec<1, uint8, P> call() - { - return vec<1, uint8, P>( - std::rand() % std::numeric_limits::max()); - } - }; - - template - struct compute_rand<2, uint8, P> - { - GLM_FUNC_QUALIFIER static vec<2, uint8, P> call() - { - return vec<2, uint8, P>( - std::rand() % std::numeric_limits::max(), - std::rand() % std::numeric_limits::max()); - } - }; - - template - struct compute_rand<3, uint8, P> - { - GLM_FUNC_QUALIFIER static vec<3, uint8, P> call() - { - return vec<3, uint8, P>( - std::rand() % std::numeric_limits::max(), - std::rand() % std::numeric_limits::max(), - std::rand() % std::numeric_limits::max()); - } - }; - - template - struct compute_rand<4, uint8, P> - { - GLM_FUNC_QUALIFIER static vec<4, uint8, P> call() - { - return vec<4, uint8, P>( - std::rand() % std::numeric_limits::max(), - std::rand() % std::numeric_limits::max(), - std::rand() % std::numeric_limits::max(), - std::rand() % std::numeric_limits::max()); - } - }; - - template - struct compute_rand - { - GLM_FUNC_QUALIFIER static vec call() - { - return - (vec(compute_rand::call()) << static_cast(8)) | - (vec(compute_rand::call()) << static_cast(0)); - } - }; - - template - struct compute_rand - { - GLM_FUNC_QUALIFIER static vec call() - { - return - (vec(compute_rand::call()) << static_cast(16)) | - (vec(compute_rand::call()) << static_cast(0)); - } - }; - - template - struct compute_rand - { - GLM_FUNC_QUALIFIER static 
vec call() - { - return - (vec(compute_rand::call()) << static_cast(32)) | - (vec(compute_rand::call()) << static_cast(0)); - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max); - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (vec(compute_rand::call() % vec(Max + static_cast(1) - Min))) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return (compute_rand::call() % (Max + static_cast(1) - Min)) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return vec(compute_rand::call()) / static_cast(std::numeric_limits::max()) * (Max - Min) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return vec(compute_rand::call()) / static_cast(std::numeric_limits::max()) * (Max - Min) + Min; - } - }; - - template - struct compute_linearRand - { - GLM_FUNC_QUALIFIER static vec call(vec const& Min, vec const& Max) - { - return vec(compute_rand::call()) / static_cast(std::numeric_limits::max()) * (Max - Min) + Min; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER genType linearRand(genType Min, genType Max) - { - return detail::compute_linearRand<1, genType, highp>::call( - vec<1, genType, highp>(Min), - vec<1, genType, highp>(Max)).x; - } - - template - GLM_FUNC_QUALIFIER vec linearRand(vec const& Min, vec const& Max) - { - return detail::compute_linearRand::call(Min, Max); - } - - template - GLM_FUNC_QUALIFIER genType gaussRand(genType Mean, genType Deviation) - { - genType w, x1, x2; - - do - { - x1 = linearRand(genType(-1), genType(1)); - x2 = linearRand(genType(-1), genType(1)); - - w = x1 * x1 + x2 * x2; - } while(w > genType(1)); - - return static_cast(x2 * Deviation * Deviation * sqrt((genType(-2) * log(w)) / w) + Mean); - } - - template - GLM_FUNC_QUALIFIER vec gaussRand(vec const& Mean, vec const& Deviation) - { - return detail::functor2::call(gaussRand, Mean, Deviation); - } - - template - GLM_FUNC_QUALIFIER vec<2, T, defaultp> diskRand(T Radius) - { - assert(Radius > static_cast(0)); - - vec<2, 
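// Illustrative sketch (not part of the diff): the rejection loop in gaussRand above is the
// Marsaglia polar method. This standalone version uses <random> instead of GLM's linearRand,
// purely to show the shape of the algorithm; names and ranges are assumptions for the example.
#include <cmath>
#include <random>

double polar_gauss(double mean, double deviation)
{
    static std::mt19937 gen{std::random_device{}()};
    std::uniform_real_distribution<double> uni(-1.0, 1.0);

    double x1, x2, w;
    do {
        x1 = uni(gen);
        x2 = uni(gen);
        w  = x1 * x1 + x2 * x2;      // reject points outside the unit disk
    } while (w > 1.0 || w == 0.0);

    // Transform a uniform point in the disk into an approximately normal deviate.
    return mean + deviation * x2 * std::sqrt(-2.0 * std::log(w) / w);
}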
T, defaultp> Result(T(0)); - T LenRadius(T(0)); - - do - { - Result = linearRand( - vec<2, T, defaultp>(-Radius), - vec<2, T, defaultp>(Radius)); - LenRadius = length(Result); - } - while(LenRadius > Radius); - - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, defaultp> ballRand(T Radius) - { - assert(Radius > static_cast(0)); - - vec<3, T, defaultp> Result(T(0)); - T LenRadius(T(0)); - - do - { - Result = linearRand( - vec<3, T, defaultp>(-Radius), - vec<3, T, defaultp>(Radius)); - LenRadius = length(Result); - } - while(LenRadius > Radius); - - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, defaultp> circularRand(T Radius) - { - assert(Radius > static_cast(0)); - - T a = linearRand(T(0), static_cast(6.283185307179586476925286766559)); - return vec<2, T, defaultp>(glm::cos(a), glm::sin(a)) * Radius; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, defaultp> sphericalRand(T Radius) - { - assert(Radius > static_cast(0)); - - T theta = linearRand(T(0), T(6.283185307179586476925286766559f)); - T phi = std::acos(linearRand(T(-1.0f), T(1.0f))); - - T x = std::sin(phi) * std::cos(theta); - T y = std::sin(phi) * std::sin(theta); - T z = std::cos(phi); - - return vec<3, T, defaultp>(x, y, z) * Radius; - } -}//namespace glm diff --git a/third_party/glm/gtc/reciprocal.hpp b/third_party/glm/gtc/reciprocal.hpp deleted file mode 100755 index c7d1330..0000000 --- a/third_party/glm/gtc/reciprocal.hpp +++ /dev/null @@ -1,135 +0,0 @@ -/// @ref gtc_reciprocal -/// @file glm/gtc/reciprocal.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_reciprocal GLM_GTC_reciprocal -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Define secant, cosecant and cotangent functions. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_reciprocal extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_reciprocal - /// @{ - - /// Secant function. - /// hypotenuse / adjacent or 1 / cos(x) - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType sec(genType angle); - - /// Cosecant function. - /// hypotenuse / opposite or 1 / sin(x) - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType csc(genType angle); - - /// Cotangent function. - /// adjacent / opposite or 1 / tan(x) - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType cot(genType angle); - - /// Inverse secant function. - /// - /// @return Return an angle expressed in radians. - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType asec(genType x); - - /// Inverse cosecant function. - /// - /// @return Return an angle expressed in radians. - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType acsc(genType x); - - /// Inverse cotangent function. - /// - /// @return Return an angle expressed in radians. - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType acot(genType x); - - /// Secant hyperbolic function. - /// - /// @tparam genType Floating-point scalar or vector types. 
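// Illustrative sketch (not part of the diff): typical use of the GLM_GTC_random functions
// removed above. The seed value and ranges are made up; seeding matters because this
// extension is built on std::rand.
#include <cstdlib>
#include <glm/glm.hpp>
#include <glm/gtc/random.hpp>

int main()
{
    std::srand(42);

    float     f        = glm::linearRand(0.0f, 1.0f);                      // uniform scalar in [0, 1]
    glm::vec3 p        = glm::linearRand(glm::vec3(-1.0f), glm::vec3(1.0f)); // per-component uniform
    glm::vec2 onCircle = glm::circularRand(1.0f);                          // point on the unit circle
    glm::vec3 onSphere = glm::sphericalRand(1.0f);                         // point on the unit sphere
    glm::vec3 inBall   = glm::ballRand(1.0f);                              // point inside the unit ball
    float     g        = glm::gaussRand(0.0f, 1.0f);                       // approximately normal sample

    (void)f; (void)p; (void)onCircle; (void)onSphere; (void)inBall; (void)g;
    return 0;
}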
- /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType sech(genType angle); - - /// Cosecant hyperbolic function. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType csch(genType angle); - - /// Cotangent hyperbolic function. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType coth(genType angle); - - /// Inverse secant hyperbolic function. - /// - /// @return Return an angle expressed in radians. - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType asech(genType x); - - /// Inverse cosecant hyperbolic function. - /// - /// @return Return an angle expressed in radians. - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType acsch(genType x); - - /// Inverse cotangent hyperbolic function. - /// - /// @return Return an angle expressed in radians. - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see gtc_reciprocal - template - GLM_FUNC_DECL genType acoth(genType x); - - /// @} -}//namespace glm - -#include "reciprocal.inl" diff --git a/third_party/glm/gtc/reciprocal.inl b/third_party/glm/gtc/reciprocal.inl deleted file mode 100755 index d88729e..0000000 --- a/third_party/glm/gtc/reciprocal.inl +++ /dev/null @@ -1,191 +0,0 @@ -/// @ref gtc_reciprocal - -#include "../trigonometric.hpp" -#include - -namespace glm -{ - // sec - template - GLM_FUNC_QUALIFIER genType sec(genType angle) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'sec' only accept floating-point values"); - return genType(1) / glm::cos(angle); - } - - template - GLM_FUNC_QUALIFIER vec sec(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'sec' only accept floating-point inputs"); - return detail::functor1::call(sec, x); - } - - // csc - template - GLM_FUNC_QUALIFIER genType csc(genType angle) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'csc' only accept floating-point values"); - return genType(1) / glm::sin(angle); - } - - template - GLM_FUNC_QUALIFIER vec csc(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'csc' only accept floating-point inputs"); - return detail::functor1::call(csc, x); - } - - // cot - template - GLM_FUNC_QUALIFIER genType cot(genType angle) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cot' only accept floating-point values"); - - genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0); - return glm::tan(pi_over_2 - angle); - } - - template - GLM_FUNC_QUALIFIER vec cot(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cot' only accept floating-point inputs"); - return detail::functor1::call(cot, x); - } - - // asec - template - GLM_FUNC_QUALIFIER genType asec(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'asec' only accept floating-point values"); - return acos(genType(1) / x); - } - - template - GLM_FUNC_QUALIFIER vec asec(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'asec' only accept floating-point inputs"); - return detail::functor1::call(asec, x); - } - - // acsc - template - GLM_FUNC_QUALIFIER genType acsc(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acsc' only accept floating-point values"); - return asin(genType(1) / x); - } - - template - GLM_FUNC_QUALIFIER vec 
acsc(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acsc' only accept floating-point inputs"); - return detail::functor1::call(acsc, x); - } - - // acot - template - GLM_FUNC_QUALIFIER genType acot(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acot' only accept floating-point values"); - - genType const pi_over_2 = genType(3.1415926535897932384626433832795 / 2.0); - return pi_over_2 - atan(x); - } - - template - GLM_FUNC_QUALIFIER vec acot(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acot' only accept floating-point inputs"); - return detail::functor1::call(acot, x); - } - - // sech - template - GLM_FUNC_QUALIFIER genType sech(genType angle) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'sech' only accept floating-point values"); - return genType(1) / glm::cosh(angle); - } - - template - GLM_FUNC_QUALIFIER vec sech(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'sech' only accept floating-point inputs"); - return detail::functor1::call(sech, x); - } - - // csch - template - GLM_FUNC_QUALIFIER genType csch(genType angle) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'csch' only accept floating-point values"); - return genType(1) / glm::sinh(angle); - } - - template - GLM_FUNC_QUALIFIER vec csch(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'csch' only accept floating-point inputs"); - return detail::functor1::call(csch, x); - } - - // coth - template - GLM_FUNC_QUALIFIER genType coth(genType angle) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'coth' only accept floating-point values"); - return glm::cosh(angle) / glm::sinh(angle); - } - - template - GLM_FUNC_QUALIFIER vec coth(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'coth' only accept floating-point inputs"); - return detail::functor1::call(coth, x); - } - - // asech - template - GLM_FUNC_QUALIFIER genType asech(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'asech' only accept floating-point values"); - return acosh(genType(1) / x); - } - - template - GLM_FUNC_QUALIFIER vec asech(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'asech' only accept floating-point inputs"); - return detail::functor1::call(asech, x); - } - - // acsch - template - GLM_FUNC_QUALIFIER genType acsch(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acsch' only accept floating-point values"); - return asinh(genType(1) / x); - } - - template - GLM_FUNC_QUALIFIER vec acsch(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acsch' only accept floating-point inputs"); - return detail::functor1::call(acsch, x); - } - - // acoth - template - GLM_FUNC_QUALIFIER genType acoth(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acoth' only accept floating-point values"); - return atanh(genType(1) / x); - } - - template - GLM_FUNC_QUALIFIER vec acoth(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'acoth' only accept floating-point inputs"); - return detail::functor1::call(acoth, x); - } -}//namespace glm diff --git a/third_party/glm/gtc/round.hpp b/third_party/glm/gtc/round.hpp deleted file mode 100755 index 56edbbc..0000000 --- a/third_party/glm/gtc/round.hpp +++ /dev/null @@ -1,160 +0,0 @@ -/// @ref gtc_round -/// @file glm/gtc/round.hpp -/// -/// @see core (dependence) -/// @see gtc_round (dependence) -/// -/// @defgroup gtc_round GLM_GTC_round -/// @ingroup gtc 
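// Illustrative sketch (not part of the diff): the GLM_GTC_reciprocal functions removed above
// are thin wrappers over the standard trigonometric functions, e.g. sec(x) == 1 / cos(x).
#include <glm/glm.hpp>
#include <glm/gtc/reciprocal.hpp>

int main()
{
    float x = 0.5f;
    float s = glm::sec(x);    // 1 / cos(x)
    float c = glm::csc(x);    // 1 / sin(x)
    float t = glm::cot(x);    // tan(pi/2 - x)
    float a = glm::acot(t);   // inverse: recovers x up to floating-point error
    (void)s; (void)c; (void)a;
    return 0;
}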
-/// -/// Include to use the features of this extension. -/// -/// Rounding value to specific boundings - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "../vector_relational.hpp" -#include "../common.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_round extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_round - /// @{ - - /// Return the power of two number which value is just higher the input value, - /// round up to a power of two. - /// - /// @see gtc_round - template - GLM_FUNC_DECL genIUType ceilPowerOfTwo(genIUType v); - - /// Return the power of two number which value is just higher the input value, - /// round up to a power of two. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_round - template - GLM_FUNC_DECL vec ceilPowerOfTwo(vec const& v); - - /// Return the power of two number which value is just lower the input value, - /// round down to a power of two. - /// - /// @see gtc_round - template - GLM_FUNC_DECL genIUType floorPowerOfTwo(genIUType v); - - /// Return the power of two number which value is just lower the input value, - /// round down to a power of two. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_round - template - GLM_FUNC_DECL vec floorPowerOfTwo(vec const& v); - - /// Return the power of two number which value is the closet to the input value. - /// - /// @see gtc_round - template - GLM_FUNC_DECL genIUType roundPowerOfTwo(genIUType v); - - /// Return the power of two number which value is the closet to the input value. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_round - template - GLM_FUNC_DECL vec roundPowerOfTwo(vec const& v); - - /// Higher multiple number of Source. - /// - /// @tparam genType Floating-point or integer scalar or vector types. - /// - /// @param v Source value to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see gtc_round - template - GLM_FUNC_DECL genType ceilMultiple(genType v, genType Multiple); - - /// Higher multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see gtc_round - template - GLM_FUNC_DECL vec ceilMultiple(vec const& v, vec const& Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam genType Floating-point or integer scalar or vector types. - /// - /// @param v Source value to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see gtc_round - template - GLM_FUNC_DECL genType floorMultiple(genType v, genType Multiple); - - /// Lower multiple number of Source. 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see gtc_round - template - GLM_FUNC_DECL vec floorMultiple(vec const& v, vec const& Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam genType Floating-point or integer scalar or vector types. - /// - /// @param v Source value to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see gtc_round - template - GLM_FUNC_DECL genType roundMultiple(genType v, genType Multiple); - - /// Lower multiple number of Source. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @param v Source values to which is applied the function - /// @param Multiple Must be a null or positive value - /// - /// @see gtc_round - template - GLM_FUNC_DECL vec roundMultiple(vec const& v, vec const& Multiple); - - /// @} -} //namespace glm - -#include "round.inl" diff --git a/third_party/glm/gtc/round.inl b/third_party/glm/gtc/round.inl deleted file mode 100755 index 48411e4..0000000 --- a/third_party/glm/gtc/round.inl +++ /dev/null @@ -1,155 +0,0 @@ -/// @ref gtc_round - -#include "../integer.hpp" -#include "../ext/vector_integer.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_roundMultiple {}; - - template<> - struct compute_roundMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if (Source >= genType(0)) - return Source - std::fmod(Source, Multiple); - else - { - genType Tmp = Source + genType(1); - return Tmp - std::fmod(Tmp, Multiple) - Multiple; - } - } - }; - - template<> - struct compute_roundMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if (Source >= genType(0)) - return Source - Source % Multiple; - else - { - genType Tmp = Source + genType(1); - return Tmp - Tmp % Multiple - Multiple; - } - } - }; - - template<> - struct compute_roundMultiple - { - template - GLM_FUNC_QUALIFIER static genType call(genType Source, genType Multiple) - { - if (Source >= genType(0)) - return Source - Source % Multiple; - else - { - genType Tmp = Source + genType(1); - return Tmp - Tmp % Multiple - Multiple; - } - } - }; -}//namespace detail - - ////////////////// - // ceilPowerOfTwo - - template - GLM_FUNC_QUALIFIER genType ceilPowerOfTwo(genType value) - { - return detail::compute_ceilPowerOfTwo<1, genType, defaultp, std::numeric_limits::is_signed>::call(vec<1, genType, defaultp>(value)).x; - } - - template - GLM_FUNC_QUALIFIER vec ceilPowerOfTwo(vec const& v) - { - return detail::compute_ceilPowerOfTwo::is_signed>::call(v); - } - - /////////////////// - // floorPowerOfTwo - - template - GLM_FUNC_QUALIFIER genType floorPowerOfTwo(genType value) - { - return isPowerOfTwo(value) ? 
value : static_cast(1) << findMSB(value); - } - - template - GLM_FUNC_QUALIFIER vec floorPowerOfTwo(vec const& v) - { - return detail::functor1::call(floorPowerOfTwo, v); - } - - /////////////////// - // roundPowerOfTwo - - template - GLM_FUNC_QUALIFIER genIUType roundPowerOfTwo(genIUType value) - { - if(isPowerOfTwo(value)) - return value; - - genIUType const prev = static_cast(1) << findMSB(value); - genIUType const next = prev << static_cast(1); - return (next - value) < (value - prev) ? next : prev; - } - - template - GLM_FUNC_QUALIFIER vec roundPowerOfTwo(vec const& v) - { - return detail::functor1::call(roundPowerOfTwo, v); - } - - ////////////////////// - // ceilMultiple - - template - GLM_FUNC_QUALIFIER genType ceilMultiple(genType Source, genType Multiple) - { - return detail::compute_ceilMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); - } - - template - GLM_FUNC_QUALIFIER vec ceilMultiple(vec const& Source, vec const& Multiple) - { - return detail::functor2::call(ceilMultiple, Source, Multiple); - } - - ////////////////////// - // floorMultiple - - template - GLM_FUNC_QUALIFIER genType floorMultiple(genType Source, genType Multiple) - { - return detail::compute_floorMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); - } - - template - GLM_FUNC_QUALIFIER vec floorMultiple(vec const& Source, vec const& Multiple) - { - return detail::functor2::call(floorMultiple, Source, Multiple); - } - - ////////////////////// - // roundMultiple - - template - GLM_FUNC_QUALIFIER genType roundMultiple(genType Source, genType Multiple) - { - return detail::compute_roundMultiple::is_iec559, std::numeric_limits::is_signed>::call(Source, Multiple); - } - - template - GLM_FUNC_QUALIFIER vec roundMultiple(vec const& Source, vec const& Multiple) - { - return detail::functor2::call(roundMultiple, Source, Multiple); - } -}//namespace glm diff --git a/third_party/glm/gtc/type_aligned.hpp b/third_party/glm/gtc/type_aligned.hpp deleted file mode 100755 index 5403abf..0000000 --- a/third_party/glm/gtc/type_aligned.hpp +++ /dev/null @@ -1,1315 +0,0 @@ -/// @ref gtc_type_aligned -/// @file glm/gtc/type_aligned.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_type_aligned GLM_GTC_type_aligned -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Aligned types allowing SIMD optimizations of vectors and matrices types - -#pragma once - -#if (GLM_CONFIG_ALIGNED_GENTYPES == GLM_DISABLE) -# error "GLM: Aligned gentypes require to enable C++ language extensions. Define GLM_FORCE_ALIGNED_GENTYPES before including GLM headers to use aligned types." -#endif - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_type_aligned extension included") -#endif - -#include "../mat4x4.hpp" -#include "../mat4x3.hpp" -#include "../mat4x2.hpp" -#include "../mat3x4.hpp" -#include "../mat3x3.hpp" -#include "../mat3x2.hpp" -#include "../mat2x4.hpp" -#include "../mat2x3.hpp" -#include "../mat2x2.hpp" -#include "../gtc/vec1.hpp" -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" - -namespace glm -{ - /// @addtogroup gtc_type_aligned - /// @{ - - // -- *vec1 -- - - /// 1 component vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
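// Illustrative sketch (not part of the diff): typical use of the GLM_GTC_round helpers removed
// above, e.g. for sizing power-of-two textures or aligning buffer sizes. The concrete numbers
// are made up for the example.
#include <glm/glm.hpp>
#include <glm/gtc/round.hpp>

int main()
{
    unsigned int texels  = 1000u;
    unsigned int nextPot = glm::ceilPowerOfTwo(texels);    // 1024
    unsigned int prevPot = glm::floorPowerOfTwo(texels);   // 512
    unsigned int nearest = glm::roundPowerOfTwo(texels);   // 1024 (closer than 512)

    // Round a byte count up to a multiple of 256.
    unsigned int aligned = glm::ceilMultiple(1000u, 256u); // 1024

    (void)nextPot; (void)prevPot; (void)nearest; (void)aligned;
    return 0;
}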
- typedef vec<1, float, aligned_highp> aligned_highp_vec1; - - /// 1 component vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, float, aligned_mediump> aligned_mediump_vec1; - - /// 1 component vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, float, aligned_lowp> aligned_lowp_vec1; - - /// 1 component vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, double, aligned_highp> aligned_highp_dvec1; - - /// 1 component vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, double, aligned_mediump> aligned_mediump_dvec1; - - /// 1 component vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, double, aligned_lowp> aligned_lowp_dvec1; - - /// 1 component vector aligned in memory of signed integer numbers. - typedef vec<1, int, aligned_highp> aligned_highp_ivec1; - - /// 1 component vector aligned in memory of signed integer numbers. - typedef vec<1, int, aligned_mediump> aligned_mediump_ivec1; - - /// 1 component vector aligned in memory of signed integer numbers. - typedef vec<1, int, aligned_lowp> aligned_lowp_ivec1; - - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef vec<1, uint, aligned_highp> aligned_highp_uvec1; - - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef vec<1, uint, aligned_mediump> aligned_mediump_uvec1; - - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef vec<1, uint, aligned_lowp> aligned_lowp_uvec1; - - /// 1 component vector aligned in memory of bool values. - typedef vec<1, bool, aligned_highp> aligned_highp_bvec1; - - /// 1 component vector aligned in memory of bool values. - typedef vec<1, bool, aligned_mediump> aligned_mediump_bvec1; - - /// 1 component vector aligned in memory of bool values. - typedef vec<1, bool, aligned_lowp> aligned_lowp_bvec1; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, float, packed_highp> packed_highp_vec1; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, float, packed_mediump> packed_mediump_vec1; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, float, packed_lowp> packed_lowp_vec1; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<1, double, packed_highp> packed_highp_dvec1; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<1, double, packed_mediump> packed_mediump_dvec1; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<1, double, packed_lowp> packed_lowp_dvec1; - - /// 1 component vector tightly packed in memory of signed integer numbers. 
- typedef vec<1, int, packed_highp> packed_highp_ivec1; - - /// 1 component vector tightly packed in memory of signed integer numbers. - typedef vec<1, int, packed_mediump> packed_mediump_ivec1; - - /// 1 component vector tightly packed in memory of signed integer numbers. - typedef vec<1, int, packed_lowp> packed_lowp_ivec1; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef vec<1, uint, packed_highp> packed_highp_uvec1; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef vec<1, uint, packed_mediump> packed_mediump_uvec1; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef vec<1, uint, packed_lowp> packed_lowp_uvec1; - - /// 1 component vector tightly packed in memory of bool values. - typedef vec<1, bool, packed_highp> packed_highp_bvec1; - - /// 1 component vector tightly packed in memory of bool values. - typedef vec<1, bool, packed_mediump> packed_mediump_bvec1; - - /// 1 component vector tightly packed in memory of bool values. - typedef vec<1, bool, packed_lowp> packed_lowp_bvec1; - - // -- *vec2 -- - - /// 2 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, float, aligned_highp> aligned_highp_vec2; - - /// 2 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, float, aligned_mediump> aligned_mediump_vec2; - - /// 2 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, float, aligned_lowp> aligned_lowp_vec2; - - /// 2 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, double, aligned_highp> aligned_highp_dvec2; - - /// 2 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, double, aligned_mediump> aligned_mediump_dvec2; - - /// 2 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, double, aligned_lowp> aligned_lowp_dvec2; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef vec<2, int, aligned_highp> aligned_highp_ivec2; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef vec<2, int, aligned_mediump> aligned_mediump_ivec2; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef vec<2, int, aligned_lowp> aligned_lowp_ivec2; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef vec<2, uint, aligned_highp> aligned_highp_uvec2; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef vec<2, uint, aligned_mediump> aligned_mediump_uvec2; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef vec<2, uint, aligned_lowp> aligned_lowp_uvec2; - - /// 2 components vector aligned in memory of bool values. - typedef vec<2, bool, aligned_highp> aligned_highp_bvec2; - - /// 2 components vector aligned in memory of bool values. - typedef vec<2, bool, aligned_mediump> aligned_mediump_bvec2; - - /// 2 components vector aligned in memory of bool values. 
- typedef vec<2, bool, aligned_lowp> aligned_lowp_bvec2; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, float, packed_highp> packed_highp_vec2; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, float, packed_mediump> packed_mediump_vec2; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, float, packed_lowp> packed_lowp_vec2; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<2, double, packed_highp> packed_highp_dvec2; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<2, double, packed_mediump> packed_mediump_dvec2; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<2, double, packed_lowp> packed_lowp_dvec2; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef vec<2, int, packed_highp> packed_highp_ivec2; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef vec<2, int, packed_mediump> packed_mediump_ivec2; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef vec<2, int, packed_lowp> packed_lowp_ivec2; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<2, uint, packed_highp> packed_highp_uvec2; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<2, uint, packed_mediump> packed_mediump_uvec2; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<2, uint, packed_lowp> packed_lowp_uvec2; - - /// 2 components vector tightly packed in memory of bool values. - typedef vec<2, bool, packed_highp> packed_highp_bvec2; - - /// 2 components vector tightly packed in memory of bool values. - typedef vec<2, bool, packed_mediump> packed_mediump_bvec2; - - /// 2 components vector tightly packed in memory of bool values. - typedef vec<2, bool, packed_lowp> packed_lowp_bvec2; - - // -- *vec3 -- - - /// 3 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, float, aligned_highp> aligned_highp_vec3; - - /// 3 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<3, float, aligned_mediump> aligned_mediump_vec3; - - /// 3 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, float, aligned_lowp> aligned_lowp_vec3; - - /// 3 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, double, aligned_highp> aligned_highp_dvec3; - - /// 3 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef vec<3, double, aligned_mediump> aligned_mediump_dvec3; - - /// 3 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, double, aligned_lowp> aligned_lowp_dvec3; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef vec<3, int, aligned_highp> aligned_highp_ivec3; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef vec<3, int, aligned_mediump> aligned_mediump_ivec3; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef vec<3, int, aligned_lowp> aligned_lowp_ivec3; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef vec<3, uint, aligned_highp> aligned_highp_uvec3; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef vec<3, uint, aligned_mediump> aligned_mediump_uvec3; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef vec<3, uint, aligned_lowp> aligned_lowp_uvec3; - - /// 3 components vector aligned in memory of bool values. - typedef vec<3, bool, aligned_highp> aligned_highp_bvec3; - - /// 3 components vector aligned in memory of bool values. - typedef vec<3, bool, aligned_mediump> aligned_mediump_bvec3; - - /// 3 components vector aligned in memory of bool values. - typedef vec<3, bool, aligned_lowp> aligned_lowp_bvec3; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, float, packed_highp> packed_highp_vec3; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<3, float, packed_mediump> packed_mediump_vec3; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, float, packed_lowp> packed_lowp_vec3; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<3, double, packed_highp> packed_highp_dvec3; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<3, double, packed_mediump> packed_mediump_dvec3; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<3, double, packed_lowp> packed_lowp_dvec3; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef vec<3, int, packed_highp> packed_highp_ivec3; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef vec<3, int, packed_mediump> packed_mediump_ivec3; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef vec<3, int, packed_lowp> packed_lowp_ivec3; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<3, uint, packed_highp> packed_highp_uvec3; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<3, uint, packed_mediump> packed_mediump_uvec3; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<3, uint, packed_lowp> packed_lowp_uvec3; - - /// 3 components vector tightly packed in memory of bool values. 
- typedef vec<3, bool, packed_highp> packed_highp_bvec3; - - /// 3 components vector tightly packed in memory of bool values. - typedef vec<3, bool, packed_mediump> packed_mediump_bvec3; - - /// 3 components vector tightly packed in memory of bool values. - typedef vec<3, bool, packed_lowp> packed_lowp_bvec3; - - // -- *vec4 -- - - /// 4 components vector aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<4, float, aligned_highp> aligned_highp_vec4; - - /// 4 components vector aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, float, aligned_mediump> aligned_mediump_vec4; - - /// 4 components vector aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, float, aligned_lowp> aligned_lowp_vec4; - - /// 4 components vector aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<4, double, aligned_highp> aligned_highp_dvec4; - - /// 4 components vector aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, double, aligned_mediump> aligned_mediump_dvec4; - - /// 4 components vector aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, double, aligned_lowp> aligned_lowp_dvec4; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef vec<4, int, aligned_highp> aligned_highp_ivec4; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef vec<4, int, aligned_mediump> aligned_mediump_ivec4; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef vec<4, int, aligned_lowp> aligned_lowp_ivec4; - - /// 4 components vector aligned in memory of unsigned integer numbers. - typedef vec<4, uint, aligned_highp> aligned_highp_uvec4; - - /// 4 components vector aligned in memory of unsigned integer numbers. - typedef vec<4, uint, aligned_mediump> aligned_mediump_uvec4; - - /// 4 components vector aligned in memory of unsigned integer numbers. - typedef vec<4, uint, aligned_lowp> aligned_lowp_uvec4; - - /// 4 components vector aligned in memory of bool values. - typedef vec<4, bool, aligned_highp> aligned_highp_bvec4; - - /// 4 components vector aligned in memory of bool values. - typedef vec<4, bool, aligned_mediump> aligned_mediump_bvec4; - - /// 4 components vector aligned in memory of bool values. - typedef vec<4, bool, aligned_lowp> aligned_lowp_bvec4; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef vec<4, float, packed_highp> packed_highp_vec4; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, float, packed_mediump> packed_mediump_vec4; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, float, packed_lowp> packed_lowp_vec4; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef vec<4, double, packed_highp> packed_highp_dvec4; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef vec<4, double, packed_mediump> packed_mediump_dvec4; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef vec<4, double, packed_lowp> packed_lowp_dvec4; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef vec<4, int, packed_highp> packed_highp_ivec4; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef vec<4, int, packed_mediump> packed_mediump_ivec4; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef vec<4, int, packed_lowp> packed_lowp_ivec4; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<4, uint, packed_highp> packed_highp_uvec4; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<4, uint, packed_mediump> packed_mediump_uvec4; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef vec<4, uint, packed_lowp> packed_lowp_uvec4; - - /// 4 components vector tightly packed in memory of bool values. - typedef vec<4, bool, packed_highp> packed_highp_bvec4; - - /// 4 components vector tightly packed in memory of bool values. - typedef vec<4, bool, packed_mediump> packed_mediump_bvec4; - - /// 4 components vector tightly packed in memory of bool values. - typedef vec<4, bool, packed_lowp> packed_lowp_bvec4; - - // -- *mat2 -- - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_highp> packed_highp_mat2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_highp> packed_highp_dmat2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2; - - // -- *mat3 -- - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_highp> packed_highp_mat3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_highp> packed_highp_dmat3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3; - - // -- *mat4 -- - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_highp> packed_highp_mat4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_highp> packed_highp_dmat4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4; - - // -- *mat2x2 -- - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_highp> aligned_highp_mat2x2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_mediump> aligned_mediump_mat2x2; - - /// 2 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, float, aligned_lowp> aligned_lowp_mat2x2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_highp> aligned_highp_dmat2x2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_mediump> aligned_mediump_dmat2x2; - - /// 2 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, aligned_lowp> aligned_lowp_dmat2x2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef mat<2, 2, float, packed_highp> packed_highp_mat2x2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_mediump> packed_mediump_mat2x2; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, float, packed_lowp> packed_lowp_mat2x2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_highp> packed_highp_dmat2x2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_mediump> packed_mediump_dmat2x2; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 2, double, packed_lowp> packed_lowp_dmat2x2; - - // -- *mat2x3 -- - - /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, float, aligned_highp> aligned_highp_mat2x3; - - /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, float, aligned_mediump> aligned_mediump_mat2x3; - - /// 2 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 3, float, aligned_lowp> aligned_lowp_mat2x3; - - /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, double, aligned_highp> aligned_highp_dmat2x3; - - /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, double, aligned_mediump> aligned_mediump_dmat2x3; - - /// 2 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 3, double, aligned_lowp> aligned_lowp_dmat2x3; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, float, packed_highp> packed_highp_mat2x3; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, float, packed_mediump> packed_mediump_mat2x3; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 3, float, packed_lowp> packed_lowp_mat2x3; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 3, double, packed_highp> packed_highp_dmat2x3; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 3, double, packed_mediump> packed_mediump_dmat2x3; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<2, 3, double, packed_lowp> packed_lowp_dmat2x3; - - // -- *mat2x4 -- - - /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, float, aligned_highp> aligned_highp_mat2x4; - - /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, float, aligned_mediump> aligned_mediump_mat2x4; - - /// 2 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, float, aligned_lowp> aligned_lowp_mat2x4; - - /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, double, aligned_highp> aligned_highp_dmat2x4; - - /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, double, aligned_mediump> aligned_mediump_dmat2x4; - - /// 2 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, double, aligned_lowp> aligned_lowp_dmat2x4; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, float, packed_highp> packed_highp_mat2x4; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, float, packed_mediump> packed_mediump_mat2x4; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, float, packed_lowp> packed_lowp_mat2x4; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<2, 4, double, packed_highp> packed_highp_dmat2x4; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<2, 4, double, packed_mediump> packed_mediump_dmat2x4; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<2, 4, double, packed_lowp> packed_lowp_dmat2x4; - - // -- *mat3x2 -- - - /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, float, aligned_highp> aligned_highp_mat3x2; - - /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 2, float, aligned_mediump> aligned_mediump_mat3x2; - - /// 3 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, float, aligned_lowp> aligned_lowp_mat3x2; - - /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, double, aligned_highp> aligned_highp_dmat3x2; - - /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef mat<3, 2, double, aligned_mediump> aligned_mediump_dmat3x2; - - /// 3 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, double, aligned_lowp> aligned_lowp_dmat3x2; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, float, packed_highp> packed_highp_mat3x2; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 2, float, packed_mediump> packed_mediump_mat3x2; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, float, packed_lowp> packed_lowp_mat3x2; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 2, double, packed_highp> packed_highp_dmat3x2; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 2, double, packed_mediump> packed_mediump_dmat3x2; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 2, double, packed_lowp> packed_lowp_dmat3x2; - - // -- *mat3x3 -- - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_highp> aligned_highp_mat3x3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_mediump> aligned_mediump_mat3x3; - - /// 3 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, aligned_lowp> aligned_lowp_mat3x3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_highp> aligned_highp_dmat3x3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_mediump> aligned_mediump_dmat3x3; - - /// 3 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, aligned_lowp> aligned_lowp_dmat3x3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_highp> packed_highp_mat3x3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_mediump> packed_mediump_mat3x3; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, float, packed_lowp> packed_lowp_mat3x3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. 
- typedef mat<3, 3, double, packed_highp> packed_highp_dmat3x3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_mediump> packed_mediump_dmat3x3; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 3, double, packed_lowp> packed_lowp_dmat3x3; - - // -- *mat3x4 -- - - /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, float, aligned_highp> aligned_highp_mat3x4; - - /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, float, aligned_mediump> aligned_mediump_mat3x4; - - /// 3 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, float, aligned_lowp> aligned_lowp_mat3x4; - - /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, double, aligned_highp> aligned_highp_dmat3x4; - - /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, double, aligned_mediump> aligned_mediump_dmat3x4; - - /// 3 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, double, aligned_lowp> aligned_lowp_dmat3x4; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, float, packed_highp> packed_highp_mat3x4; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, float, packed_mediump> packed_mediump_mat3x4; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, float, packed_lowp> packed_lowp_mat3x4; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<3, 4, double, packed_highp> packed_highp_dmat3x4; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<3, 4, double, packed_mediump> packed_mediump_dmat3x4; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<3, 4, double, packed_lowp> packed_lowp_dmat3x4; - - // -- *mat4x2 -- - - /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, float, aligned_highp> aligned_highp_mat4x2; - - /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, float, aligned_mediump> aligned_mediump_mat4x2; - - /// 4 by 2 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<4, 2, float, aligned_lowp> aligned_lowp_mat4x2; - - /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, double, aligned_highp> aligned_highp_dmat4x2; - - /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, double, aligned_mediump> aligned_mediump_dmat4x2; - - /// 4 by 2 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 2, double, aligned_lowp> aligned_lowp_dmat4x2; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, float, packed_highp> packed_highp_mat4x2; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, float, packed_mediump> packed_mediump_mat4x2; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 2, float, packed_lowp> packed_lowp_mat4x2; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 2, double, packed_highp> packed_highp_dmat4x2; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 2, double, packed_mediump> packed_mediump_dmat4x2; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 2, double, packed_lowp> packed_lowp_dmat4x2; - - // -- *mat4x3 -- - - /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, float, aligned_highp> aligned_highp_mat4x3; - - /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 3, float, aligned_mediump> aligned_mediump_mat4x3; - - /// 4 by 3 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, float, aligned_lowp> aligned_lowp_mat4x3; - - /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, double, aligned_highp> aligned_highp_dmat4x3; - - /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 3, double, aligned_mediump> aligned_mediump_dmat4x3; - - /// 4 by 3 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, double, aligned_lowp> aligned_lowp_dmat4x3; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, float, packed_highp> packed_highp_mat4x3; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. 
- typedef mat<4, 3, float, packed_mediump> packed_mediump_mat4x3; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, float, packed_lowp> packed_lowp_mat4x3; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 3, double, packed_highp> packed_highp_dmat4x3; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 3, double, packed_mediump> packed_mediump_dmat4x3; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 3, double, packed_lowp> packed_lowp_dmat4x3; - - // -- *mat4x4 -- - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_highp> aligned_highp_mat4x4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_mediump> aligned_mediump_mat4x4; - - /// 4 by 4 matrix aligned in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, aligned_lowp> aligned_lowp_mat4x4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_highp> aligned_highp_dmat4x4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_mediump> aligned_mediump_dmat4x4; - - /// 4 by 4 matrix aligned in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, double, aligned_lowp> aligned_lowp_dmat4x4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_highp> packed_highp_mat4x4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_mediump> packed_mediump_mat4x4; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers using low precision arithmetic in term of ULPs. - typedef mat<4, 4, float, packed_lowp> packed_lowp_mat4x4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using high precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_highp> packed_highp_dmat4x4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using medium precision arithmetic in term of ULPs. - typedef mat<4, 4, double, packed_mediump> packed_mediump_dmat4x4; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers using low precision arithmetic in term of ULPs. 
- typedef mat<4, 4, double, packed_lowp> packed_lowp_dmat4x4; - - // -- default -- - -#if(defined(GLM_PRECISION_LOWP_FLOAT)) - typedef aligned_lowp_vec1 aligned_vec1; - typedef aligned_lowp_vec2 aligned_vec2; - typedef aligned_lowp_vec3 aligned_vec3; - typedef aligned_lowp_vec4 aligned_vec4; - typedef packed_lowp_vec1 packed_vec1; - typedef packed_lowp_vec2 packed_vec2; - typedef packed_lowp_vec3 packed_vec3; - typedef packed_lowp_vec4 packed_vec4; - - typedef aligned_lowp_mat2 aligned_mat2; - typedef aligned_lowp_mat3 aligned_mat3; - typedef aligned_lowp_mat4 aligned_mat4; - typedef packed_lowp_mat2 packed_mat2; - typedef packed_lowp_mat3 packed_mat3; - typedef packed_lowp_mat4 packed_mat4; - - typedef aligned_lowp_mat2x2 aligned_mat2x2; - typedef aligned_lowp_mat2x3 aligned_mat2x3; - typedef aligned_lowp_mat2x4 aligned_mat2x4; - typedef aligned_lowp_mat3x2 aligned_mat3x2; - typedef aligned_lowp_mat3x3 aligned_mat3x3; - typedef aligned_lowp_mat3x4 aligned_mat3x4; - typedef aligned_lowp_mat4x2 aligned_mat4x2; - typedef aligned_lowp_mat4x3 aligned_mat4x3; - typedef aligned_lowp_mat4x4 aligned_mat4x4; - typedef packed_lowp_mat2x2 packed_mat2x2; - typedef packed_lowp_mat2x3 packed_mat2x3; - typedef packed_lowp_mat2x4 packed_mat2x4; - typedef packed_lowp_mat3x2 packed_mat3x2; - typedef packed_lowp_mat3x3 packed_mat3x3; - typedef packed_lowp_mat3x4 packed_mat3x4; - typedef packed_lowp_mat4x2 packed_mat4x2; - typedef packed_lowp_mat4x3 packed_mat4x3; - typedef packed_lowp_mat4x4 packed_mat4x4; -#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) - typedef aligned_mediump_vec1 aligned_vec1; - typedef aligned_mediump_vec2 aligned_vec2; - typedef aligned_mediump_vec3 aligned_vec3; - typedef aligned_mediump_vec4 aligned_vec4; - typedef packed_mediump_vec1 packed_vec1; - typedef packed_mediump_vec2 packed_vec2; - typedef packed_mediump_vec3 packed_vec3; - typedef packed_mediump_vec4 packed_vec4; - - typedef aligned_mediump_mat2 aligned_mat2; - typedef aligned_mediump_mat3 aligned_mat3; - typedef aligned_mediump_mat4 aligned_mat4; - typedef packed_mediump_mat2 packed_mat2; - typedef packed_mediump_mat3 packed_mat3; - typedef packed_mediump_mat4 packed_mat4; - - typedef aligned_mediump_mat2x2 aligned_mat2x2; - typedef aligned_mediump_mat2x3 aligned_mat2x3; - typedef aligned_mediump_mat2x4 aligned_mat2x4; - typedef aligned_mediump_mat3x2 aligned_mat3x2; - typedef aligned_mediump_mat3x3 aligned_mat3x3; - typedef aligned_mediump_mat3x4 aligned_mat3x4; - typedef aligned_mediump_mat4x2 aligned_mat4x2; - typedef aligned_mediump_mat4x3 aligned_mat4x3; - typedef aligned_mediump_mat4x4 aligned_mat4x4; - typedef packed_mediump_mat2x2 packed_mat2x2; - typedef packed_mediump_mat2x3 packed_mat2x3; - typedef packed_mediump_mat2x4 packed_mat2x4; - typedef packed_mediump_mat3x2 packed_mat3x2; - typedef packed_mediump_mat3x3 packed_mat3x3; - typedef packed_mediump_mat3x4 packed_mat3x4; - typedef packed_mediump_mat4x2 packed_mat4x2; - typedef packed_mediump_mat4x3 packed_mat4x3; - typedef packed_mediump_mat4x4 packed_mat4x4; -#else //defined(GLM_PRECISION_HIGHP_FLOAT) - /// 1 component vector aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_vec1 aligned_vec1; - - /// 2 components vector aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_vec2 aligned_vec2; - - /// 3 components vector aligned in memory of single-precision floating-point numbers. 
- typedef aligned_highp_vec3 aligned_vec3; - - /// 4 components vector aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_vec4 aligned_vec4; - - /// 1 component vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec1 packed_vec1; - - /// 2 components vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec2 packed_vec2; - - /// 3 components vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec3 packed_vec3; - - /// 4 components vector tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_vec4 packed_vec4; - - /// 2 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2 aligned_mat2; - - /// 3 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3 aligned_mat3; - - /// 4 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4 aligned_mat4; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2 packed_mat2; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat3 packed_mat3; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4 packed_mat4; - - /// 2 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2x2 aligned_mat2x2; - - /// 2 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2x3 aligned_mat2x3; - - /// 2 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat2x4 aligned_mat2x4; - - /// 3 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3x2 aligned_mat3x2; - - /// 3 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3x3 aligned_mat3x3; - - /// 3 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat3x4 aligned_mat3x4; - - /// 4 by 2 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4x2 aligned_mat4x2; - - /// 4 by 3 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4x3 aligned_mat4x3; - - /// 4 by 4 matrix tightly aligned in memory of single-precision floating-point numbers. - typedef aligned_highp_mat4x4 aligned_mat4x4; - - /// 2 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2x2 packed_mat2x2; - - /// 2 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2x3 packed_mat2x3; - - /// 2 by 4 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat2x4 packed_mat2x4; - - /// 3 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat3x2 packed_mat3x2; - - /// 3 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat3x3 packed_mat3x3; - - /// 3 by 4 matrix tightly packed in memory of single-precision floating-point numbers. 
- typedef packed_highp_mat3x4 packed_mat3x4; - - /// 4 by 2 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4x2 packed_mat4x2; - - /// 4 by 3 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4x3 packed_mat4x3; - - /// 4 by 4 matrix tightly packed in memory of single-precision floating-point numbers. - typedef packed_highp_mat4x4 packed_mat4x4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef aligned_lowp_dvec1 aligned_dvec1; - typedef aligned_lowp_dvec2 aligned_dvec2; - typedef aligned_lowp_dvec3 aligned_dvec3; - typedef aligned_lowp_dvec4 aligned_dvec4; - typedef packed_lowp_dvec1 packed_dvec1; - typedef packed_lowp_dvec2 packed_dvec2; - typedef packed_lowp_dvec3 packed_dvec3; - typedef packed_lowp_dvec4 packed_dvec4; - - typedef aligned_lowp_dmat2 aligned_dmat2; - typedef aligned_lowp_dmat3 aligned_dmat3; - typedef aligned_lowp_dmat4 aligned_dmat4; - typedef packed_lowp_dmat2 packed_dmat2; - typedef packed_lowp_dmat3 packed_dmat3; - typedef packed_lowp_dmat4 packed_dmat4; - - typedef aligned_lowp_dmat2x2 aligned_dmat2x2; - typedef aligned_lowp_dmat2x3 aligned_dmat2x3; - typedef aligned_lowp_dmat2x4 aligned_dmat2x4; - typedef aligned_lowp_dmat3x2 aligned_dmat3x2; - typedef aligned_lowp_dmat3x3 aligned_dmat3x3; - typedef aligned_lowp_dmat3x4 aligned_dmat3x4; - typedef aligned_lowp_dmat4x2 aligned_dmat4x2; - typedef aligned_lowp_dmat4x3 aligned_dmat4x3; - typedef aligned_lowp_dmat4x4 aligned_dmat4x4; - typedef packed_lowp_dmat2x2 packed_dmat2x2; - typedef packed_lowp_dmat2x3 packed_dmat2x3; - typedef packed_lowp_dmat2x4 packed_dmat2x4; - typedef packed_lowp_dmat3x2 packed_dmat3x2; - typedef packed_lowp_dmat3x3 packed_dmat3x3; - typedef packed_lowp_dmat3x4 packed_dmat3x4; - typedef packed_lowp_dmat4x2 packed_dmat4x2; - typedef packed_lowp_dmat4x3 packed_dmat4x3; - typedef packed_lowp_dmat4x4 packed_dmat4x4; -#elif(defined(GLM_PRECISION_MEDIUMP_DOUBLE)) - typedef aligned_mediump_dvec1 aligned_dvec1; - typedef aligned_mediump_dvec2 aligned_dvec2; - typedef aligned_mediump_dvec3 aligned_dvec3; - typedef aligned_mediump_dvec4 aligned_dvec4; - typedef packed_mediump_dvec1 packed_dvec1; - typedef packed_mediump_dvec2 packed_dvec2; - typedef packed_mediump_dvec3 packed_dvec3; - typedef packed_mediump_dvec4 packed_dvec4; - - typedef aligned_mediump_dmat2 aligned_dmat2; - typedef aligned_mediump_dmat3 aligned_dmat3; - typedef aligned_mediump_dmat4 aligned_dmat4; - typedef packed_mediump_dmat2 packed_dmat2; - typedef packed_mediump_dmat3 packed_dmat3; - typedef packed_mediump_dmat4 packed_dmat4; - - typedef aligned_mediump_dmat2x2 aligned_dmat2x2; - typedef aligned_mediump_dmat2x3 aligned_dmat2x3; - typedef aligned_mediump_dmat2x4 aligned_dmat2x4; - typedef aligned_mediump_dmat3x2 aligned_dmat3x2; - typedef aligned_mediump_dmat3x3 aligned_dmat3x3; - typedef aligned_mediump_dmat3x4 aligned_dmat3x4; - typedef aligned_mediump_dmat4x2 aligned_dmat4x2; - typedef aligned_mediump_dmat4x3 aligned_dmat4x3; - typedef aligned_mediump_dmat4x4 aligned_dmat4x4; - typedef packed_mediump_dmat2x2 packed_dmat2x2; - typedef packed_mediump_dmat2x3 packed_dmat2x3; - typedef packed_mediump_dmat2x4 packed_dmat2x4; - typedef packed_mediump_dmat3x2 packed_dmat3x2; - typedef packed_mediump_dmat3x3 packed_dmat3x3; - typedef packed_mediump_dmat3x4 packed_dmat3x4; - typedef packed_mediump_dmat4x2 packed_dmat4x2; - typedef packed_mediump_dmat4x3 packed_dmat4x3; - typedef packed_mediump_dmat4x4 
packed_dmat4x4; -#else //defined(GLM_PRECISION_HIGHP_DOUBLE) - /// 1 component vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec1 aligned_dvec1; - - /// 2 components vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec2 aligned_dvec2; - - /// 3 components vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec3 aligned_dvec3; - - /// 4 components vector aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dvec4 aligned_dvec4; - - /// 1 component vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec1 packed_dvec1; - - /// 2 components vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec2 packed_dvec2; - - /// 3 components vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec3 packed_dvec3; - - /// 4 components vector tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dvec4 packed_dvec4; - - /// 2 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2 aligned_dmat2; - - /// 3 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3 aligned_dmat3; - - /// 4 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4 aligned_dmat4; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat2 packed_dmat2; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3 packed_dmat3; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4 packed_dmat4; - - /// 2 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2x2 aligned_dmat2x2; - - /// 2 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2x3 aligned_dmat2x3; - - /// 2 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat2x4 aligned_dmat2x4; - - /// 3 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3x2 aligned_dmat3x2; - - /// 3 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3x3 aligned_dmat3x3; - - /// 3 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat3x4 aligned_dmat3x4; - - /// 4 by 2 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4x2 aligned_dmat4x2; - - /// 4 by 3 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4x3 aligned_dmat4x3; - - /// 4 by 4 matrix tightly aligned in memory of double-precision floating-point numbers. - typedef aligned_highp_dmat4x4 aligned_dmat4x4; - - /// 2 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat2x2 packed_dmat2x2; - - /// 2 by 3 matrix tightly packed in memory of double-precision floating-point numbers. 
- typedef packed_highp_dmat2x3 packed_dmat2x3; - - /// 2 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat2x4 packed_dmat2x4; - - /// 3 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3x2 packed_dmat3x2; - - /// 3 by 3 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3x3 packed_dmat3x3; - - /// 3 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat3x4 packed_dmat3x4; - - /// 4 by 2 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4x2 packed_dmat4x2; - - /// 4 by 3 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4x3 packed_dmat4x3; - - /// 4 by 4 matrix tightly packed in memory of double-precision floating-point numbers. - typedef packed_highp_dmat4x4 packed_dmat4x4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_LOWP_INT)) - typedef aligned_lowp_ivec1 aligned_ivec1; - typedef aligned_lowp_ivec2 aligned_ivec2; - typedef aligned_lowp_ivec3 aligned_ivec3; - typedef aligned_lowp_ivec4 aligned_ivec4; -#elif(defined(GLM_PRECISION_MEDIUMP_INT)) - typedef aligned_mediump_ivec1 aligned_ivec1; - typedef aligned_mediump_ivec2 aligned_ivec2; - typedef aligned_mediump_ivec3 aligned_ivec3; - typedef aligned_mediump_ivec4 aligned_ivec4; -#else //defined(GLM_PRECISION_HIGHP_INT) - /// 1 component vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec1 aligned_ivec1; - - /// 2 components vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec2 aligned_ivec2; - - /// 3 components vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec3 aligned_ivec3; - - /// 4 components vector aligned in memory of signed integer numbers. - typedef aligned_highp_ivec4 aligned_ivec4; - - /// 1 component vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec1 packed_ivec1; - - /// 2 components vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec2 packed_ivec2; - - /// 3 components vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec3 packed_ivec3; - - /// 4 components vector tightly packed in memory of signed integer numbers. - typedef packed_highp_ivec4 packed_ivec4; -#endif//GLM_PRECISION - - // -- Unsigned integer definition -- - -#if(defined(GLM_PRECISION_LOWP_UINT)) - typedef aligned_lowp_uvec1 aligned_uvec1; - typedef aligned_lowp_uvec2 aligned_uvec2; - typedef aligned_lowp_uvec3 aligned_uvec3; - typedef aligned_lowp_uvec4 aligned_uvec4; -#elif(defined(GLM_PRECISION_MEDIUMP_UINT)) - typedef aligned_mediump_uvec1 aligned_uvec1; - typedef aligned_mediump_uvec2 aligned_uvec2; - typedef aligned_mediump_uvec3 aligned_uvec3; - typedef aligned_mediump_uvec4 aligned_uvec4; -#else //defined(GLM_PRECISION_HIGHP_UINT) - /// 1 component vector aligned in memory of unsigned integer numbers. - typedef aligned_highp_uvec1 aligned_uvec1; - - /// 2 components vector aligned in memory of unsigned integer numbers. - typedef aligned_highp_uvec2 aligned_uvec2; - - /// 3 components vector aligned in memory of unsigned integer numbers. - typedef aligned_highp_uvec3 aligned_uvec3; - - /// 4 components vector aligned in memory of unsigned integer numbers. 
- typedef aligned_highp_uvec4 aligned_uvec4; - - /// 1 component vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec1 packed_uvec1; - - /// 2 components vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec2 packed_uvec2; - - /// 3 components vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec3 packed_uvec3; - - /// 4 components vector tightly packed in memory of unsigned integer numbers. - typedef packed_highp_uvec4 packed_uvec4; -#endif//GLM_PRECISION - -#if(defined(GLM_PRECISION_LOWP_BOOL)) - typedef aligned_lowp_bvec1 aligned_bvec1; - typedef aligned_lowp_bvec2 aligned_bvec2; - typedef aligned_lowp_bvec3 aligned_bvec3; - typedef aligned_lowp_bvec4 aligned_bvec4; -#elif(defined(GLM_PRECISION_MEDIUMP_BOOL)) - typedef aligned_mediump_bvec1 aligned_bvec1; - typedef aligned_mediump_bvec2 aligned_bvec2; - typedef aligned_mediump_bvec3 aligned_bvec3; - typedef aligned_mediump_bvec4 aligned_bvec4; -#else //defined(GLM_PRECISION_HIGHP_BOOL) - /// 1 component vector aligned in memory of bool values. - typedef aligned_highp_bvec1 aligned_bvec1; - - /// 2 components vector aligned in memory of bool values. - typedef aligned_highp_bvec2 aligned_bvec2; - - /// 3 components vector aligned in memory of bool values. - typedef aligned_highp_bvec3 aligned_bvec3; - - /// 4 components vector aligned in memory of bool values. - typedef aligned_highp_bvec4 aligned_bvec4; - - /// 1 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec1 packed_bvec1; - - /// 2 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec2 packed_bvec2; - - /// 3 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec3 packed_bvec3; - - /// 4 components vector tightly packed in memory of bool values. - typedef packed_highp_bvec4 packed_bvec4; -#endif//GLM_PRECISION - - /// @} -}//namespace glm diff --git a/third_party/glm/gtc/type_precision.hpp b/third_party/glm/gtc/type_precision.hpp deleted file mode 100755 index 250bc4f..0000000 --- a/third_party/glm/gtc/type_precision.hpp +++ /dev/null @@ -1,2138 +0,0 @@ -/// @ref gtc_type_precision -/// @file glm/gtc/type_precision.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtc_type_precision GLM_GTC_type_precision -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Defines specific C++-based qualifier types. - -#pragma once - -// Dependency: -#include "../gtc/quaternion.hpp" -#include "../gtc/vec1.hpp" -#include "../ext/scalar_int_sized.hpp" -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/type_vec2.hpp" -#include "../detail/type_vec3.hpp" -#include "../detail/type_vec4.hpp" -#include "../detail/type_mat2x2.hpp" -#include "../detail/type_mat2x3.hpp" -#include "../detail/type_mat2x4.hpp" -#include "../detail/type_mat3x2.hpp" -#include "../detail/type_mat3x3.hpp" -#include "../detail/type_mat3x4.hpp" -#include "../detail/type_mat4x2.hpp" -#include "../detail/type_mat4x3.hpp" -#include "../detail/type_mat4x4.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_type_precision extension included") -#endif - -namespace glm -{ - /////////////////////////// - // Signed int vector types - - /// @addtogroup gtc_type_precision - /// @{ - - /// Low qualifier 8 bit signed integer type. 
- /// @see gtc_type_precision - typedef detail::int8 lowp_int8; - - /// Low qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 lowp_int16; - - /// Low qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 lowp_int32; - - /// Low qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 lowp_int64; - - /// Low qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 lowp_int8_t; - - /// Low qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 lowp_int16_t; - - /// Low qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 lowp_int32_t; - - /// Low qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 lowp_int64_t; - - /// Low qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 lowp_i8; - - /// Low qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 lowp_i16; - - /// Low qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 lowp_i32; - - /// Low qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 lowp_i64; - - /// Medium qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 mediump_int8; - - /// Medium qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 mediump_int16; - - /// Medium qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 mediump_int32; - - /// Medium qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 mediump_int64; - - /// Medium qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 mediump_int8_t; - - /// Medium qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 mediump_int16_t; - - /// Medium qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 mediump_int32_t; - - /// Medium qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 mediump_int64_t; - - /// Medium qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 mediump_i8; - - /// Medium qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 mediump_i16; - - /// Medium qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 mediump_i32; - - /// Medium qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 mediump_i64; - - /// High qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 highp_int8; - - /// High qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 highp_int16; - - /// High qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 highp_int32; - - /// High qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 highp_int64; - - /// High qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 highp_int8_t; - - /// High qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 highp_int16_t; - - /// 32 bit signed integer type. 
- /// @see gtc_type_precision - typedef detail::int32 highp_int32_t; - - /// High qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 highp_int64_t; - - /// High qualifier 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 highp_i8; - - /// High qualifier 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 highp_i16; - - /// High qualifier 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 highp_i32; - - /// High qualifier 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 highp_i64; - - -#if GLM_HAS_EXTENDED_INTEGER_TYPE - using std::int8_t; - using std::int16_t; - using std::int32_t; - using std::int64_t; -#else - /// 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 int8_t; - - /// 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 int16_t; - - /// 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 int32_t; - - /// 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 int64_t; -#endif - - /// 8 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int8 i8; - - /// 16 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int16 i16; - - /// 32 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int32 i32; - - /// 64 bit signed integer type. - /// @see gtc_type_precision - typedef detail::int64 i64; - - - - /// Low qualifier 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, lowp> lowp_i8vec1; - - /// Low qualifier 8 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i8, lowp> lowp_i8vec2; - - /// Low qualifier 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, lowp> lowp_i8vec3; - - /// Low qualifier 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, lowp> lowp_i8vec4; - - - /// Medium qualifier 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, mediump> mediump_i8vec1; - - /// Medium qualifier 8 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i8, mediump> mediump_i8vec2; - - /// Medium qualifier 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, mediump> mediump_i8vec3; - - /// Medium qualifier 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, mediump> mediump_i8vec4; - - - /// High qualifier 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, highp> highp_i8vec1; - - /// High qualifier 8 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i8, highp> highp_i8vec2; - - /// High qualifier 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, highp> highp_i8vec3; - - /// High qualifier 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, highp> highp_i8vec4; - - - - /// 8 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i8, defaultp> i8vec1; - - /// 8 bit signed integer vector of 2 components type. 
- /// @see gtc_type_precision - typedef vec<2, i8, defaultp> i8vec2; - - /// 8 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i8, defaultp> i8vec3; - - /// 8 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i8, defaultp> i8vec4; - - - - - - /// Low qualifier 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, lowp> lowp_i16vec1; - - /// Low qualifier 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, lowp> lowp_i16vec2; - - /// Low qualifier 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, lowp> lowp_i16vec3; - - /// Low qualifier 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, lowp> lowp_i16vec4; - - - /// Medium qualifier 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, mediump> mediump_i16vec1; - - /// Medium qualifier 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, mediump> mediump_i16vec2; - - /// Medium qualifier 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, mediump> mediump_i16vec3; - - /// Medium qualifier 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, mediump> mediump_i16vec4; - - - /// High qualifier 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, highp> highp_i16vec1; - - /// High qualifier 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, highp> highp_i16vec2; - - /// High qualifier 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, highp> highp_i16vec3; - - /// High qualifier 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, highp> highp_i16vec4; - - - - - /// 16 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i16, defaultp> i16vec1; - - /// 16 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i16, defaultp> i16vec2; - - /// 16 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i16, defaultp> i16vec3; - - /// 16 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i16, defaultp> i16vec4; - - - - /// Low qualifier 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, lowp> lowp_i32vec1; - - /// Low qualifier 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, lowp> lowp_i32vec2; - - /// Low qualifier 32 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i32, lowp> lowp_i32vec3; - - /// Low qualifier 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, lowp> lowp_i32vec4; - - - /// Medium qualifier 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, mediump> mediump_i32vec1; - - /// Medium qualifier 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, mediump> mediump_i32vec2; - - /// Medium qualifier 32 bit signed integer vector of 3 components type. 
- /// @see gtc_type_precision - typedef vec<3, i32, mediump> mediump_i32vec3; - - /// Medium qualifier 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, mediump> mediump_i32vec4; - - - /// High qualifier 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, highp> highp_i32vec1; - - /// High qualifier 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, highp> highp_i32vec2; - - /// High qualifier 32 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i32, highp> highp_i32vec3; - - /// High qualifier 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, highp> highp_i32vec4; - - - /// 32 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i32, defaultp> i32vec1; - - /// 32 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i32, defaultp> i32vec2; - - /// 32 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i32, defaultp> i32vec3; - - /// 32 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i32, defaultp> i32vec4; - - - - - /// Low qualifier 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, lowp> lowp_i64vec1; - - /// Low qualifier 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, lowp> lowp_i64vec2; - - /// Low qualifier 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, lowp> lowp_i64vec3; - - /// Low qualifier 64 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i64, lowp> lowp_i64vec4; - - - /// Medium qualifier 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, mediump> mediump_i64vec1; - - /// Medium qualifier 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, mediump> mediump_i64vec2; - - /// Medium qualifier 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, mediump> mediump_i64vec3; - - /// Medium qualifier 64 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i64, mediump> mediump_i64vec4; - - - /// High qualifier 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, highp> highp_i64vec1; - - /// High qualifier 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, highp> highp_i64vec2; - - /// High qualifier 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, highp> highp_i64vec3; - - /// High qualifier 64 bit signed integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, i64, highp> highp_i64vec4; - - - /// 64 bit signed integer scalar type. - /// @see gtc_type_precision - typedef vec<1, i64, defaultp> i64vec1; - - /// 64 bit signed integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, i64, defaultp> i64vec2; - - /// 64 bit signed integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, i64, defaultp> i64vec3; - - /// 64 bit signed integer vector of 4 components type. 
- /// @see gtc_type_precision - typedef vec<4, i64, defaultp> i64vec4; - - - ///////////////////////////// - // Unsigned int vector types - - /// Low qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 lowp_uint8; - - /// Low qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 lowp_uint16; - - /// Low qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 lowp_uint32; - - /// Low qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 lowp_uint64; - - /// Low qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 lowp_uint8_t; - - /// Low qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 lowp_uint16_t; - - /// Low qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 lowp_uint32_t; - - /// Low qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 lowp_uint64_t; - - /// Low qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 lowp_u8; - - /// Low qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 lowp_u16; - - /// Low qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 lowp_u32; - - /// Low qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 lowp_u64; - - /// Medium qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 mediump_uint8; - - /// Medium qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 mediump_uint16; - - /// Medium qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 mediump_uint32; - - /// Medium qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 mediump_uint64; - - /// Medium qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 mediump_uint8_t; - - /// Medium qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 mediump_uint16_t; - - /// Medium qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 mediump_uint32_t; - - /// Medium qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 mediump_uint64_t; - - /// Medium qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 mediump_u8; - - /// Medium qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 mediump_u16; - - /// Medium qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 mediump_u32; - - /// Medium qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 mediump_u64; - - /// High qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 highp_uint8; - - /// High qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 highp_uint16; - - /// High qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 highp_uint32; - - /// High qualifier 64 bit unsigned integer type. 
- /// @see gtc_type_precision - typedef detail::uint64 highp_uint64; - - /// High qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 highp_uint8_t; - - /// High qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 highp_uint16_t; - - /// High qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 highp_uint32_t; - - /// High qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 highp_uint64_t; - - /// High qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 highp_u8; - - /// High qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 highp_u16; - - /// High qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 highp_u32; - - /// High qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 highp_u64; - -#if GLM_HAS_EXTENDED_INTEGER_TYPE - using std::uint8_t; - using std::uint16_t; - using std::uint32_t; - using std::uint64_t; -#else - /// Default qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 uint8_t; - - /// Default qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 uint16_t; - - /// Default qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 uint32_t; - - /// Default qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 uint64_t; -#endif - - /// Default qualifier 8 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint8 u8; - - /// Default qualifier 16 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint16 u16; - - /// Default qualifier 32 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint32 u32; - - /// Default qualifier 64 bit unsigned integer type. - /// @see gtc_type_precision - typedef detail::uint64 u64; - - - - - - ////////////////////// - // Float vector types - - /// Single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float float32; - - /// Double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef double float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32_t; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64_t; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_f32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_f64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32_t; - - /// Low 64 bit double-qualifier floating-point scalar. 
- /// @see gtc_type_precision - typedef float64 lowp_float64_t; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_f32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_f64; - - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_float32_t; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_float64_t; - - /// Low 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 lowp_f32; - - /// Low 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 lowp_f64; - - - /// Medium 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 mediump_float32; - - /// Medium 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 mediump_float64; - - /// Medium 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 mediump_float32_t; - - /// Medium 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 mediump_float64_t; - - /// Medium 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 mediump_f32; - - /// Medium 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 mediump_f64; - - - /// High 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 highp_float32; - - /// High 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 highp_float64; - - /// High 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 highp_float32_t; - - /// High 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 highp_float64_t; - - /// High 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 highp_f32; - - /// High 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 highp_f64; - - -#if(defined(GLM_PRECISION_LOWP_FLOAT)) - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_float32_t float32_t; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_float64_t float64_t; - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_f32 f32; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef lowp_f64 f64; - -#elif(defined(GLM_PRECISION_MEDIUMP_FLOAT)) - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef mediump_float32 float32_t; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef mediump_float64 float64_t; - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef mediump_float32 f32; - - /// Default 64 bit double-qualifier floating-point scalar. 
- /// @see gtc_type_precision - typedef mediump_float64 f64; - -#else//(defined(GLM_PRECISION_HIGHP_FLOAT)) - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float32_t float32_t; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float64_t float64_t; - - /// Default 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float32_t f32; - - /// Default 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef highp_float64_t f64; -#endif - - - /// Low single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, lowp> lowp_fvec1; - - /// Low single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, float, lowp> lowp_fvec2; - - /// Low single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, float, lowp> lowp_fvec3; - - /// Low single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, float, lowp> lowp_fvec4; - - - /// Medium single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, mediump> mediump_fvec1; - - /// Medium Single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, float, mediump> mediump_fvec2; - - /// Medium Single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, float, mediump> mediump_fvec3; - - /// Medium Single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, float, mediump> mediump_fvec4; - - - /// High single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, highp> highp_fvec1; - - /// High Single-qualifier floating-point vector of 2 components. - /// @see core_precision - typedef vec<2, float, highp> highp_fvec2; - - /// High Single-qualifier floating-point vector of 3 components. - /// @see core_precision - typedef vec<3, float, highp> highp_fvec3; - - /// High Single-qualifier floating-point vector of 4 components. - /// @see core_precision - typedef vec<4, float, highp> highp_fvec4; - - - /// Low single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f32, lowp> lowp_f32vec1; - - /// Low single-qualifier floating-point vector of 2 components. - /// @see core_precision - typedef vec<2, f32, lowp> lowp_f32vec2; - - /// Low single-qualifier floating-point vector of 3 components. - /// @see core_precision - typedef vec<3, f32, lowp> lowp_f32vec3; - - /// Low single-qualifier floating-point vector of 4 components. - /// @see core_precision - typedef vec<4, f32, lowp> lowp_f32vec4; - - /// Medium single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f32, mediump> mediump_f32vec1; - - /// Medium single-qualifier floating-point vector of 2 components. - /// @see core_precision - typedef vec<2, f32, mediump> mediump_f32vec2; - - /// Medium single-qualifier floating-point vector of 3 components. - /// @see core_precision - typedef vec<3, f32, mediump> mediump_f32vec3; - - /// Medium single-qualifier floating-point vector of 4 components. - /// @see core_precision - typedef vec<4, f32, mediump> mediump_f32vec4; - - /// High single-qualifier floating-point vector of 1 component. 
- /// @see gtc_type_precision - typedef vec<1, f32, highp> highp_f32vec1; - - /// High single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f32, highp> highp_f32vec2; - - /// High single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f32, highp> highp_f32vec3; - - /// High single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f32, highp> highp_f32vec4; - - - /// Low double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, lowp> lowp_f64vec1; - - /// Low double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, lowp> lowp_f64vec2; - - /// Low double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, lowp> lowp_f64vec3; - - /// Low double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, lowp> lowp_f64vec4; - - /// Medium double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, mediump> mediump_f64vec1; - - /// Medium double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, mediump> mediump_f64vec2; - - /// Medium double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, mediump> mediump_f64vec3; - - /// Medium double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, mediump> mediump_f64vec4; - - /// High double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, highp> highp_f64vec1; - - /// High double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, highp> highp_f64vec2; - - /// High double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, highp> highp_f64vec3; - - /// High double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, highp> highp_f64vec4; - - - - ////////////////////// - // Float matrix types - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef lowp_f32 lowp_fmat1x1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, lowp> lowp_fmat2x2; - - /// Low single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, lowp> lowp_fmat2x3; - - /// Low single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, lowp> lowp_fmat2x4; - - /// Low single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, lowp> lowp_fmat3x2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, lowp> lowp_fmat3x3; - - /// Low single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, lowp> lowp_fmat3x4; - - /// Low single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, lowp> lowp_fmat4x2; - - /// Low single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, lowp> lowp_fmat4x3; - - /// Low single-qualifier floating-point 4x4 matrix. 
- /// @see gtc_type_precision - typedef mat<4, 4, f32, lowp> lowp_fmat4x4; - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef lowp_fmat1x1 lowp_fmat1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef lowp_fmat2x2 lowp_fmat2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef lowp_fmat3x3 lowp_fmat3; - - /// Low single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef lowp_fmat4x4 lowp_fmat4; - - - /// Medium single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef mediump_f32 mediump_fmat1x1; - - /// Medium single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, mediump> mediump_fmat2x2; - - /// Medium single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, mediump> mediump_fmat2x3; - - /// Medium single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, mediump> mediump_fmat2x4; - - /// Medium single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, mediump> mediump_fmat3x2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, mediump> mediump_fmat3x3; - - /// Medium single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, mediump> mediump_fmat3x4; - - /// Medium single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, mediump> mediump_fmat4x2; - - /// Medium single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, mediump> mediump_fmat4x3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, mediump> mediump_fmat4x4; - - /// Medium single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef mediump_fmat1x1 mediump_fmat1; - - /// Medium single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mediump_fmat2x2 mediump_fmat2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mediump_fmat3x3 mediump_fmat3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mediump_fmat4x4 mediump_fmat4; - - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef highp_f32 highp_fmat1x1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, highp> highp_fmat2x2; - - /// High single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, highp> highp_fmat2x3; - - /// High single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, highp> highp_fmat2x4; - - /// High single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, highp> highp_fmat3x2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, highp> highp_fmat3x3; - - /// High single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, highp> highp_fmat3x4; - - /// High single-qualifier floating-point 4x2 matrix. 
- /// @see gtc_type_precision - typedef mat<4, 2, f32, highp> highp_fmat4x2; - - /// High single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, highp> highp_fmat4x3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, highp> highp_fmat4x4; - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef highp_fmat1x1 highp_fmat1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef highp_fmat2x2 highp_fmat2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef highp_fmat3x3 highp_fmat3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef highp_fmat4x4 highp_fmat4; - - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 lowp_f32mat1x1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, lowp> lowp_f32mat2x2; - - /// Low single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, lowp> lowp_f32mat2x3; - - /// Low single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, lowp> lowp_f32mat2x4; - - /// Low single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, lowp> lowp_f32mat3x2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, lowp> lowp_f32mat3x3; - - /// Low single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, lowp> lowp_f32mat3x4; - - /// Low single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, lowp> lowp_f32mat4x2; - - /// Low single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, lowp> lowp_f32mat4x3; - - /// Low single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, lowp> lowp_f32mat4x4; - - /// Low single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 lowp_f32mat1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef lowp_f32mat2x2 lowp_f32mat2; - - /// Low single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef lowp_f32mat3x3 lowp_f32mat3; - - /// Low single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef lowp_f32mat4x4 lowp_f32mat4; - - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 mediump_f32mat1x1; - - /// Low single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, mediump> mediump_f32mat2x2; - - /// Medium single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, mediump> mediump_f32mat2x3; - - /// Medium single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, mediump> mediump_f32mat2x4; - - /// Medium single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, mediump> mediump_f32mat3x2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, mediump> mediump_f32mat3x3; - - /// Medium single-qualifier floating-point 3x4 matrix. 
- /// @see gtc_type_precision - typedef mat<3, 4, f32, mediump> mediump_f32mat3x4; - - /// Medium single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, mediump> mediump_f32mat4x2; - - /// Medium single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, mediump> mediump_f32mat4x3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, mediump> mediump_f32mat4x4; - - /// Medium single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f32mat1; - - /// Medium single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mediump_f32mat2x2 mediump_f32mat2; - - /// Medium single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mediump_f32mat3x3 mediump_f32mat3; - - /// Medium single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mediump_f32mat4x4 mediump_f32mat4; - - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 highp_f32mat1x1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, highp> highp_f32mat2x2; - - /// High single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, highp> highp_f32mat2x3; - - /// High single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, highp> highp_f32mat2x4; - - /// High single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, highp> highp_f32mat3x2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, highp> highp_f32mat3x3; - - /// High single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, highp> highp_f32mat3x4; - - /// High single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, highp> highp_f32mat4x2; - - /// High single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, highp> highp_f32mat4x3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, highp> highp_f32mat4x4; - - /// High single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f32mat1; - - /// High single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef highp_f32mat2x2 highp_f32mat2; - - /// High single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef highp_f32mat3x3 highp_f32mat3; - - /// High single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef highp_f32mat4x4 highp_f32mat4; - - - /// Low double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 lowp_f64mat1x1; - - /// Low double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, lowp> lowp_f64mat2x2; - - /// Low double-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f64, lowp> lowp_f64mat2x3; - - /// Low double-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f64, lowp> lowp_f64mat2x4; - - /// Low double-qualifier floating-point 3x2 matrix. 
- /// @see gtc_type_precision - typedef mat<3, 2, f64, lowp> lowp_f64mat3x2; - - /// Low double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, lowp> lowp_f64mat3x3; - - /// Low double-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f64, lowp> lowp_f64mat3x4; - - /// Low double-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f64, lowp> lowp_f64mat4x2; - - /// Low double-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f64, lowp> lowp_f64mat4x3; - - /// Low double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, lowp> lowp_f64mat4x4; - - /// Low double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef lowp_f64mat1x1 lowp_f64mat1; - - /// Low double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef lowp_f64mat2x2 lowp_f64mat2; - - /// Low double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef lowp_f64mat3x3 lowp_f64mat3; - - /// Low double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef lowp_f64mat4x4 lowp_f64mat4; - - - /// Medium double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 Highp_f64mat1x1; - - /// Medium double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, mediump> mediump_f64mat2x2; - - /// Medium double-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f64, mediump> mediump_f64mat2x3; - - /// Medium double-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f64, mediump> mediump_f64mat2x4; - - /// Medium double-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f64, mediump> mediump_f64mat3x2; - - /// Medium double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, mediump> mediump_f64mat3x3; - - /// Medium double-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f64, mediump> mediump_f64mat3x4; - - /// Medium double-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f64, mediump> mediump_f64mat4x2; - - /// Medium double-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f64, mediump> mediump_f64mat4x3; - - /// Medium double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, mediump> mediump_f64mat4x4; - - /// Medium double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef mediump_f64mat1x1 mediump_f64mat1; - - /// Medium double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mediump_f64mat2x2 mediump_f64mat2; - - /// Medium double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mediump_f64mat3x3 mediump_f64mat3; - - /// Medium double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mediump_f64mat4x4 mediump_f64mat4; - - /// High double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 highp_f64mat1x1; - - /// High double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, highp> highp_f64mat2x2; - - /// High double-qualifier floating-point 2x3 matrix. 
- /// @see gtc_type_precision - typedef mat<2, 3, f64, highp> highp_f64mat2x3; - - /// High double-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f64, highp> highp_f64mat2x4; - - /// High double-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f64, highp> highp_f64mat3x2; - - /// High double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, highp> highp_f64mat3x3; - - /// High double-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f64, highp> highp_f64mat3x4; - - /// High double-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f64, highp> highp_f64mat4x2; - - /// High double-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f64, highp> highp_f64mat4x3; - - /// High double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, highp> highp_f64mat4x4; - - /// High double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef highp_f64mat1x1 highp_f64mat1; - - /// High double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef highp_f64mat2x2 highp_f64mat2; - - /// High double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef highp_f64mat3x3 highp_f64mat3; - - /// High double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef highp_f64mat4x4 highp_f64mat4; - - - - - /// Low qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, lowp> lowp_u8vec1; - - /// Low qualifier 8 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u8, lowp> lowp_u8vec2; - - /// Low qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, lowp> lowp_u8vec3; - - /// Low qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, lowp> lowp_u8vec4; - - - /// Medium qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, mediump> mediump_u8vec1; - - /// Medium qualifier 8 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u8, mediump> mediump_u8vec2; - - /// Medium qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, mediump> mediump_u8vec3; - - /// Medium qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, mediump> mediump_u8vec4; - - - /// High qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, highp> highp_u8vec1; - - /// High qualifier 8 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u8, highp> highp_u8vec2; - - /// High qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, highp> highp_u8vec3; - - /// High qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, highp> highp_u8vec4; - - - - /// Default qualifier 8 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u8, defaultp> u8vec1; - - /// Default qualifier 8 bit unsigned integer vector of 2 components type. 
- /// @see gtc_type_precision - typedef vec<2, u8, defaultp> u8vec2; - - /// Default qualifier 8 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u8, defaultp> u8vec3; - - /// Default qualifier 8 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u8, defaultp> u8vec4; - - - - - /// Low qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, lowp> lowp_u16vec1; - - /// Low qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, lowp> lowp_u16vec2; - - /// Low qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, lowp> lowp_u16vec3; - - /// Low qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, lowp> lowp_u16vec4; - - - /// Medium qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, mediump> mediump_u16vec1; - - /// Medium qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, mediump> mediump_u16vec2; - - /// Medium qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, mediump> mediump_u16vec3; - - /// Medium qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, mediump> mediump_u16vec4; - - - /// High qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, highp> highp_u16vec1; - - /// High qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, highp> highp_u16vec2; - - /// High qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, highp> highp_u16vec3; - - /// High qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, highp> highp_u16vec4; - - - - - /// Default qualifier 16 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u16, defaultp> u16vec1; - - /// Default qualifier 16 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u16, defaultp> u16vec2; - - /// Default qualifier 16 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u16, defaultp> u16vec3; - - /// Default qualifier 16 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u16, defaultp> u16vec4; - - - - /// Low qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, lowp> lowp_u32vec1; - - /// Low qualifier 32 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u32, lowp> lowp_u32vec2; - - /// Low qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, lowp> lowp_u32vec3; - - /// Low qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, lowp> lowp_u32vec4; - - - /// Medium qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, mediump> mediump_u32vec1; - - /// Medium qualifier 32 bit unsigned integer vector of 2 components type. 
- /// @see gtc_type_precision - typedef vec<2, u32, mediump> mediump_u32vec2; - - /// Medium qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, mediump> mediump_u32vec3; - - /// Medium qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, mediump> mediump_u32vec4; - - - /// High qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, highp> highp_u32vec1; - - /// High qualifier 32 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u32, highp> highp_u32vec2; - - /// High qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, highp> highp_u32vec3; - - /// High qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, highp> highp_u32vec4; - - - - /// Default qualifier 32 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u32, defaultp> u32vec1; - - /// Default qualifier 32 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u32, defaultp> u32vec2; - - /// Default qualifier 32 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u32, defaultp> u32vec3; - - /// Default qualifier 32 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u32, defaultp> u32vec4; - - - - - /// Low qualifier 64 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u64, lowp> lowp_u64vec1; - - /// Low qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, lowp> lowp_u64vec2; - - /// Low qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, lowp> lowp_u64vec3; - - /// Low qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, lowp> lowp_u64vec4; - - - /// Medium qualifier 64 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u64, mediump> mediump_u64vec1; - - /// Medium qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, mediump> mediump_u64vec2; - - /// Medium qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, mediump> mediump_u64vec3; - - /// Medium qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, mediump> mediump_u64vec4; - - - /// High qualifier 64 bit unsigned integer scalar type. - /// @see gtc_type_precision - typedef vec<1, u64, highp> highp_u64vec1; - - /// High qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, highp> highp_u64vec2; - - /// High qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, highp> highp_u64vec3; - - /// High qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, highp> highp_u64vec4; - - - - - /// Default qualifier 64 bit unsigned integer scalar type. 
- /// @see gtc_type_precision - typedef vec<1, u64, defaultp> u64vec1; - - /// Default qualifier 64 bit unsigned integer vector of 2 components type. - /// @see gtc_type_precision - typedef vec<2, u64, defaultp> u64vec2; - - /// Default qualifier 64 bit unsigned integer vector of 3 components type. - /// @see gtc_type_precision - typedef vec<3, u64, defaultp> u64vec3; - - /// Default qualifier 64 bit unsigned integer vector of 4 components type. - /// @see gtc_type_precision - typedef vec<4, u64, defaultp> u64vec4; - - - ////////////////////// - // Float vector types - - /// 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 float32_t; - - /// 32 bit single-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float32 f32; - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 float64_t; - - /// 64 bit double-qualifier floating-point scalar. - /// @see gtc_type_precision - typedef float64 f64; -# endif//GLM_FORCE_SINGLE_ONLY - - /// Single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, float, defaultp> fvec1; - - /// Single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, float, defaultp> fvec2; - - /// Single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, float, defaultp> fvec3; - - /// Single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, float, defaultp> fvec4; - - - /// Single-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f32, defaultp> f32vec1; - - /// Single-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f32, defaultp> f32vec2; - - /// Single-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f32, defaultp> f32vec3; - - /// Single-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f32, defaultp> f32vec4; - -# ifndef GLM_FORCE_SINGLE_ONLY - /// Double-qualifier floating-point vector of 1 component. - /// @see gtc_type_precision - typedef vec<1, f64, defaultp> f64vec1; - - /// Double-qualifier floating-point vector of 2 components. - /// @see gtc_type_precision - typedef vec<2, f64, defaultp> f64vec2; - - /// Double-qualifier floating-point vector of 3 components. - /// @see gtc_type_precision - typedef vec<3, f64, defaultp> f64vec3; - - /// Double-qualifier floating-point vector of 4 components. - /// @see gtc_type_precision - typedef vec<4, f64, defaultp> f64vec4; -# endif//GLM_FORCE_SINGLE_ONLY - - - ////////////////////// - // Float matrix types - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 fmat1; - - /// Single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> fmat2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> fmat3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> fmat4; - - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 fmat1x1; - - /// Single-qualifier floating-point 2x2 matrix. 
- /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> fmat2x2; - - /// Single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, defaultp> fmat2x3; - - /// Single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, defaultp> fmat2x4; - - /// Single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, defaultp> fmat3x2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> fmat3x3; - - /// Single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, defaultp> fmat3x4; - - /// Single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, defaultp> fmat4x2; - - /// Single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, defaultp> fmat4x3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> fmat4x4; - - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f32mat1; - - /// Single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> f32mat2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> f32mat3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> f32mat4; - - - /// Single-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f32 f32mat1x1; - - /// Single-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f32, defaultp> f32mat2x2; - - /// Single-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f32, defaultp> f32mat2x3; - - /// Single-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f32, defaultp> f32mat2x4; - - /// Single-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f32, defaultp> f32mat3x2; - - /// Single-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f32, defaultp> f32mat3x3; - - /// Single-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f32, defaultp> f32mat3x4; - - /// Single-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f32, defaultp> f32mat4x2; - - /// Single-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f32, defaultp> f32mat4x3; - - /// Single-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f32, defaultp> f32mat4x4; - - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef detail::tmat1x1 f64mat1; - - /// Double-qualifier floating-point 2x2 matrix. - /// @see gtc_type_precision - typedef mat<2, 2, f64, defaultp> f64mat2; - - /// Double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, defaultp> f64mat3; - - /// Double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, defaultp> f64mat4; - - - /// Double-qualifier floating-point 1x1 matrix. - /// @see gtc_type_precision - //typedef f64 f64mat1x1; - - /// Double-qualifier floating-point 2x2 matrix. 
- /// @see gtc_type_precision - typedef mat<2, 2, f64, defaultp> f64mat2x2; - - /// Double-qualifier floating-point 2x3 matrix. - /// @see gtc_type_precision - typedef mat<2, 3, f64, defaultp> f64mat2x3; - - /// Double-qualifier floating-point 2x4 matrix. - /// @see gtc_type_precision - typedef mat<2, 4, f64, defaultp> f64mat2x4; - - /// Double-qualifier floating-point 3x2 matrix. - /// @see gtc_type_precision - typedef mat<3, 2, f64, defaultp> f64mat3x2; - - /// Double-qualifier floating-point 3x3 matrix. - /// @see gtc_type_precision - typedef mat<3, 3, f64, defaultp> f64mat3x3; - - /// Double-qualifier floating-point 3x4 matrix. - /// @see gtc_type_precision - typedef mat<3, 4, f64, defaultp> f64mat3x4; - - /// Double-qualifier floating-point 4x2 matrix. - /// @see gtc_type_precision - typedef mat<4, 2, f64, defaultp> f64mat4x2; - - /// Double-qualifier floating-point 4x3 matrix. - /// @see gtc_type_precision - typedef mat<4, 3, f64, defaultp> f64mat4x3; - - /// Double-qualifier floating-point 4x4 matrix. - /// @see gtc_type_precision - typedef mat<4, 4, f64, defaultp> f64mat4x4; - -# endif//GLM_FORCE_SINGLE_ONLY - - ////////////////////////// - // Quaternion types - - /// Single-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua f32quat; - - /// Low single-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua lowp_f32quat; - - /// Low double-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua lowp_f64quat; - - /// Medium single-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua mediump_f32quat; - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Medium double-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua mediump_f64quat; - - /// High single-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua highp_f32quat; - - /// High double-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua highp_f64quat; - - /// Double-qualifier floating-point quaternion. - /// @see gtc_type_precision - typedef qua f64quat; - -# endif//GLM_FORCE_SINGLE_ONLY - - /// @} -}//namespace glm - -#include "type_precision.inl" diff --git a/third_party/glm/gtc/type_precision.inl b/third_party/glm/gtc/type_precision.inl deleted file mode 100755 index ae80912..0000000 --- a/third_party/glm/gtc/type_precision.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref gtc_precision - -namespace glm -{ - -} diff --git a/third_party/glm/gtc/type_ptr.hpp b/third_party/glm/gtc/type_ptr.hpp deleted file mode 100755 index d7e625a..0000000 --- a/third_party/glm/gtc/type_ptr.hpp +++ /dev/null @@ -1,230 +0,0 @@ -/// @ref gtc_type_ptr -/// @file glm/gtc/type_ptr.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtc_type_ptr GLM_GTC_type_ptr -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Handles the interaction between pointers and vector, matrix types. -/// -/// This extension defines an overloaded function, glm::value_ptr. It returns -/// a pointer to the memory layout of the object. Matrix types store their values -/// in column-major order. -/// -/// This is useful for uploading data to matrices or copying data to buffer objects. 
-/// -/// Example: -/// @code -/// #include -/// #include -/// -/// glm::vec3 aVector(3); -/// glm::mat4 someMatrix(1.0); -/// -/// glUniform3fv(uniformLoc, 1, glm::value_ptr(aVector)); -/// glUniformMatrix4fv(uniformMatrixLoc, 1, GL_FALSE, glm::value_ptr(someMatrix)); -/// @endcode -/// -/// need to be included to use the features of this extension. - -#pragma once - -// Dependency: -#include "../gtc/quaternion.hpp" -#include "../gtc/vec1.hpp" -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../mat2x2.hpp" -#include "../mat2x3.hpp" -#include "../mat2x4.hpp" -#include "../mat3x2.hpp" -#include "../mat3x3.hpp" -#include "../mat3x4.hpp" -#include "../mat4x2.hpp" -#include "../mat4x3.hpp" -#include "../mat4x4.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_type_ptr extension included") -#endif - -namespace glm -{ - /// @addtogroup gtc_type_ptr - /// @{ - - /// Return the constant address to the data of the input parameter. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL typename genType::value_type const * value_ptr(genType const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<1, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<2, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<3, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<1, T, Q> make_vec1(vec<4, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<1, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<2, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<3, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<2, T, Q> make_vec2(vec<4, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<1, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<2, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<3, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<3, T, Q> make_vec3(vec<4, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<1, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<2, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<3, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<4, T, Q> make_vec4(vec<4, T, Q> const& v); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<2, T, defaultp> make_vec2(T const * const ptr); - - /// Build a vector from a pointer. 
- /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<3, T, defaultp> make_vec3(T const * const ptr); - - /// Build a vector from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL vec<4, T, defaultp> make_vec4(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2x2(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<2, 3, T, defaultp> make_mat2x3(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<2, 4, T, defaultp> make_mat2x4(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<3, 2, T, defaultp> make_mat3x2(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3x3(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<3, 4, T, defaultp> make_mat3x4(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<4, 2, T, defaultp> make_mat4x2(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<4, 3, T, defaultp> make_mat4x3(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4x4(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<2, 2, T, defaultp> make_mat2(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<3, 3, T, defaultp> make_mat3(T const * const ptr); - - /// Build a matrix from a pointer. - /// @see gtc_type_ptr - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> make_mat4(T const * const ptr); - - /// Build a quaternion from a pointer. 
- /// @see gtc_type_ptr - template - GLM_FUNC_DECL qua make_quat(T const * const ptr); - - /// @} -}//namespace glm - -#include "type_ptr.inl" diff --git a/third_party/glm/gtc/type_ptr.inl b/third_party/glm/gtc/type_ptr.inl deleted file mode 100755 index 71df4d3..0000000 --- a/third_party/glm/gtc/type_ptr.inl +++ /dev/null @@ -1,386 +0,0 @@ -/// @ref gtc_type_ptr - -#include - -namespace glm -{ - /// @addtogroup gtc_type_ptr - /// @{ - - template - GLM_FUNC_QUALIFIER T const* value_ptr(vec<2, T, Q> const& v) - { - return &(v.x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(vec<2, T, Q>& v) - { - return &(v.x); - } - - template - GLM_FUNC_QUALIFIER T const * value_ptr(vec<3, T, Q> const& v) - { - return &(v.x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(vec<3, T, Q>& v) - { - return &(v.x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(vec<4, T, Q> const& v) - { - return &(v.x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(vec<4, T, Q>& v) - { - return &(v.x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 2, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 2, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 3, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 3, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 4, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 4, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 3, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 3, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 2, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 2, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<2, 4, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<2, 4, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 2, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<4, 2, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<3, 4, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(mat<3, 4, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const* value_ptr(mat<4, 3, T, Q> const& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T * value_ptr(mat<4, 3, T, Q>& m) - { - return &(m[0].x); - } - - template - GLM_FUNC_QUALIFIER T const * value_ptr(qua const& q) - { - return &(q[0]); - } - - template - GLM_FUNC_QUALIFIER T* value_ptr(qua& q) - { - return &(q[0]); - } - - template - inline vec<1, T, Q> make_vec1(vec<1, T, Q> const& v) - { - return v; - } - - template - inline vec<1, T, Q> make_vec1(vec<2, T, Q> const& v) - { - return vec<1, T, Q>(v); - } - - template - inline vec<1, T, Q> make_vec1(vec<3, T, Q> const& v) - { - return vec<1, T, Q>(v); - } - - template - inline vec<1, T, Q> make_vec1(vec<4, T, Q> const& v) - { - return vec<1, T, Q>(v); - } - - template - inline vec<2, T, Q> make_vec2(vec<1, T, Q> const& v) - { - return vec<2, T, Q>(v.x, static_cast(0)); - } - - template - inline vec<2, T, Q> 
make_vec2(vec<2, T, Q> const& v) - { - return v; - } - - template - inline vec<2, T, Q> make_vec2(vec<3, T, Q> const& v) - { - return vec<2, T, Q>(v); - } - - template - inline vec<2, T, Q> make_vec2(vec<4, T, Q> const& v) - { - return vec<2, T, Q>(v); - } - - template - inline vec<3, T, Q> make_vec3(vec<1, T, Q> const& v) - { - return vec<3, T, Q>(v.x, static_cast(0), static_cast(0)); - } - - template - inline vec<3, T, Q> make_vec3(vec<2, T, Q> const& v) - { - return vec<3, T, Q>(v.x, v.y, static_cast(0)); - } - - template - inline vec<3, T, Q> make_vec3(vec<3, T, Q> const& v) - { - return v; - } - - template - inline vec<3, T, Q> make_vec3(vec<4, T, Q> const& v) - { - return vec<3, T, Q>(v); - } - - template - inline vec<4, T, Q> make_vec4(vec<1, T, Q> const& v) - { - return vec<4, T, Q>(v.x, static_cast(0), static_cast(0), static_cast(1)); - } - - template - inline vec<4, T, Q> make_vec4(vec<2, T, Q> const& v) - { - return vec<4, T, Q>(v.x, v.y, static_cast(0), static_cast(1)); - } - - template - inline vec<4, T, Q> make_vec4(vec<3, T, Q> const& v) - { - return vec<4, T, Q>(v.x, v.y, v.z, static_cast(1)); - } - - template - inline vec<4, T, Q> make_vec4(vec<4, T, Q> const& v) - { - return v; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, defaultp> make_vec2(T const *const ptr) - { - vec<2, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(vec<2, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, defaultp> make_vec3(T const *const ptr) - { - vec<3, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(vec<3, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, defaultp> make_vec4(T const *const ptr) - { - vec<4, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(vec<4, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2x2(T const *const ptr) - { - mat<2, 2, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<2, 2, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, defaultp> make_mat2x3(T const *const ptr) - { - mat<2, 3, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<2, 3, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, defaultp> make_mat2x4(T const *const ptr) - { - mat<2, 4, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<2, 4, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, defaultp> make_mat3x2(T const *const ptr) - { - mat<3, 2, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<3, 2, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3x3(T const *const ptr) - { - mat<3, 3, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<3, 3, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, defaultp> make_mat3x4(T const *const ptr) - { - mat<3, 4, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<3, 4, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, defaultp> make_mat4x2(T const *const ptr) - { - mat<4, 2, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<4, 2, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, defaultp> make_mat4x3(T const *const ptr) - { - mat<4, 3, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<4, 3, T, defaultp>)); - return Result; - } - - template - 
GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4x4(T const *const ptr) - { - mat<4, 4, T, defaultp> Result; - memcpy(value_ptr(Result), ptr, sizeof(mat<4, 4, T, defaultp>)); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> make_mat2(T const *const ptr) - { - return make_mat2x2(ptr); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> make_mat3(T const *const ptr) - { - return make_mat3x3(ptr); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> make_mat4(T const *const ptr) - { - return make_mat4x4(ptr); - } - - template - GLM_FUNC_QUALIFIER qua make_quat(T const *const ptr) - { - qua Result; - memcpy(value_ptr(Result), ptr, sizeof(qua)); - return Result; - } - - /// @} -}//namespace glm - diff --git a/third_party/glm/gtc/ulp.hpp b/third_party/glm/gtc/ulp.hpp deleted file mode 100755 index 0d80a75..0000000 --- a/third_party/glm/gtc/ulp.hpp +++ /dev/null @@ -1,152 +0,0 @@ -/// @ref gtc_ulp -/// @file glm/gtc/ulp.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_ulp GLM_GTC_ulp -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Allow the measurement of the accuracy of a function against a reference -/// implementation. This extension works on floating-point data and provide results -/// in ULP. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/_vectorize.hpp" -#include "../ext/scalar_int_sized.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_ulp extension included") -#endif - -namespace glm -{ - /// Return the next ULP value(s) after the input value(s). - /// - /// @tparam genType A floating-point scalar type. - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL genType next_float(genType x); - - /// Return the previous ULP value(s) before the input value(s). - /// - /// @tparam genType A floating-point scalar type. - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL genType prev_float(genType x); - - /// Return the value(s) ULP distance after the input value(s). - /// - /// @tparam genType A floating-point scalar type. - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL genType next_float(genType x, int ULPs); - - /// Return the value(s) ULP distance before the input value(s). - /// - /// @tparam genType A floating-point scalar type. - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL genType prev_float(genType x, int ULPs); - - /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. - /// - /// @see gtc_ulp - GLM_FUNC_DECL int float_distance(float x, float y); - - /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. - /// - /// @see gtc_ulp - GLM_FUNC_DECL int64 float_distance(double x, double y); - - /// Return the next ULP value(s) after the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec next_float(vec const& x); - - /// Return the value(s) ULP distance after the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec next_float(vec const& x, int ULPs); - - /// Return the value(s) ULP distance after the input value(s). 
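The gtc_type_ptr helpers deleted above expose GLM vectors and matrices as contiguous scalars (value_ptr) and rebuild them from raw pointers (make_vec*/make_mat*/make_quat). A minimal usage sketch, assuming the stock <glm/gtc/type_ptr.hpp> include layout rather than this repository's third_party path:

    #include <cstring>
    #include <glm/glm.hpp>
    #include <glm/gtc/type_ptr.hpp>

    int main()
    {
        glm::mat4 m(1.0f); // identity

        // value_ptr() yields a pointer to 16 contiguous, column-major floats,
        // e.g. for glUniformMatrix4fv or a Vulkan push-constant update.
        float raw[16];
        std::memcpy(raw, glm::value_ptr(m), sizeof(raw));

        // make_mat4() does the inverse and memcpy's the 16 floats back into a mat4.
        glm::mat4 rebuilt = glm::make_mat4(raw);
        return rebuilt == m ? 0 : 1;
    }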
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec next_float(vec const& x, vec const& ULPs); - - /// Return the previous ULP value(s) before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec prev_float(vec const& x); - - /// Return the value(s) ULP distance before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec prev_float(vec const& x, int ULPs); - - /// Return the value(s) ULP distance before the input value(s). - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec prev_float(vec const& x, vec const& ULPs); - - /// Return the distance in the number of ULP between 2 single-precision floating-point scalars. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec float_distance(vec const& x, vec const& y); - - /// Return the distance in the number of ULP between 2 double-precision floating-point scalars. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam Q Value from qualifier enum - /// - /// @see gtc_ulp - template - GLM_FUNC_DECL vec float_distance(vec const& x, vec const& y); - - /// @} -}//namespace glm - -#include "ulp.inl" diff --git a/third_party/glm/gtc/ulp.inl b/third_party/glm/gtc/ulp.inl deleted file mode 100755 index 4ecbd3f..0000000 --- a/third_party/glm/gtc/ulp.inl +++ /dev/null @@ -1,173 +0,0 @@ -/// @ref gtc_ulp - -#include "../ext/scalar_ulp.hpp" - -namespace glm -{ - template<> - GLM_FUNC_QUALIFIER float next_float(float x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::max()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafterf(x, FLT_MAX); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafterf(x, FLT_MAX); -# else - return nextafterf(x, FLT_MAX); -# endif - } - - template<> - GLM_FUNC_QUALIFIER double next_float(double x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::max()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafter(x, std::numeric_limits::max()); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafter(x, DBL_MAX); -# else - return nextafter(x, DBL_MAX); -# endif - } - - template - GLM_FUNC_QUALIFIER T next_float(T x, int ULPs) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'next_float' only accept floating-point input"); - assert(ULPs >= 0); - - T temp = x; - for (int i = 0; i < ULPs; ++i) - temp = next_float(temp); - return temp; - } - - GLM_FUNC_QUALIFIER float prev_float(float x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, 
std::numeric_limits::min()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return detail::nextafterf(x, FLT_MIN); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafterf(x, FLT_MIN); -# else - return nextafterf(x, FLT_MIN); -# endif - } - - GLM_FUNC_QUALIFIER double prev_float(double x) - { -# if GLM_HAS_CXX11_STL - return std::nextafter(x, std::numeric_limits::min()); -# elif((GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))) - return _nextafter(x, DBL_MIN); -# elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID) - return __builtin_nextafter(x, DBL_MIN); -# else - return nextafter(x, DBL_MIN); -# endif - } - - template - GLM_FUNC_QUALIFIER T prev_float(T x, int ULPs) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'prev_float' only accept floating-point input"); - assert(ULPs >= 0); - - T temp = x; - for (int i = 0; i < ULPs; ++i) - temp = prev_float(temp); - return temp; - } - - GLM_FUNC_QUALIFIER int float_distance(float x, float y) - { - detail::float_t const a(x); - detail::float_t const b(y); - - return abs(a.i - b.i); - } - - GLM_FUNC_QUALIFIER int64 float_distance(double x, double y) - { - detail::float_t const a(x); - detail::float_t const b(y); - - return abs(a.i - b.i); - } - - template - GLM_FUNC_QUALIFIER vec next_float(vec const& x) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = next_float(x[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec next_float(vec const& x, int ULPs) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = next_float(x[i], ULPs); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec next_float(vec const& x, vec const& ULPs) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = next_float(x[i], ULPs[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec prev_float(vec const& x) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prev_float(x[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec prev_float(vec const& x, int ULPs) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prev_float(x[i], ULPs); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec prev_float(vec const& x, vec const& ULPs) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = prev_float(x[i], ULPs[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec float_distance(vec const& x, vec const& y) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = float_distance(x[i], y[i]); - return Result; - } - - template - GLM_FUNC_QUALIFIER vec float_distance(vec const& x, vec const& y) - { - vec Result; - for (length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = float_distance(x[i], y[i]); - return Result; - } -}//namespace glm - diff --git a/third_party/glm/gtc/vec1.hpp b/third_party/glm/gtc/vec1.hpp deleted file mode 100755 index c20be87..0000000 --- a/third_party/glm/gtc/vec1.hpp +++ /dev/null @@ -1,30 +0,0 @@ -/// @ref gtc_vec1 -/// @file glm/gtc/vec1.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtc_vec1 GLM_GTC_vec1 -/// @ingroup gtc -/// -/// Include to use the features of this extension. -/// -/// Add vec1, ivec1, uvec1 and bvec1 types. 
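The GTC_ulp functions deleted above step a float to its neighbouring representable values and measure the gap between two floats in ULPs. A small sketch of typical use, again assuming the stock GLM include paths:

    #include <glm/glm.hpp>
    #include <glm/gtc/ulp.hpp>

    int main()
    {
        float x = 1.0f;
        float up   = glm::next_float(x); // smallest float strictly greater than x
        float down = glm::prev_float(x); // largest float strictly smaller than x

        // float_distance counts the representable steps between two values:
        // one step down plus one step up gives a distance of 2.
        int d = glm::float_distance(down, up);
        return (down < x && x < up && d == 2) ? 0 : 1;
    }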
- -#pragma once - -// Dependency: -#include "../ext/vector_bool1.hpp" -#include "../ext/vector_bool1_precision.hpp" -#include "../ext/vector_float1.hpp" -#include "../ext/vector_float1_precision.hpp" -#include "../ext/vector_double1.hpp" -#include "../ext/vector_double1_precision.hpp" -#include "../ext/vector_int1.hpp" -#include "../ext/vector_int1_precision.hpp" -#include "../ext/vector_uint1.hpp" -#include "../ext/vector_uint1_precision.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# pragma message("GLM: GLM_GTC_vec1 extension included") -#endif - diff --git a/third_party/glm/gtx/associated_min_max.hpp b/third_party/glm/gtx/associated_min_max.hpp deleted file mode 100755 index d1a41c0..0000000 --- a/third_party/glm/gtx/associated_min_max.hpp +++ /dev/null @@ -1,207 +0,0 @@ -/// @ref gtx_associated_min_max -/// @file glm/gtx/associated_min_max.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_associated_min_max GLM_GTX_associated_min_max -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// @brief Min and max functions that return associated values not the compared onces. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_associated_min_max is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_associated_min_max extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_associated_min_max - /// @{ - - /// Minimum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMin(T x, U a, T y, U b); - - /// Minimum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec<2, U, Q> associatedMin( - vec const& x, vec const& a, - vec const& y, vec const& b); - - /// Minimum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - T x, const vec& a, - T y, const vec& b); - - /// Minimum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - vec const& x, U a, - vec const& y, U b); - - /// Minimum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMin( - T x, U a, - T y, U b, - T z, U c); - - /// Minimum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c); - - /// Minimum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMin( - T x, U a, - T y, U b, - T z, U c, - T w, U d); - - /// Minimum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c, - vec const& w, vec const& d); - - /// Minimum 
comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d); - - /// Minimum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMin( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMax(T x, U a, T y, U b); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec<2, U, Q> associatedMax( - vec const& x, vec const& a, - vec const& y, vec const& b); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - T x, vec const& a, - T y, vec const& b); - - /// Maximum comparison between 2 variables and returns 2 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, U a, - vec const& y, U b); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMax( - T x, U a, - T y, U b, - T z, U c); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c); - - /// Maximum comparison between 3 variables and returns 3 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL U associatedMax( - T x, U a, - T y, U b, - T z, U c, - T w, U d); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c, - vec const& w, vec const& d); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d); - - /// Maximum comparison between 4 variables and returns 4 associated variable values - /// @see gtx_associated_min_max - template - GLM_FUNC_DECL vec associatedMax( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d); - - /// @} -} //namespace glm - -#include "associated_min_max.inl" diff --git a/third_party/glm/gtx/associated_min_max.inl b/third_party/glm/gtx/associated_min_max.inl deleted file mode 100755 index 5186c47..0000000 --- a/third_party/glm/gtx/associated_min_max.inl +++ /dev/null @@ 
-1,354 +0,0 @@ -/// @ref gtx_associated_min_max - -namespace glm{ - -// Min comparison between 2 variables -template -GLM_FUNC_QUALIFIER U associatedMin(T x, U a, T y, U b) -{ - return x < y ? a : b; -} - -template -GLM_FUNC_QUALIFIER vec<2, U, Q> associatedMin -( - vec const& x, vec const& a, - vec const& y, vec const& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] < y[i] ? a[i] : b[i]; - return Result; -} - -template -GLM_FUNC_QUALIFIER vec associatedMin -( - T x, const vec& a, - T y, const vec& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x < y ? a[i] : b[i]; - return Result; -} - -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, U a, - vec const& y, U b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] < y[i] ? a : b; - return Result; -} - -// Min comparison between 3 variables -template -GLM_FUNC_QUALIFIER U associatedMin -( - T x, U a, - T y, U b, - T z, U c -) -{ - U Result = x < y ? (x < z ? a : c) : (y < z ? b : c); - return Result; -} - -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] < y[i] ? (x[i] < z[i] ? a[i] : c[i]) : (y[i] < z[i] ? b[i] : c[i]); - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER U associatedMin -( - T x, U a, - T y, U b, - T z, U c, - T w, U d -) -{ - T Test1 = min(x, y); - T Test2 = min(z, w); - U Result1 = x < y ? a : b; - U Result2 = z < w ? c : d; - U Result = Test1 < Test2 ? Result1 : Result2; - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c, - vec const& w, vec const& d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = min(x[i], y[i]); - T Test2 = min(z[i], w[i]); - U Result1 = x[i] < y[i] ? a[i] : b[i]; - U Result2 = z[i] < w[i] ? c[i] : d[i]; - Result[i] = Test1 < Test2 ? Result1 : Result2; - } - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMin -( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d -) -{ - T Test1 = min(x, y); - T Test2 = min(z, w); - - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - U Result1 = x < y ? a[i] : b[i]; - U Result2 = z < w ? c[i] : d[i]; - Result[i] = Test1 < Test2 ? Result1 : Result2; - } - return Result; -} - -// Min comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMin -( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = min(x[i], y[i]); - T Test2 = min(z[i], w[i]); - U Result1 = x[i] < y[i] ? a : b; - U Result2 = z[i] < w[i] ? c : d; - Result[i] = Test1 < Test2 ? Result1 : Result2; - } - return Result; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER U associatedMax(T x, U a, T y, U b) -{ - return x > y ? 
a : b; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER vec<2, U, Q> associatedMax -( - vec const& x, vec const& a, - vec const& y, vec const& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? a[i] : b[i]; - return Result; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - T x, vec const& a, - T y, vec const& b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x > y ? a[i] : b[i]; - return Result; -} - -// Max comparison between 2 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, U a, - vec const& y, U b -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? a : b; - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER U associatedMax -( - T x, U a, - T y, U b, - T z, U c -) -{ - U Result = x > y ? (x > z ? a : c) : (y > z ? b : c); - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a[i] : c[i]) : (y[i] > z[i] ? b[i] : c[i]); - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x > y ? (x > z ? a[i] : c[i]) : (y > z ? b[i] : c[i]); - return Result; -} - -// Max comparison between 3 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - Result[i] = x[i] > y[i] ? (x[i] > z[i] ? a : c) : (y[i] > z[i] ? b : c); - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER U associatedMax -( - T x, U a, - T y, U b, - T z, U c, - T w, U d -) -{ - T Test1 = max(x, y); - T Test2 = max(z, w); - U Result1 = x > y ? a : b; - U Result2 = z > w ? c : d; - U Result = Test1 > Test2 ? Result1 : Result2; - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, vec const& a, - vec const& y, vec const& b, - vec const& z, vec const& c, - vec const& w, vec const& d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = max(x[i], y[i]); - T Test2 = max(z[i], w[i]); - U Result1 = x[i] > y[i] ? a[i] : b[i]; - U Result2 = z[i] > w[i] ? c[i] : d[i]; - Result[i] = Test1 > Test2 ? Result1 : Result2; - } - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - T x, vec const& a, - T y, vec const& b, - T z, vec const& c, - T w, vec const& d -) -{ - T Test1 = max(x, y); - T Test2 = max(z, w); - - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - U Result1 = x > y ? a[i] : b[i]; - U Result2 = z > w ? c[i] : d[i]; - Result[i] = Test1 > Test2 ? 
Result1 : Result2; - } - return Result; -} - -// Max comparison between 4 variables -template -GLM_FUNC_QUALIFIER vec associatedMax -( - vec const& x, U a, - vec const& y, U b, - vec const& z, U c, - vec const& w, U d -) -{ - vec Result; - for(length_t i = 0, n = Result.length(); i < n; ++i) - { - T Test1 = max(x[i], y[i]); - T Test2 = max(z[i], w[i]); - U Result1 = x[i] > y[i] ? a : b; - U Result2 = z[i] > w[i] ? c : d; - Result[i] = Test1 > Test2 ? Result1 : Result2; - } - return Result; -} -}//namespace glm diff --git a/third_party/glm/gtx/bit.hpp b/third_party/glm/gtx/bit.hpp deleted file mode 100755 index 60a7aef..0000000 --- a/third_party/glm/gtx/bit.hpp +++ /dev/null @@ -1,98 +0,0 @@ -/// @ref gtx_bit -/// @file glm/gtx/bit.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_bit GLM_GTX_bit -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../gtc/bitfield.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_bit is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_bit extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_bit - /// @{ - - /// @see gtx_bit - template - GLM_FUNC_DECL genIUType highestBitValue(genIUType Value); - - /// @see gtx_bit - template - GLM_FUNC_DECL genIUType lowestBitValue(genIUType Value); - - /// Find the highest bit set to 1 in a integer variable and return its value. - /// - /// @see gtx_bit - template - GLM_FUNC_DECL vec highestBitValue(vec const& value); - - /// Return the power of two number which value is just higher the input value. - /// Deprecated, use ceilPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoAbove(genIUType Value); - - /// Return the power of two number which value is just higher the input value. - /// Deprecated, use ceilPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoAbove(vec const& value); - - /// Return the power of two number which value is just lower the input value. - /// Deprecated, use floorPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoBelow(genIUType Value); - - /// Return the power of two number which value is just lower the input value. - /// Deprecated, use floorPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoBelow(vec const& value); - - /// Return the power of two number which value is the closet to the input value. - /// Deprecated, use roundPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL genIUType powerOfTwoNearest(genIUType Value); - - /// Return the power of two number which value is the closet to the input value. 
- /// Deprecated, use roundPowerOfTwo from GTC_round instead - /// - /// @see gtc_round - /// @see gtx_bit - template - GLM_DEPRECATED GLM_FUNC_DECL vec powerOfTwoNearest(vec const& value); - - /// @} -} //namespace glm - - -#include "bit.inl" - diff --git a/third_party/glm/gtx/bit.inl b/third_party/glm/gtx/bit.inl deleted file mode 100755 index 621b626..0000000 --- a/third_party/glm/gtx/bit.inl +++ /dev/null @@ -1,92 +0,0 @@ -/// @ref gtx_bit - -namespace glm -{ - /////////////////// - // highestBitValue - - template - GLM_FUNC_QUALIFIER genIUType highestBitValue(genIUType Value) - { - genIUType tmp = Value; - genIUType result = genIUType(0); - while(tmp) - { - result = (tmp & (~tmp + 1)); // grab lowest bit - tmp &= ~result; // clear lowest bit - } - return result; - } - - template - GLM_FUNC_QUALIFIER vec highestBitValue(vec const& v) - { - return detail::functor1::call(highestBitValue, v); - } - - /////////////////// - // lowestBitValue - - template - GLM_FUNC_QUALIFIER genIUType lowestBitValue(genIUType Value) - { - return (Value & (~Value + 1)); - } - - template - GLM_FUNC_QUALIFIER vec lowestBitValue(vec const& v) - { - return detail::functor1::call(lowestBitValue, v); - } - - /////////////////// - // powerOfTwoAbove - - template - GLM_FUNC_QUALIFIER genType powerOfTwoAbove(genType value) - { - return isPowerOfTwo(value) ? value : highestBitValue(value) << 1; - } - - template - GLM_FUNC_QUALIFIER vec powerOfTwoAbove(vec const& v) - { - return detail::functor1::call(powerOfTwoAbove, v); - } - - /////////////////// - // powerOfTwoBelow - - template - GLM_FUNC_QUALIFIER genType powerOfTwoBelow(genType value) - { - return isPowerOfTwo(value) ? value : highestBitValue(value); - } - - template - GLM_FUNC_QUALIFIER vec powerOfTwoBelow(vec const& v) - { - return detail::functor1::call(powerOfTwoBelow, v); - } - - ///////////////////// - // powerOfTwoNearest - - template - GLM_FUNC_QUALIFIER genType powerOfTwoNearest(genType value) - { - if(isPowerOfTwo(value)) - return value; - - genType const prev = highestBitValue(value); - genType const next = prev << 1; - return (next - value) < (value - prev) ? next : prev; - } - - template - GLM_FUNC_QUALIFIER vec powerOfTwoNearest(vec const& v) - { - return detail::functor1::call(powerOfTwoNearest, v); - } - -}//namespace glm diff --git a/third_party/glm/gtx/closest_point.hpp b/third_party/glm/gtx/closest_point.hpp deleted file mode 100755 index de6dbbf..0000000 --- a/third_party/glm/gtx/closest_point.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref gtx_closest_point -/// @file glm/gtx/closest_point.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_closest_point GLM_GTX_closest_point -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Find the point on a straight line which is the closet of a point. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_closest_point extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_closest_point - /// @{ - - /// Find the point on a straight line which is the closet of a point. 
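highestBitValue and lowestBitValue from the GTX_bit extension deleted above return the value of the highest and lowest set bit of an integer (the power-of-two helpers in the same header are marked deprecated in favour of GTC_round). A hedged sketch, assuming GLM_ENABLE_EXPERIMENTAL is defined as the header requires:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/bit.hpp>

    int main()
    {
        unsigned int v = 300u;                     // binary 100101100
        unsigned int hi = glm::highestBitValue(v); // 256, the top set bit
        unsigned int lo = glm::lowestBitValue(v);  // 4, the bottom set bit
        return (hi == 256u && lo == 4u) ? 0 : 1;
    }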
- /// @see gtx_closest_point - template - GLM_FUNC_DECL vec<3, T, Q> closestPointOnLine( - vec<3, T, Q> const& point, - vec<3, T, Q> const& a, - vec<3, T, Q> const& b); - - /// 2d lines work as well - template - GLM_FUNC_DECL vec<2, T, Q> closestPointOnLine( - vec<2, T, Q> const& point, - vec<2, T, Q> const& a, - vec<2, T, Q> const& b); - - /// @} -}// namespace glm - -#include "closest_point.inl" diff --git a/third_party/glm/gtx/closest_point.inl b/third_party/glm/gtx/closest_point.inl deleted file mode 100755 index 0a39b04..0000000 --- a/third_party/glm/gtx/closest_point.inl +++ /dev/null @@ -1,45 +0,0 @@ -/// @ref gtx_closest_point - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> closestPointOnLine - ( - vec<3, T, Q> const& point, - vec<3, T, Q> const& a, - vec<3, T, Q> const& b - ) - { - T LineLength = distance(a, b); - vec<3, T, Q> Vector = point - a; - vec<3, T, Q> LineDirection = (b - a) / LineLength; - - // Project Vector to LineDirection to get the distance of point from a - T Distance = dot(Vector, LineDirection); - - if(Distance <= T(0)) return a; - if(Distance >= LineLength) return b; - return a + LineDirection * Distance; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> closestPointOnLine - ( - vec<2, T, Q> const& point, - vec<2, T, Q> const& a, - vec<2, T, Q> const& b - ) - { - T LineLength = distance(a, b); - vec<2, T, Q> Vector = point - a; - vec<2, T, Q> LineDirection = (b - a) / LineLength; - - // Project Vector to LineDirection to get the distance of point from a - T Distance = dot(Vector, LineDirection); - - if(Distance <= T(0)) return a; - if(Distance >= LineLength) return b; - return a + LineDirection * Distance; - } - -}//namespace glm diff --git a/third_party/glm/gtx/color_encoding.hpp b/third_party/glm/gtx/color_encoding.hpp deleted file mode 100755 index 96ded2a..0000000 --- a/third_party/glm/gtx/color_encoding.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/// @ref gtx_color_encoding -/// @file glm/gtx/color_encoding.hpp -/// -/// @see core (dependence) -/// @see gtx_color_encoding (dependence) -/// -/// @defgroup gtx_color_encoding GLM_GTX_color_encoding -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// @brief Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../vec3.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTC_color_encoding is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTC_color_encoding extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_color_encoding - /// @{ - - /// Convert a linear sRGB color to D65 YUV. - template - GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB); - - /// Convert a linear sRGB color to D50 YUV. - template - GLM_FUNC_DECL vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB); - - /// Convert a D65 YUV color to linear sRGB. - template - GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ); - - /// Convert a D65 YUV color to D50 YUV. 
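closestPointOnLine, declared above and implemented in the matching .inl, projects a point onto the segment [a, b] and clamps the result to the endpoints. A minimal sketch under the same GLM_ENABLE_EXPERIMENTAL assumption:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/closest_point.hpp>

    int main()
    {
        glm::vec3 a(0.0f);
        glm::vec3 b(10.0f, 0.0f, 0.0f);
        glm::vec3 p(3.0f, 4.0f, 0.0f);

        // p projects onto the segment at (3, 0, 0); points beyond the ends snap to a or b.
        glm::vec3 c = glm::closestPointOnLine(p, a, b);
        return c == glm::vec3(3.0f, 0.0f, 0.0f) ? 0 : 1;
    }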
- template - GLM_FUNC_DECL vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ); - - /// @} -} //namespace glm - -#include "color_encoding.inl" diff --git a/third_party/glm/gtx/color_encoding.inl b/third_party/glm/gtx/color_encoding.inl deleted file mode 100755 index e50fa3e..0000000 --- a/third_party/glm/gtx/color_encoding.inl +++ /dev/null @@ -1,45 +0,0 @@ -/// @ref gtx_color_encoding - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD65XYZ(vec<3, T, Q> const& ColorLinearSRGB) - { - vec<3, T, Q> const M(0.490f, 0.17697f, 0.2f); - vec<3, T, Q> const N(0.31f, 0.8124f, 0.01063f); - vec<3, T, Q> const O(0.490f, 0.01f, 0.99f); - - return (M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB) * static_cast(5.650675255693055f); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertLinearSRGBToD50XYZ(vec<3, T, Q> const& ColorLinearSRGB) - { - vec<3, T, Q> const M(0.436030342570117f, 0.222438466210245f, 0.013897440074263f); - vec<3, T, Q> const N(0.385101860087134f, 0.716942745571917f, 0.097076381494207f); - vec<3, T, Q> const O(0.143067806654203f, 0.060618777416563f, 0.713926257896652f); - - return M * ColorLinearSRGB + N * ColorLinearSRGB + O * ColorLinearSRGB; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToLinearSRGB(vec<3, T, Q> const& ColorD65XYZ) - { - vec<3, T, Q> const M(0.41847f, -0.091169f, 0.0009209f); - vec<3, T, Q> const N(-0.15866f, 0.25243f, 0.015708f); - vec<3, T, Q> const O(0.0009209f, -0.0025498f, 0.1786f); - - return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> convertD65XYZToD50XYZ(vec<3, T, Q> const& ColorD65XYZ) - { - vec<3, T, Q> const M(+1.047844353856414f, +0.029549007606644f, -0.009250984365223f); - vec<3, T, Q> const N(+0.022898981050086f, +0.990508028941971f, +0.015072338237051f); - vec<3, T, Q> const O(-0.050206647741605f, -0.017074711360960f, +0.751717835079977f); - - return M * ColorD65XYZ + N * ColorD65XYZ + O * ColorD65XYZ; - } - -}//namespace glm diff --git a/third_party/glm/gtx/color_space.hpp b/third_party/glm/gtx/color_space.hpp deleted file mode 100755 index a634392..0000000 --- a/third_party/glm/gtx/color_space.hpp +++ /dev/null @@ -1,72 +0,0 @@ -/// @ref gtx_color_space -/// @file glm/gtx/color_space.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_color_space GLM_GTX_color_space -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Related to RGB to HSV conversions and operations. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_color_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_color_space extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_color_space - /// @{ - - /// Converts a color from HSV color space to its color in RGB color space. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<3, T, Q> rgbColor( - vec<3, T, Q> const& hsvValue); - - /// Converts a color from RGB color space to its color in HSV color space. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<3, T, Q> hsvColor( - vec<3, T, Q> const& rgbValue); - - /// Build a saturation matrix. 
- /// @see gtx_color_space - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> saturation( - T const s); - - /// Modify the saturation of a color. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<3, T, Q> saturation( - T const s, - vec<3, T, Q> const& color); - - /// Modify the saturation of a color. - /// @see gtx_color_space - template - GLM_FUNC_DECL vec<4, T, Q> saturation( - T const s, - vec<4, T, Q> const& color); - - /// Compute color luminosity associating ratios (0.33, 0.59, 0.11) to RGB canals. - /// @see gtx_color_space - template - GLM_FUNC_DECL T luminosity( - vec<3, T, Q> const& color); - - /// @} -}//namespace glm - -#include "color_space.inl" diff --git a/third_party/glm/gtx/color_space.inl b/third_party/glm/gtx/color_space.inl deleted file mode 100755 index f698afe..0000000 --- a/third_party/glm/gtx/color_space.inl +++ /dev/null @@ -1,141 +0,0 @@ -/// @ref gtx_color_space - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rgbColor(const vec<3, T, Q>& hsvColor) - { - vec<3, T, Q> hsv = hsvColor; - vec<3, T, Q> rgbColor; - - if(hsv.y == static_cast(0)) - // achromatic (grey) - rgbColor = vec<3, T, Q>(hsv.z); - else - { - T sector = floor(hsv.x * (T(1) / T(60))); - T frac = (hsv.x * (T(1) / T(60))) - sector; - // factorial part of h - T o = hsv.z * (T(1) - hsv.y); - T p = hsv.z * (T(1) - hsv.y * frac); - T q = hsv.z * (T(1) - hsv.y * (T(1) - frac)); - - switch(int(sector)) - { - default: - case 0: - rgbColor.r = hsv.z; - rgbColor.g = q; - rgbColor.b = o; - break; - case 1: - rgbColor.r = p; - rgbColor.g = hsv.z; - rgbColor.b = o; - break; - case 2: - rgbColor.r = o; - rgbColor.g = hsv.z; - rgbColor.b = q; - break; - case 3: - rgbColor.r = o; - rgbColor.g = p; - rgbColor.b = hsv.z; - break; - case 4: - rgbColor.r = q; - rgbColor.g = o; - rgbColor.b = hsv.z; - break; - case 5: - rgbColor.r = hsv.z; - rgbColor.g = o; - rgbColor.b = p; - break; - } - } - - return rgbColor; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> hsvColor(const vec<3, T, Q>& rgbColor) - { - vec<3, T, Q> hsv = rgbColor; - float Min = min(min(rgbColor.r, rgbColor.g), rgbColor.b); - float Max = max(max(rgbColor.r, rgbColor.g), rgbColor.b); - float Delta = Max - Min; - - hsv.z = Max; - - if(Max != static_cast(0)) - { - hsv.y = Delta / hsv.z; - T h = static_cast(0); - - if(rgbColor.r == Max) - // between yellow & magenta - h = static_cast(0) + T(60) * (rgbColor.g - rgbColor.b) / Delta; - else if(rgbColor.g == Max) - // between cyan & yellow - h = static_cast(120) + T(60) * (rgbColor.b - rgbColor.r) / Delta; - else - // between magenta & cyan - h = static_cast(240) + T(60) * (rgbColor.r - rgbColor.g) / Delta; - - if(h < T(0)) - hsv.x = h + T(360); - else - hsv.x = h; - } - else - { - // If r = g = b = 0 then s = 0, h is undefined - hsv.y = static_cast(0); - hsv.x = static_cast(0); - } - - return hsv; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> saturation(T const s) - { - vec<3, T, defaultp> rgbw = vec<3, T, defaultp>(T(0.2126), T(0.7152), T(0.0722)); - - vec<3, T, defaultp> const col((T(1) - s) * rgbw); - - mat<4, 4, T, defaultp> result(T(1)); - result[0][0] = col.x + s; - result[0][1] = col.x; - result[0][2] = col.x; - result[1][0] = col.y; - result[1][1] = col.y + s; - result[1][2] = col.y; - result[2][0] = col.z; - result[2][1] = col.z; - result[2][2] = col.z + s; - - return result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> saturation(const T s, const vec<3, T, Q>& color) - { - return vec<3, T, Q>(saturation(s) * vec<4, T, Q>(color, T(0))); - } - - template - 
GLM_FUNC_QUALIFIER vec<4, T, Q> saturation(const T s, const vec<4, T, Q>& color) - { - return saturation(s) * color; - } - - template - GLM_FUNC_QUALIFIER T luminosity(const vec<3, T, Q>& color) - { - const vec<3, T, Q> tmp = vec<3, T, Q>(0.33, 0.59, 0.11); - return dot(color, tmp); - } -}//namespace glm diff --git a/third_party/glm/gtx/color_space_YCoCg.hpp b/third_party/glm/gtx/color_space_YCoCg.hpp deleted file mode 100755 index dd2b771..0000000 --- a/third_party/glm/gtx/color_space_YCoCg.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_color_space_YCoCg -/// @file glm/gtx/color_space_YCoCg.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_color_space_YCoCg GLM_GTX_color_space_YCoCg -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// RGB to YCoCg conversions and operations - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_color_space_YCoCg is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_color_space_YCoCg extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_color_space_YCoCg - /// @{ - - /// Convert a color from RGB color space to YCoCg color space. - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCg( - vec<3, T, Q> const& rgbColor); - - /// Convert a color from YCoCg color space to RGB color space. - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> YCoCg2rgb( - vec<3, T, Q> const& YCoCgColor); - - /// Convert a color from RGB color space to YCoCgR color space. - /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> rgb2YCoCgR( - vec<3, T, Q> const& rgbColor); - - /// Convert a color from YCoCgR color space to RGB color space. 
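The GTX_color_space functions above convert between RGB and HSV (hue in degrees, then saturation and value), rescale saturation, and compute a 0.33/0.59/0.11 luminosity. A short usage sketch, assuming GLM_ENABLE_EXPERIMENTAL:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/color_space.hpp>

    int main()
    {
        glm::vec3 rgb(1.0f, 0.5f, 0.25f);
        glm::vec3 hsv  = glm::hsvColor(rgb);         // x = hue in degrees, y = saturation, z = value
        glm::vec3 back = glm::rgbColor(hsv);         // HSV back to RGB
        glm::vec3 grey = glm::saturation(0.0f, rgb); // s = 0 removes all saturation
        float     luma = glm::luminosity(rgb);       // dot(rgb, vec3(0.33, 0.59, 0.11))
        return (luma > 0.0f && grey.r > 0.0f && back.r > 0.0f) ? 0 : 1;
    }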
- /// @see "YCoCg-R: A Color Space with RGB Reversibility and Low Dynamic Range" - /// @see gtx_color_space_YCoCg - template - GLM_FUNC_DECL vec<3, T, Q> YCoCgR2rgb( - vec<3, T, Q> const& YCoCgColor); - - /// @} -}//namespace glm - -#include "color_space_YCoCg.inl" diff --git a/third_party/glm/gtx/color_space_YCoCg.inl b/third_party/glm/gtx/color_space_YCoCg.inl deleted file mode 100755 index 83ba857..0000000 --- a/third_party/glm/gtx/color_space_YCoCg.inl +++ /dev/null @@ -1,107 +0,0 @@ -/// @ref gtx_color_space_YCoCg - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCg - ( - vec<3, T, Q> const& rgbColor - ) - { - vec<3, T, Q> result; - result.x/*Y */ = rgbColor.r / T(4) + rgbColor.g / T(2) + rgbColor.b / T(4); - result.y/*Co*/ = rgbColor.r / T(2) + rgbColor.g * T(0) - rgbColor.b / T(2); - result.z/*Cg*/ = - rgbColor.r / T(4) + rgbColor.g / T(2) - rgbColor.b / T(4); - return result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCg2rgb - ( - vec<3, T, Q> const& YCoCgColor - ) - { - vec<3, T, Q> result; - result.r = YCoCgColor.x + YCoCgColor.y - YCoCgColor.z; - result.g = YCoCgColor.x + YCoCgColor.z; - result.b = YCoCgColor.x - YCoCgColor.y - YCoCgColor.z; - return result; - } - - template - class compute_YCoCgR { - public: - static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR - ( - vec<3, T, Q> const& rgbColor - ) - { - vec<3, T, Q> result; - result.x/*Y */ = rgbColor.g * static_cast(0.5) + (rgbColor.r + rgbColor.b) * static_cast(0.25); - result.y/*Co*/ = rgbColor.r - rgbColor.b; - result.z/*Cg*/ = rgbColor.g - (rgbColor.r + rgbColor.b) * static_cast(0.5); - return result; - } - - static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb - ( - vec<3, T, Q> const& YCoCgRColor - ) - { - vec<3, T, Q> result; - T tmp = YCoCgRColor.x - (YCoCgRColor.z * static_cast(0.5)); - result.g = YCoCgRColor.z + tmp; - result.b = tmp - (YCoCgRColor.y * static_cast(0.5)); - result.r = result.b + YCoCgRColor.y; - return result; - } - }; - - template - class compute_YCoCgR { - public: - static GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR - ( - vec<3, T, Q> const& rgbColor - ) - { - vec<3, T, Q> result; - result.y/*Co*/ = rgbColor.r - rgbColor.b; - T tmp = rgbColor.b + (result.y >> 1); - result.z/*Cg*/ = rgbColor.g - tmp; - result.x/*Y */ = tmp + (result.z >> 1); - return result; - } - - static GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb - ( - vec<3, T, Q> const& YCoCgRColor - ) - { - vec<3, T, Q> result; - T tmp = YCoCgRColor.x - (YCoCgRColor.z >> 1); - result.g = YCoCgRColor.z + tmp; - result.b = tmp - (YCoCgRColor.y >> 1); - result.r = result.b + YCoCgRColor.y; - return result; - } - }; - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rgb2YCoCgR - ( - vec<3, T, Q> const& rgbColor - ) - { - return compute_YCoCgR::is_integer>::rgb2YCoCgR(rgbColor); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> YCoCgR2rgb - ( - vec<3, T, Q> const& YCoCgRColor - ) - { - return compute_YCoCgR::is_integer>::YCoCgR2rgb(YCoCgRColor); - } -}//namespace glm diff --git a/third_party/glm/gtx/common.hpp b/third_party/glm/gtx/common.hpp deleted file mode 100755 index 254ada2..0000000 --- a/third_party/glm/gtx/common.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/// @ref gtx_common -/// @file glm/gtx/common.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_common GLM_GTX_common -/// @ingroup gtx -/// -/// Include to use the features of this extension. 
-/// -/// @brief Provide functions to increase the compatibility with Cg and HLSL languages - -#pragma once - -// Dependencies: -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../gtc/vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_common is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_common extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_common - /// @{ - - /// Returns true if x is a denormalized number - /// Numbers whose absolute value is too small to be represented in the normal format are represented in an alternate, denormalized format. - /// This format is less precise but can represent values closer to zero. - /// - /// @tparam genType Floating-point scalar or vector types. - /// - /// @see GLSL isnan man page - /// @see GLSL 4.20.8 specification, section 8.3 Common Functions - template - GLM_FUNC_DECL typename genType::bool_type isdenormal(genType const& x); - - /// Similar to 'mod' but with a different rounding and integer support. - /// Returns 'x - y * trunc(x/y)' instead of 'x - y * floor(x/y)' - /// - /// @see GLSL mod vs HLSL fmod - /// @see GLSL mod man page - template - GLM_FUNC_DECL vec fmod(vec const& v); - - /// Returns whether vector components values are within an interval. A open interval excludes its endpoints, and is denoted with square brackets. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_relational - template - GLM_FUNC_DECL vec openBounded(vec const& Value, vec const& Min, vec const& Max); - - /// Returns whether vector components values are within an interval. A closed interval includes its endpoints, and is denoted with square brackets. 
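GTX_common, deleted here, adds HLSL-style helpers: an fmod that truncates toward zero (unlike core glm::mod, which floors) and componentwise open/closed interval tests. A hedged sketch with GLM_ENABLE_EXPERIMENTAL defined:

    #define GLM_ENABLE_EXPERIMENTAL
    #include <glm/glm.hpp>
    #include <glm/gtx/common.hpp>

    int main()
    {
        // fmod keeps the sign of the dividend: fmod(-5.5, 2) == -1.5, fmod(5.5, 2) == 1.5.
        glm::vec2 r = glm::fmod(glm::vec2(-5.5f, 5.5f), glm::vec2(2.0f));

        // openBounded tests each component against an exclusive (min, max) interval.
        glm::bvec2 inside = glm::openBounded(glm::vec2(0.5f), glm::vec2(0.0f), glm::vec2(1.0f));
        return (glm::all(inside) && r.y == 1.5f) ? 0 : 1;
    }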
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see ext_vector_relational - template - GLM_FUNC_DECL vec closeBounded(vec const& Value, vec const& Min, vec const& Max); - - /// @} -}//namespace glm - -#include "common.inl" diff --git a/third_party/glm/gtx/common.inl b/third_party/glm/gtx/common.inl deleted file mode 100755 index 4ad2126..0000000 --- a/third_party/glm/gtx/common.inl +++ /dev/null @@ -1,125 +0,0 @@ -/// @ref gtx_common - -#include -#include "../gtc/epsilon.hpp" -#include "../gtc/constants.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_fmod - { - GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) - { - return detail::functor2::call(std::fmod, a, b); - } - }; - - template - struct compute_fmod - { - GLM_FUNC_QUALIFIER static vec call(vec const& a, vec const& b) - { - return a % b; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER bool isdenormal(T const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isdenormal' only accept floating-point inputs"); - -# if GLM_HAS_CXX11_STL - return std::fpclassify(x) == FP_SUBNORMAL; -# else - return epsilonNotEqual(x, static_cast(0), epsilon()) && std::fabs(x) < std::numeric_limits::min(); -# endif - } - - template - GLM_FUNC_QUALIFIER typename vec<1, T, Q>::bool_type isdenormal - ( - vec<1, T, Q> const& x - ) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isdenormal' only accept floating-point inputs"); - - return typename vec<1, T, Q>::bool_type( - isdenormal(x.x)); - } - - template - GLM_FUNC_QUALIFIER typename vec<2, T, Q>::bool_type isdenormal - ( - vec<2, T, Q> const& x - ) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isdenormal' only accept floating-point inputs"); - - return typename vec<2, T, Q>::bool_type( - isdenormal(x.x), - isdenormal(x.y)); - } - - template - GLM_FUNC_QUALIFIER typename vec<3, T, Q>::bool_type isdenormal - ( - vec<3, T, Q> const& x - ) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isdenormal' only accept floating-point inputs"); - - return typename vec<3, T, Q>::bool_type( - isdenormal(x.x), - isdenormal(x.y), - isdenormal(x.z)); - } - - template - GLM_FUNC_QUALIFIER typename vec<4, T, Q>::bool_type isdenormal - ( - vec<4, T, Q> const& x - ) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isdenormal' only accept floating-point inputs"); - - return typename vec<4, T, Q>::bool_type( - isdenormal(x.x), - isdenormal(x.y), - isdenormal(x.z), - isdenormal(x.w)); - } - - // fmod - template - GLM_FUNC_QUALIFIER genType fmod(genType x, genType y) - { - return fmod(vec<1, genType>(x), y).x; - } - - template - GLM_FUNC_QUALIFIER vec fmod(vec const& x, T y) - { - return detail::compute_fmod::is_iec559>::call(x, vec(y)); - } - - template - GLM_FUNC_QUALIFIER vec fmod(vec const& x, vec const& y) - { - return detail::compute_fmod::is_iec559>::call(x, y); - } - - template - GLM_FUNC_QUALIFIER vec openBounded(vec const& Value, vec const& Min, vec const& Max) - { - return greaterThan(Value, Min) && lessThan(Value, Max); - } - - template - GLM_FUNC_QUALIFIER vec closeBounded(vec const& Value, vec const& Min, vec const& Max) - { - return greaterThanEqual(Value, Min) && lessThanEqual(Value, Max); - } -}//namespace glm diff --git a/third_party/glm/gtx/compatibility.hpp b/third_party/glm/gtx/compatibility.hpp deleted file mode 100755 index f1b00a6..0000000 --- 
a/third_party/glm/gtx/compatibility.hpp +++ /dev/null @@ -1,133 +0,0 @@ -/// @ref gtx_compatibility -/// @file glm/gtx/compatibility.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_compatibility GLM_GTX_compatibility -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Provide functions to increase the compatibility with Cg and HLSL languages - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_compatibility is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_compatibility extension included") -# endif -#endif - -#if GLM_COMPILER & GLM_COMPILER_VC -# include -#elif GLM_COMPILER & GLM_COMPILER_GCC -# include -# if(GLM_PLATFORM & GLM_PLATFORM_ANDROID) -# undef isfinite -# endif -#endif//GLM_COMPILER - -namespace glm -{ - /// @addtogroup gtx_compatibility - /// @{ - - template GLM_FUNC_QUALIFIER T lerp(T x, T y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - - template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, T a){return mix(x, y, a);} //!< \brief Returns x * (1.0 - a) + y * a, i.e., the linear blend of x and y using the floating-point value a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> lerp(const vec<2, T, Q>& x, const vec<2, T, Q>& y, const vec<2, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<3, T, Q> lerp(const vec<3, T, Q>& x, const vec<3, T, Q>& y, const vec<3, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> lerp(const vec<4, T, Q>& x, const vec<4, T, Q>& y, const vec<4, T, Q>& a){return mix(x, y, a);} //!< \brief Returns the component-wise result of x * (1.0 - a) + y * a, i.e., the linear blend of x and y using vector a. The value for a is not restricted to the range [0, 1]. (From GLM_GTX_compatibility) - - template GLM_FUNC_QUALIFIER T saturate(T x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. 
(From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> saturate(const vec<2, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<3, T, Q> saturate(const vec<3, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> saturate(const vec<4, T, Q>& x){return clamp(x, T(0), T(1));} //!< \brief Returns clamp(x, 0, 1) for each component in x. (From GLM_GTX_compatibility) - - template GLM_FUNC_QUALIFIER T atan2(T x, T y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<2, T, Q> atan2(const vec<2, T, Q>& x, const vec<2, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<3, T, Q> atan2(const vec<3, T, Q>& x, const vec<3, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - template GLM_FUNC_QUALIFIER vec<4, T, Q> atan2(const vec<4, T, Q>& x, const vec<4, T, Q>& y){return atan(x, y);} //!< \brief Arc tangent. Returns an angle whose tangent is y/x. The signs of x and y are used to determine what quadrant the angle is in. The range of values returned by this function is [-PI, PI]. Results are undefined if x and y are both 0. (From GLM_GTX_compatibility) - - template GLM_FUNC_DECL bool isfinite(genType const& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<1, bool, Q> isfinite(const vec<1, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<2, bool, Q> isfinite(const vec<2, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<3, bool, Q> isfinite(const vec<3, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - template GLM_FUNC_DECL vec<4, bool, Q> isfinite(const vec<4, T, Q>& x); //!< \brief Test whether or not a scalar or each vector component is a finite value. (From GLM_GTX_compatibility) - - typedef bool bool1; //!< \brief boolean type with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, bool, highp> bool2; //!< \brief boolean type with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, bool, highp> bool3; //!< \brief boolean type with 3 components. (From GLM_GTX_compatibility extension) - typedef vec<4, bool, highp> bool4; //!< \brief boolean type with 4 components. 
(From GLM_GTX_compatibility extension) - - typedef bool bool1x1; //!< \brief boolean matrix with 1 x 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, bool, highp> bool2x2; //!< \brief boolean matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, bool, highp> bool2x3; //!< \brief boolean matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, bool, highp> bool2x4; //!< \brief boolean matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, bool, highp> bool3x2; //!< \brief boolean matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, bool, highp> bool3x3; //!< \brief boolean matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, bool, highp> bool3x4; //!< \brief boolean matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, bool, highp> bool4x2; //!< \brief boolean matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 3, bool, highp> bool4x3; //!< \brief boolean matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, bool, highp> bool4x4; //!< \brief boolean matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - typedef int int1; //!< \brief integer vector with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, int, highp> int2; //!< \brief integer vector with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, int, highp> int3; //!< \brief integer vector with 3 components. (From GLM_GTX_compatibility extension) - typedef vec<4, int, highp> int4; //!< \brief integer vector with 4 components. (From GLM_GTX_compatibility extension) - - typedef int int1x1; //!< \brief integer matrix with 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, int, highp> int2x2; //!< \brief integer matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, int, highp> int2x3; //!< \brief integer matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, int, highp> int2x4; //!< \brief integer matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, int, highp> int3x2; //!< \brief integer matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, int, highp> int3x3; //!< \brief integer matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, int, highp> int3x4; //!< \brief integer matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, int, highp> int4x2; //!< \brief integer matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 3, int, highp> int4x3; //!< \brief integer matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, int, highp> int4x4; //!< \brief integer matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - typedef float float1; //!< \brief single-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, float, highp> float2; //!< \brief single-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, float, highp> float3; //!< \brief single-qualifier floating-point vector with 3 components. 
(From GLM_GTX_compatibility extension) - typedef vec<4, float, highp> float4; //!< \brief single-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) - - typedef float float1x1; //!< \brief single-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, float, highp> float2x2; //!< \brief single-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, float, highp> float2x3; //!< \brief single-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, float, highp> float2x4; //!< \brief single-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, float, highp> float3x2; //!< \brief single-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, float, highp> float3x3; //!< \brief single-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, float, highp> float3x4; //!< \brief single-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, float, highp> float4x2; //!< \brief single-qualifier floating-point matrix with 4 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 3, float, highp> float4x3; //!< \brief single-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, float, highp> float4x4; //!< \brief single-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - typedef double double1; //!< \brief double-qualifier floating-point vector with 1 component. (From GLM_GTX_compatibility extension) - typedef vec<2, double, highp> double2; //!< \brief double-qualifier floating-point vector with 2 components. (From GLM_GTX_compatibility extension) - typedef vec<3, double, highp> double3; //!< \brief double-qualifier floating-point vector with 3 components. (From GLM_GTX_compatibility extension) - typedef vec<4, double, highp> double4; //!< \brief double-qualifier floating-point vector with 4 components. (From GLM_GTX_compatibility extension) - - typedef double double1x1; //!< \brief double-qualifier floating-point matrix with 1 component. (From GLM_GTX_compatibility extension) - typedef mat<2, 2, double, highp> double2x2; //!< \brief double-qualifier floating-point matrix with 2 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 3, double, highp> double2x3; //!< \brief double-qualifier floating-point matrix with 2 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<2, 4, double, highp> double2x4; //!< \brief double-qualifier floating-point matrix with 2 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 2, double, highp> double3x2; //!< \brief double-qualifier floating-point matrix with 3 x 2 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 3, double, highp> double3x3; //!< \brief double-qualifier floating-point matrix with 3 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<3, 4, double, highp> double3x4; //!< \brief double-qualifier floating-point matrix with 3 x 4 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 2, double, highp> double4x2; //!< \brief double-qualifier floating-point matrix with 4 x 2 components. 
(From GLM_GTX_compatibility extension) - typedef mat<4, 3, double, highp> double4x3; //!< \brief double-qualifier floating-point matrix with 4 x 3 components. (From GLM_GTX_compatibility extension) - typedef mat<4, 4, double, highp> double4x4; //!< \brief double-qualifier floating-point matrix with 4 x 4 components. (From GLM_GTX_compatibility extension) - - /// @} -}//namespace glm - -#include "compatibility.inl" diff --git a/third_party/glm/gtx/compatibility.inl b/third_party/glm/gtx/compatibility.inl deleted file mode 100755 index 1d49496..0000000 --- a/third_party/glm/gtx/compatibility.inl +++ /dev/null @@ -1,62 +0,0 @@ -#include - -namespace glm -{ - // isfinite - template - GLM_FUNC_QUALIFIER bool isfinite( - genType const& x) - { -# if GLM_HAS_CXX11_STL - return std::isfinite(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_VC - return _finite(x) != 0; -# elif GLM_COMPILER & GLM_COMPILER_GCC && GLM_PLATFORM & GLM_PLATFORM_ANDROID - return _isfinite(x) != 0; -# else - if (std::numeric_limits::is_integer || std::denorm_absent == std::numeric_limits::has_denorm) - return std::numeric_limits::min() <= x && std::numeric_limits::max() >= x; - else - return -std::numeric_limits::max() <= x && std::numeric_limits::max() >= x; -# endif - } - - template - GLM_FUNC_QUALIFIER vec<1, bool, Q> isfinite( - vec<1, T, Q> const& x) - { - return vec<1, bool, Q>( - isfinite(x.x)); - } - - template - GLM_FUNC_QUALIFIER vec<2, bool, Q> isfinite( - vec<2, T, Q> const& x) - { - return vec<2, bool, Q>( - isfinite(x.x), - isfinite(x.y)); - } - - template - GLM_FUNC_QUALIFIER vec<3, bool, Q> isfinite( - vec<3, T, Q> const& x) - { - return vec<3, bool, Q>( - isfinite(x.x), - isfinite(x.y), - isfinite(x.z)); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> isfinite( - vec<4, T, Q> const& x) - { - return vec<4, bool, Q>( - isfinite(x.x), - isfinite(x.y), - isfinite(x.z), - isfinite(x.w)); - } - -}//namespace glm diff --git a/third_party/glm/gtx/component_wise.hpp b/third_party/glm/gtx/component_wise.hpp deleted file mode 100755 index 34a2b0a..0000000 --- a/third_party/glm/gtx/component_wise.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/// @ref gtx_component_wise -/// @file glm/gtx/component_wise.hpp -/// @date 2007-05-21 / 2011-06-07 -/// @author Christophe Riccio -/// -/// @see core (dependence) -/// -/// @defgroup gtx_component_wise GLM_GTX_component_wise -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Operations between components of a type - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_component_wise is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_component_wise extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_component_wise - /// @{ - - /// Convert an integer vector to a normalized float vector. - /// If the parameter value type is already a floating qualifier type, the value is passed through. - /// @see gtx_component_wise - template - GLM_FUNC_DECL vec compNormalize(vec const& v); - - /// Convert a normalized float vector to an integer vector. - /// If the parameter value type is already a floating qualifier type, the value is passed through. 
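For reference, a minimal usage sketch of the GTX_compatibility helpers removed above (illustrative only; it assumes GLM_ENABLE_EXPERIMENTAL is defined so the experimental header can be included):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/compatibility.hpp>

// Blend two values with the HLSL-style helpers: lerp() forwards to mix(),
// and saturate() clamps each component to [0, 1].
glm::float3 blend_and_clamp(const glm::float3& a, const glm::float3& b, float t)
{
	glm::float3 mixed = glm::lerp(a, b, t); // x * (1 - t) + y * t; t is not restricted to [0, 1]
	return glm::saturate(mixed);
}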
- /// @see gtx_component_wise - template - GLM_FUNC_DECL vec compScale(vec const& v); - - /// Add all vector components together. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compAdd(genType const& v); - - /// Multiply all vector components together. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compMul(genType const& v); - - /// Find the minimum value between single vector components. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compMin(genType const& v); - - /// Find the maximum value between single vector components. - /// @see gtx_component_wise - template - GLM_FUNC_DECL typename genType::value_type compMax(genType const& v); - - /// @} -}//namespace glm - -#include "component_wise.inl" diff --git a/third_party/glm/gtx/component_wise.inl b/third_party/glm/gtx/component_wise.inl deleted file mode 100755 index cbbc7d4..0000000 --- a/third_party/glm/gtx/component_wise.inl +++ /dev/null @@ -1,127 +0,0 @@ -/// @ref gtx_component_wise - -#include - -namespace glm{ -namespace detail -{ - template - struct compute_compNormalize - {}; - - template - struct compute_compNormalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - floatType const Min = static_cast(std::numeric_limits::min()); - floatType const Max = static_cast(std::numeric_limits::max()); - return (vec(v) - Min) / (Max - Min) * static_cast(2) - static_cast(1); - } - }; - - template - struct compute_compNormalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return vec(v) / static_cast(std::numeric_limits::max()); - } - }; - - template - struct compute_compNormalize - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return v; - } - }; - - template - struct compute_compScale - {}; - - template - struct compute_compScale - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - floatType const Max = static_cast(std::numeric_limits::max()) + static_cast(0.5); - vec const Scaled(v * Max); - vec const Result(Scaled - static_cast(0.5)); - return Result; - } - }; - - template - struct compute_compScale - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return vec(vec(v) * static_cast(std::numeric_limits::max())); - } - }; - - template - struct compute_compScale - { - GLM_FUNC_QUALIFIER static vec call(vec const& v) - { - return v; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER vec compNormalize(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'compNormalize' accepts only floating-point types for 'floatType' template parameter"); - - return detail::compute_compNormalize::is_integer, std::numeric_limits::is_signed>::call(v); - } - - template - GLM_FUNC_QUALIFIER vec compScale(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'compScale' accepts only floating-point types for 'floatType' template parameter"); - - return detail::compute_compScale::is_integer, std::numeric_limits::is_signed>::call(v); - } - - template - GLM_FUNC_QUALIFIER T compAdd(vec const& v) - { - T Result(0); - for(length_t i = 0, n = v.length(); i < n; ++i) - Result += v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER T compMul(vec const& v) - { - T Result(1); - for(length_t i = 0, n = v.length(); i < n; ++i) - Result *= v[i]; - return Result; - } - - template - GLM_FUNC_QUALIFIER T compMin(vec const& v) - { - T Result(v[0]); - for(length_t i = 1, n = v.length(); i < n; ++i) - Result = min(Result, v[i]); - return Result; - } - - 
template - GLM_FUNC_QUALIFIER T compMax(vec const& v) - { - T Result(v[0]); - for(length_t i = 1, n = v.length(); i < n; ++i) - Result = max(Result, v[i]); - return Result; - } -}//namespace glm diff --git a/third_party/glm/gtx/dual_quaternion.hpp b/third_party/glm/gtx/dual_quaternion.hpp deleted file mode 100755 index 6a51ab7..0000000 --- a/third_party/glm/gtx/dual_quaternion.hpp +++ /dev/null @@ -1,274 +0,0 @@ -/// @ref gtx_dual_quaternion -/// @file glm/gtx/dual_quaternion.hpp -/// @author Maksim Vorobiev (msomeone@gmail.com) -/// -/// @see core (dependence) -/// @see gtc_constants (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_dual_quaternion GLM_GTX_dual_quaternion -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines a templated dual-quaternion type and several dual-quaternion operations. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/constants.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_dual_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_dual_quaternion extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_dual_quaternion - /// @{ - - template - struct tdualquat - { - // -- Implementation detail -- - - typedef T value_type; - typedef qua part_type; - - // -- Data -- - - qua real, dual; - - // -- Component accesses -- - - typedef length_t length_type; - /// Return the count of components of a dual quaternion - GLM_FUNC_DECL static GLM_CONSTEXPR length_type length(){return 2;} - - GLM_FUNC_DECL part_type & operator[](length_type i); - GLM_FUNC_DECL part_type const& operator[](length_type i) const; - - // -- Implicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat() GLM_DEFAULT; - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat const& d) GLM_DEFAULT; - template - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(tdualquat const& d); - - // -- Explicit basic constructors -- - - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua const& real); - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua const& orientation, vec<3, T, Q> const& translation); - GLM_FUNC_DECL GLM_CONSTEXPR tdualquat(qua const& real, qua const& dual); - - // -- Conversion constructors -- - - template - GLM_FUNC_DECL GLM_CONSTEXPR GLM_EXPLICIT tdualquat(tdualquat const& q); - - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<2, 4, T, Q> const& holder_mat); - GLM_FUNC_DECL GLM_EXPLICIT GLM_CONSTEXPR tdualquat(mat<3, 4, T, Q> const& aug_mat); - - // -- Unary arithmetic operators -- - - GLM_FUNC_DECL tdualquat & operator=(tdualquat const& m) GLM_DEFAULT; - - template - GLM_FUNC_DECL tdualquat & operator=(tdualquat const& m); - template - GLM_FUNC_DECL tdualquat & operator*=(U s); - template - GLM_FUNC_DECL tdualquat & operator/=(U s); - }; - - // -- Unary bit operators -- - - template - GLM_FUNC_DECL tdualquat operator+(tdualquat const& q); - - template - GLM_FUNC_DECL tdualquat operator-(tdualquat const& q); - - // -- Binary operators -- - - template - GLM_FUNC_DECL tdualquat operator+(tdualquat const& q, tdualquat const& p); - - template - GLM_FUNC_DECL tdualquat operator*(tdualquat const& q, tdualquat const& p); - - template - GLM_FUNC_DECL vec<3, T, Q> operator*(tdualquat const& q, vec<3, T, Q> const& v); - - template - GLM_FUNC_DECL 
vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat const& q); - - template - GLM_FUNC_DECL vec<4, T, Q> operator*(tdualquat const& q, vec<4, T, Q> const& v); - - template - GLM_FUNC_DECL vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat const& q); - - template - GLM_FUNC_DECL tdualquat operator*(tdualquat const& q, T const& s); - - template - GLM_FUNC_DECL tdualquat operator*(T const& s, tdualquat const& q); - - template - GLM_FUNC_DECL tdualquat operator/(tdualquat const& q, T const& s); - - // -- Boolean operators -- - - template - GLM_FUNC_DECL bool operator==(tdualquat const& q1, tdualquat const& q2); - - template - GLM_FUNC_DECL bool operator!=(tdualquat const& q1, tdualquat const& q2); - - /// Creates an identity dual quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat dual_quat_identity(); - - /// Returns the normalized quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat normalize(tdualquat const& q); - - /// Returns the linear interpolation of two dual quaternion. - /// - /// @see gtc_dual_quaternion - template - GLM_FUNC_DECL tdualquat lerp(tdualquat const& x, tdualquat const& y, T const& a); - - /// Returns the q inverse. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat inverse(tdualquat const& q); - - /// Converts a quaternion to a 2 * 4 matrix. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL mat<2, 4, T, Q> mat2x4_cast(tdualquat const& x); - - /// Converts a quaternion to a 3 * 4 matrix. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL mat<3, 4, T, Q> mat3x4_cast(tdualquat const& x); - - /// Converts a 2 * 4 matrix (matrix which holds real and dual parts) to a quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat dualquat_cast(mat<2, 4, T, Q> const& x); - - /// Converts a 3 * 4 matrix (augmented matrix rotation + translation) to a quaternion. - /// - /// @see gtx_dual_quaternion - template - GLM_FUNC_DECL tdualquat dualquat_cast(mat<3, 4, T, Q> const& x); - - - /// Dual-quaternion of low single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat lowp_dualquat; - - /// Dual-quaternion of medium single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat mediump_dualquat; - - /// Dual-quaternion of high single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat highp_dualquat; - - - /// Dual-quaternion of low single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat lowp_fdualquat; - - /// Dual-quaternion of medium single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat mediump_fdualquat; - - /// Dual-quaternion of high single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat highp_fdualquat; - - - /// Dual-quaternion of low double-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat lowp_ddualquat; - - /// Dual-quaternion of medium double-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef tdualquat mediump_ddualquat; - - /// Dual-quaternion of high double-qualifier floating-point numbers. 
- /// - /// @see gtx_dual_quaternion - typedef tdualquat highp_ddualquat; - - -#if(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) - /// Dual-quaternion of floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef highp_fdualquat dualquat; - - /// Dual-quaternion of single-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef highp_fdualquat fdualquat; -#elif(defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) - typedef highp_fdualquat dualquat; - typedef highp_fdualquat fdualquat; -#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && defined(GLM_PRECISION_MEDIUMP_FLOAT) && !defined(GLM_PRECISION_LOWP_FLOAT)) - typedef mediump_fdualquat dualquat; - typedef mediump_fdualquat fdualquat; -#elif(!defined(GLM_PRECISION_HIGHP_FLOAT) && !defined(GLM_PRECISION_MEDIUMP_FLOAT) && defined(GLM_PRECISION_LOWP_FLOAT)) - typedef lowp_fdualquat dualquat; - typedef lowp_fdualquat fdualquat; -#else -# error "GLM error: multiple default precision requested for single-precision floating-point types" -#endif - - -#if(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) - /// Dual-quaternion of default double-qualifier floating-point numbers. - /// - /// @see gtx_dual_quaternion - typedef highp_ddualquat ddualquat; -#elif(defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef highp_ddualquat ddualquat; -#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && defined(GLM_PRECISION_MEDIUMP_DOUBLE) && !defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef mediump_ddualquat ddualquat; -#elif(!defined(GLM_PRECISION_HIGHP_DOUBLE) && !defined(GLM_PRECISION_MEDIUMP_DOUBLE) && defined(GLM_PRECISION_LOWP_DOUBLE)) - typedef lowp_ddualquat ddualquat; -#else -# error "GLM error: Multiple default precision requested for double-precision floating-point types" -#endif - - /// @} -} //namespace glm - -#include "dual_quaternion.inl" diff --git a/third_party/glm/gtx/dual_quaternion.inl b/third_party/glm/gtx/dual_quaternion.inl deleted file mode 100755 index fad07ea..0000000 --- a/third_party/glm/gtx/dual_quaternion.inl +++ /dev/null @@ -1,352 +0,0 @@ -/// @ref gtx_dual_quaternion - -#include "../geometric.hpp" -#include - -namespace glm -{ - // -- Component accesses -- - - template - GLM_FUNC_QUALIFIER typename tdualquat::part_type & tdualquat::operator[](typename tdualquat::length_type i) - { - assert(i >= 0 && i < this->length()); - return (&real)[i]; - } - - template - GLM_FUNC_QUALIFIER typename tdualquat::part_type const& tdualquat::operator[](typename tdualquat::length_type i) const - { - assert(i >= 0 && i < this->length()); - return (&real)[i]; - } - - // -- Implicit basic constructors -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat() -# if GLM_CONFIG_DEFAULTED_FUNCTIONS != GLM_DISABLE - : real(qua()) - , dual(qua(0, 0, 0, 0)) -# endif - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) - : real(d.real) - , dual(d.dual) - {} -# endif - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& d) - : real(d.real) - , dual(d.dual) - {} - - // -- Explicit basic constructors -- - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r) - : real(r), dual(qua(0, 0, 0, 0)) - 
{} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& q, vec<3, T, Q> const& p) - : real(q), dual( - T(-0.5) * ( p.x*q.x + p.y*q.y + p.z*q.z), - T(+0.5) * ( p.x*q.w + p.y*q.z - p.z*q.y), - T(+0.5) * (-p.x*q.z + p.y*q.w + p.z*q.x), - T(+0.5) * ( p.x*q.y - p.y*q.x + p.z*q.w)) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(qua const& r, qua const& d) - : real(r), dual(d) - {} - - // -- Conversion constructors -- - - template - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(tdualquat const& q) - : real(q.real) - , dual(q.dual) - {} - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<2, 4, T, Q> const& m) - { - *this = dualquat_cast(m); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR tdualquat::tdualquat(mat<3, 4, T, Q> const& m) - { - *this = dualquat_cast(m); - } - - // -- Unary arithmetic operators -- - -# if GLM_CONFIG_DEFAULTED_FUNCTIONS == GLM_DISABLE - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) - { - this->real = q.real; - this->dual = q.dual; - return *this; - } -# endif - - template - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator=(tdualquat const& q) - { - this->real = q.real; - this->dual = q.dual; - return *this; - } - - template - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator*=(U s) - { - this->real *= static_cast(s); - this->dual *= static_cast(s); - return *this; - } - - template - template - GLM_FUNC_QUALIFIER tdualquat & tdualquat::operator/=(U s) - { - this->real /= static_cast(s); - this->dual /= static_cast(s); - return *this; - } - - // -- Unary bit operators -- - - template - GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q) - { - return q; - } - - template - GLM_FUNC_QUALIFIER tdualquat operator-(tdualquat const& q) - { - return tdualquat(-q.real, -q.dual); - } - - // -- Binary operators -- - - template - GLM_FUNC_QUALIFIER tdualquat operator+(tdualquat const& q, tdualquat const& p) - { - return tdualquat(q.real + p.real,q.dual + p.dual); - } - - template - GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& p, tdualquat const& o) - { - return tdualquat(p.real * o.real,p.real * o.dual + p.dual * o.real); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(tdualquat const& q, vec<3, T, Q> const& v) - { - vec<3, T, Q> const real_v3(q.real.x,q.real.y,q.real.z); - vec<3, T, Q> const dual_v3(q.dual.x,q.dual.y,q.dual.z); - return (cross(real_v3, cross(real_v3,v) + v * q.real.w + dual_v3) + dual_v3 * q.real.w - real_v3 * q.dual.w) * T(2) + v; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> operator*(vec<3, T, Q> const& v, tdualquat const& q) - { - return glm::inverse(q) * v; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(tdualquat const& q, vec<4, T, Q> const& v) - { - return vec<4, T, Q>(q * vec<3, T, Q>(v), v.w); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> operator*(vec<4, T, Q> const& v, tdualquat const& q) - { - return glm::inverse(q) * v; - } - - template - GLM_FUNC_QUALIFIER tdualquat operator*(tdualquat const& q, T const& s) - { - return tdualquat(q.real * s, q.dual * s); - } - - template - GLM_FUNC_QUALIFIER tdualquat operator*(T const& s, tdualquat const& q) - { - return q * s; - } - - template - GLM_FUNC_QUALIFIER tdualquat operator/(tdualquat const& q, T const& s) - { - return tdualquat(q.real / s, q.dual / s); - } - - // -- Boolean operators -- - - template - GLM_FUNC_QUALIFIER bool operator==(tdualquat const& q1, tdualquat const& q2) - { - return 
(q1.real == q2.real) && (q1.dual == q2.dual); - } - - template - GLM_FUNC_QUALIFIER bool operator!=(tdualquat const& q1, tdualquat const& q2) - { - return (q1.real != q2.real) || (q1.dual != q2.dual); - } - - // -- Operations -- - - template - GLM_FUNC_QUALIFIER tdualquat dual_quat_identity() - { - return tdualquat( - qua(static_cast(1), static_cast(0), static_cast(0), static_cast(0)), - qua(static_cast(0), static_cast(0), static_cast(0), static_cast(0))); - } - - template - GLM_FUNC_QUALIFIER tdualquat normalize(tdualquat const& q) - { - return q / length(q.real); - } - - template - GLM_FUNC_QUALIFIER tdualquat lerp(tdualquat const& x, tdualquat const& y, T const& a) - { - // Dual Quaternion Linear blend aka DLB: - // Lerp is only defined in [0, 1] - assert(a >= static_cast(0)); - assert(a <= static_cast(1)); - T const k = dot(x.real,y.real) < static_cast(0) ? -a : a; - T const one(1); - return tdualquat(x * (one - a) + y * k); - } - - template - GLM_FUNC_QUALIFIER tdualquat inverse(tdualquat const& q) - { - const glm::qua real = conjugate(q.real); - const glm::qua dual = conjugate(q.dual); - return tdualquat(real, dual + (real * (-2.0f * dot(real,dual)))); - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> mat2x4_cast(tdualquat const& x) - { - return mat<2, 4, T, Q>( x[0].x, x[0].y, x[0].z, x[0].w, x[1].x, x[1].y, x[1].z, x[1].w ); - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> mat3x4_cast(tdualquat const& x) - { - qua r = x.real / length2(x.real); - - qua const rr(r.w * x.real.w, r.x * x.real.x, r.y * x.real.y, r.z * x.real.z); - r *= static_cast(2); - - T const xy = r.x * x.real.y; - T const xz = r.x * x.real.z; - T const yz = r.y * x.real.z; - T const wx = r.w * x.real.x; - T const wy = r.w * x.real.y; - T const wz = r.w * x.real.z; - - vec<4, T, Q> const a( - rr.w + rr.x - rr.y - rr.z, - xy - wz, - xz + wy, - -(x.dual.w * r.x - x.dual.x * r.w + x.dual.y * r.z - x.dual.z * r.y)); - - vec<4, T, Q> const b( - xy + wz, - rr.w + rr.y - rr.x - rr.z, - yz - wx, - -(x.dual.w * r.y - x.dual.x * r.z - x.dual.y * r.w + x.dual.z * r.x)); - - vec<4, T, Q> const c( - xz - wy, - yz + wx, - rr.w + rr.z - rr.x - rr.y, - -(x.dual.w * r.z + x.dual.x * r.y - x.dual.y * r.x - x.dual.z * r.w)); - - return mat<3, 4, T, Q>(a, b, c); - } - - template - GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<2, 4, T, Q> const& x) - { - return tdualquat( - qua( x[0].w, x[0].x, x[0].y, x[0].z ), - qua( x[1].w, x[1].x, x[1].y, x[1].z )); - } - - template - GLM_FUNC_QUALIFIER tdualquat dualquat_cast(mat<3, 4, T, Q> const& x) - { - qua real; - - T const trace = x[0].x + x[1].y + x[2].z; - if(trace > static_cast(0)) - { - T const r = sqrt(T(1) + trace); - T const invr = static_cast(0.5) / r; - real.w = static_cast(0.5) * r; - real.x = (x[2].y - x[1].z) * invr; - real.y = (x[0].z - x[2].x) * invr; - real.z = (x[1].x - x[0].y) * invr; - } - else if(x[0].x > x[1].y && x[0].x > x[2].z) - { - T const r = sqrt(T(1) + x[0].x - x[1].y - x[2].z); - T const invr = static_cast(0.5) / r; - real.x = static_cast(0.5)*r; - real.y = (x[1].x + x[0].y) * invr; - real.z = (x[0].z + x[2].x) * invr; - real.w = (x[2].y - x[1].z) * invr; - } - else if(x[1].y > x[2].z) - { - T const r = sqrt(T(1) + x[1].y - x[0].x - x[2].z); - T const invr = static_cast(0.5) / r; - real.x = (x[1].x + x[0].y) * invr; - real.y = static_cast(0.5) * r; - real.z = (x[2].y + x[1].z) * invr; - real.w = (x[0].z - x[2].x) * invr; - } - else - { - T const r = sqrt(T(1) + x[2].z - x[0].x - x[1].y); - T const invr = static_cast(0.5) / r; - real.x = (x[0].z + x[2].x) 
* invr; - real.y = (x[2].y + x[1].z) * invr; - real.z = static_cast(0.5) * r; - real.w = (x[1].x - x[0].y) * invr; - } - - qua dual; - dual.x = static_cast(0.5) * ( x[0].w * real.w + x[1].w * real.z - x[2].w * real.y); - dual.y = static_cast(0.5) * (-x[0].w * real.z + x[1].w * real.w + x[2].w * real.x); - dual.z = static_cast(0.5) * ( x[0].w * real.y - x[1].w * real.x + x[2].w * real.w); - dual.w = -static_cast(0.5) * ( x[0].w * real.x + x[1].w * real.y + x[2].w * real.z); - return tdualquat(real, dual); - } -}//namespace glm diff --git a/third_party/glm/gtx/easing.hpp b/third_party/glm/gtx/easing.hpp deleted file mode 100755 index 57f3d61..0000000 --- a/third_party/glm/gtx/easing.hpp +++ /dev/null @@ -1,219 +0,0 @@ -/// @ref gtx_easing -/// @file glm/gtx/easing.hpp -/// @author Robert Chisholm -/// -/// @see core (dependence) -/// -/// @defgroup gtx_easing GLM_GTX_easing -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Easing functions for animations and transitons -/// All functions take a parameter x in the range [0.0,1.0] -/// -/// Based on the AHEasing project of Warren Moore (https://github.com/warrenm/AHEasing) - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/constants.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_easing is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_easing extension included") -# endif -#endif - -namespace glm{ - /// @addtogroup gtx_easing - /// @{ - - /// Modelled after the line y = x - /// @see gtx_easing - template - GLM_FUNC_DECL genType linearInterpolation(genType const & a); - - /// Modelled after the parabola y = x^2 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quadraticEaseIn(genType const & a); - - /// Modelled after the parabola y = -x^2 + 2x - /// @see gtx_easing - template - GLM_FUNC_DECL genType quadraticEaseOut(genType const & a); - - /// Modelled after the piecewise quadratic - /// y = (1/2)((2x)^2) ; [0, 0.5) - /// y = -(1/2)((2x-1)*(2x-3) - 1) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType quadraticEaseInOut(genType const & a); - - /// Modelled after the cubic y = x^3 - template - GLM_FUNC_DECL genType cubicEaseIn(genType const & a); - - /// Modelled after the cubic y = (x - 1)^3 + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL genType cubicEaseOut(genType const & a); - - /// Modelled after the piecewise cubic - /// y = (1/2)((2x)^3) ; [0, 0.5) - /// y = (1/2)((2x-2)^3 + 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType cubicEaseInOut(genType const & a); - - /// Modelled after the quartic x^4 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quarticEaseIn(genType const & a); - - /// Modelled after the quartic y = 1 - (x - 1)^4 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quarticEaseOut(genType const & a); - - /// Modelled after the piecewise quartic - /// y = (1/2)((2x)^4) ; [0, 0.5) - /// y = -(1/2)((2x-2)^4 - 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType quarticEaseInOut(genType const & a); - - /// Modelled after the quintic y = x^5 - /// @see gtx_easing - template - GLM_FUNC_DECL genType quinticEaseIn(genType const & a); - - /// Modelled after the quintic y = (x - 1)^5 + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL 
genType quinticEaseOut(genType const & a); - - /// Modelled after the piecewise quintic - /// y = (1/2)((2x)^5) ; [0, 0.5) - /// y = (1/2)((2x-2)^5 + 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType quinticEaseInOut(genType const & a); - - /// Modelled after quarter-cycle of sine wave - /// @see gtx_easing - template - GLM_FUNC_DECL genType sineEaseIn(genType const & a); - - /// Modelled after quarter-cycle of sine wave (different phase) - /// @see gtx_easing - template - GLM_FUNC_DECL genType sineEaseOut(genType const & a); - - /// Modelled after half sine wave - /// @see gtx_easing - template - GLM_FUNC_DECL genType sineEaseInOut(genType const & a); - - /// Modelled after shifted quadrant IV of unit circle - /// @see gtx_easing - template - GLM_FUNC_DECL genType circularEaseIn(genType const & a); - - /// Modelled after shifted quadrant II of unit circle - /// @see gtx_easing - template - GLM_FUNC_DECL genType circularEaseOut(genType const & a); - - /// Modelled after the piecewise circular function - /// y = (1/2)(1 - sqrt(1 - 4x^2)) ; [0, 0.5) - /// y = (1/2)(sqrt(-(2x - 3)*(2x - 1)) + 1) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType circularEaseInOut(genType const & a); - - /// Modelled after the exponential function y = 2^(10(x - 1)) - /// @see gtx_easing - template - GLM_FUNC_DECL genType exponentialEaseIn(genType const & a); - - /// Modelled after the exponential function y = -2^(-10x) + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL genType exponentialEaseOut(genType const & a); - - /// Modelled after the piecewise exponential - /// y = (1/2)2^(10(2x - 1)) ; [0,0.5) - /// y = -(1/2)*2^(-10(2x - 1))) + 1 ; [0.5,1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType exponentialEaseInOut(genType const & a); - - /// Modelled after the damped sine wave y = sin(13pi/2*x)*pow(2, 10 * (x - 1)) - /// @see gtx_easing - template - GLM_FUNC_DECL genType elasticEaseIn(genType const & a); - - /// Modelled after the damped sine wave y = sin(-13pi/2*(x + 1))*pow(2, -10x) + 1 - /// @see gtx_easing - template - GLM_FUNC_DECL genType elasticEaseOut(genType const & a); - - /// Modelled after the piecewise exponentially-damped sine wave: - /// y = (1/2)*sin(13pi/2*(2*x))*pow(2, 10 * ((2*x) - 1)) ; [0,0.5) - /// y = (1/2)*(sin(-13pi/2*((2x-1)+1))*pow(2,-10(2*x-1)) + 2) ; [0.5, 1] - /// @see gtx_easing - template - GLM_FUNC_DECL genType elasticEaseInOut(genType const & a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseIn(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseOut(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseInOut(genType const& a); - - /// @param a parameter - /// @param o Optional overshoot modifier - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseIn(genType const& a, genType const& o); - - /// @param a parameter - /// @param o Optional overshoot modifier - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseOut(genType const& a, genType const& o); - - /// @param a parameter - /// @param o Optional overshoot modifier - /// @see gtx_easing - template - GLM_FUNC_DECL genType backEaseInOut(genType const& a, genType const& o); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType bounceEaseIn(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType bounceEaseOut(genType const& a); - - /// @see gtx_easing - template - GLM_FUNC_DECL genType bounceEaseInOut(genType const& a); - - /// @} 
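For reference, a minimal sketch of how the GTX_easing functions above are typically driven (illustrative only; the eased_blend/animate helpers and the elapsed/duration normalisation are assumptions, not part of GLM):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/compatibility.hpp> // glm::lerp
#include <glm/gtx/easing.hpp>

// The easing functions assert their argument is in [0, 1], so clamp first.
float eased_blend(float elapsed, float duration)
{
	float t = glm::clamp(elapsed / duration, 0.0f, 1.0f);
	return glm::quadraticEaseInOut(t);
}

glm::vec3 animate(const glm::vec3& from, const glm::vec3& to, float elapsed, float duration)
{
	return glm::lerp(from, to, eased_blend(elapsed, duration));
}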
-}//namespace glm - -#include "easing.inl" diff --git a/third_party/glm/gtx/easing.inl b/third_party/glm/gtx/easing.inl deleted file mode 100755 index 4b7d05b..0000000 --- a/third_party/glm/gtx/easing.inl +++ /dev/null @@ -1,436 +0,0 @@ -/// @ref gtx_easing - -#include - -namespace glm{ - - template - GLM_FUNC_QUALIFIER genType linearInterpolation(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a; - } - - template - GLM_FUNC_QUALIFIER genType quadraticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a; - } - - template - GLM_FUNC_QUALIFIER genType quadraticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return -(a * (a - static_cast(2))); - } - - template - GLM_FUNC_QUALIFIER genType quadraticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(2) * a * a; - } - else - { - return (-static_cast(2) * a * a) + (4 * a) - one(); - } - } - - template - GLM_FUNC_QUALIFIER genType cubicEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a * a; - } - - template - GLM_FUNC_QUALIFIER genType cubicEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType const f = a - one(); - return f * f * f + one(); - } - - template - GLM_FUNC_QUALIFIER genType cubicEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if (a < static_cast(0.5)) - { - return static_cast(4) * a * a * a; - } - else - { - genType const f = ((static_cast(2) * a) - static_cast(2)); - return static_cast(0.5) * f * f * f + one(); - } - } - - template - GLM_FUNC_QUALIFIER genType quarticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a * a * a; - } - - template - GLM_FUNC_QUALIFIER genType quarticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType const f = (a - one()); - return f * f * f * (one() - a) + one(); - } - - template - GLM_FUNC_QUALIFIER genType quarticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(8) * a * a * a * a; - } - else - { - genType const f = (a - one()); - return -static_cast(8) * f * f * f * f + one(); - } - } - - template - GLM_FUNC_QUALIFIER genType quinticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return a * a * a * a * a; - } - - template - GLM_FUNC_QUALIFIER genType quinticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType const f = (a - one()); - return f * f * f * f * f + one(); - } - - template - GLM_FUNC_QUALIFIER genType quinticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(16) * a * a * a * a * a; - } - else - { - genType const f = ((static_cast(2) * a) - static_cast(2)); - return static_cast(0.5) * f * f * f * f * f + one(); - } - } - - template - GLM_FUNC_QUALIFIER genType sineEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= 
one()); - - return sin((a - one()) * half_pi()) + one(); - } - - template - GLM_FUNC_QUALIFIER genType sineEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return sin(a * half_pi()); - } - - template - GLM_FUNC_QUALIFIER genType sineEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return static_cast(0.5) * (one() - cos(a * pi())); - } - - template - GLM_FUNC_QUALIFIER genType circularEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return one() - sqrt(one() - (a * a)); - } - - template - GLM_FUNC_QUALIFIER genType circularEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return sqrt((static_cast(2) - a) * a); - } - - template - GLM_FUNC_QUALIFIER genType circularEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(0.5) * (one() - std::sqrt(one() - static_cast(4) * (a * a))); - } - else - { - return static_cast(0.5) * (std::sqrt(-((static_cast(2) * a) - static_cast(3)) * ((static_cast(2) * a) - one())) + one()); - } - } - - template - GLM_FUNC_QUALIFIER genType exponentialEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a <= zero()) - return a; - else - { - genType const Complementary = a - one(); - genType const Two = static_cast(2); - - return glm::pow(Two, Complementary * static_cast(10)); - } - } - - template - GLM_FUNC_QUALIFIER genType exponentialEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a >= one()) - return a; - else - { - return one() - glm::pow(static_cast(2), -static_cast(10) * a); - } - } - - template - GLM_FUNC_QUALIFIER genType exponentialEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - return static_cast(0.5) * glm::pow(static_cast(2), (static_cast(20) * a) - static_cast(10)); - else - return -static_cast(0.5) * glm::pow(static_cast(2), (-static_cast(20) * a) + static_cast(10)) + one(); - } - - template - GLM_FUNC_QUALIFIER genType elasticEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return std::sin(static_cast(13) * half_pi() * a) * glm::pow(static_cast(2), static_cast(10) * (a - one())); - } - - template - GLM_FUNC_QUALIFIER genType elasticEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return std::sin(-static_cast(13) * half_pi() * (a + one())) * glm::pow(static_cast(2), -static_cast(10) * a) + one(); - } - - template - GLM_FUNC_QUALIFIER genType elasticEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - return static_cast(0.5) * std::sin(static_cast(13) * half_pi() * (static_cast(2) * a)) * glm::pow(static_cast(2), static_cast(10) * ((static_cast(2) * a) - one())); - else - return static_cast(0.5) * (std::sin(-static_cast(13) * half_pi() * ((static_cast(2) * a - one()) + one())) * glm::pow(static_cast(2), -static_cast(10) * (static_cast(2) * a - one())) + static_cast(2)); - } - - template - GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a, genType const& o) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= 
one()); - - genType z = ((o + one()) * a) - o; - return (a * a * z); - } - - template - GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a, genType const& o) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType n = a - one(); - genType z = ((o + one()) * n) + o; - return (n * n * z) + one(); - } - - template - GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a, genType const& o) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - genType s = o * static_cast(1.525); - genType x = static_cast(0.5); - genType n = a / static_cast(0.5); - - if (n < static_cast(1)) - { - genType z = ((s + static_cast(1)) * n) - s; - genType m = n * n * z; - return x * m; - } - else - { - n -= static_cast(2); - genType z = ((s + static_cast(1)) * n) + s; - genType m = (n*n*z) + static_cast(2); - return x * m; - } - } - - template - GLM_FUNC_QUALIFIER genType backEaseIn(genType const& a) - { - return backEaseIn(a, static_cast(1.70158)); - } - - template - GLM_FUNC_QUALIFIER genType backEaseOut(genType const& a) - { - return backEaseOut(a, static_cast(1.70158)); - } - - template - GLM_FUNC_QUALIFIER genType backEaseInOut(genType const& a) - { - return backEaseInOut(a, static_cast(1.70158)); - } - - template - GLM_FUNC_QUALIFIER genType bounceEaseOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(4.0 / 11.0)) - { - return (static_cast(121) * a * a) / static_cast(16); - } - else if(a < static_cast(8.0 / 11.0)) - { - return (static_cast(363.0 / 40.0) * a * a) - (static_cast(99.0 / 10.0) * a) + static_cast(17.0 / 5.0); - } - else if(a < static_cast(9.0 / 10.0)) - { - return (static_cast(4356.0 / 361.0) * a * a) - (static_cast(35442.0 / 1805.0) * a) + static_cast(16061.0 / 1805.0); - } - else - { - return (static_cast(54.0 / 5.0) * a * a) - (static_cast(513.0 / 25.0) * a) + static_cast(268.0 / 25.0); - } - } - - template - GLM_FUNC_QUALIFIER genType bounceEaseIn(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - return one() - bounceEaseOut(one() - a); - } - - template - GLM_FUNC_QUALIFIER genType bounceEaseInOut(genType const& a) - { - // Only defined in [0, 1] - assert(a >= zero()); - assert(a <= one()); - - if(a < static_cast(0.5)) - { - return static_cast(0.5) * (one() - bounceEaseOut(a * static_cast(2))); - } - else - { - return static_cast(0.5) * bounceEaseOut(a * static_cast(2) - one()) + static_cast(0.5); - } - } - -}//namespace glm diff --git a/third_party/glm/gtx/euler_angles.hpp b/third_party/glm/gtx/euler_angles.hpp deleted file mode 100755 index 2723697..0000000 --- a/third_party/glm/gtx/euler_angles.hpp +++ /dev/null @@ -1,335 +0,0 @@ -/// @ref gtx_euler_angles -/// @file glm/gtx/euler_angles.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_euler_angles GLM_GTX_euler_angles -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Build matrices from Euler angles. -/// -/// Extraction of Euler angles from rotation matrix. -/// Based on the original paper 2014 Mike Day - Extracting Euler Angles from a Rotation Matrix. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_euler_angles is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_euler_angles extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_euler_angles - /// @{ - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle X. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleX( - T const& angleX); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Y. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleY( - T const& angleY); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from an euler angle Z. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZ( - T const& angleZ); - - /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about X-axis. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleX( - T const & angleX, T const & angularVelocityX); - - /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Y-axis. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleY( - T const & angleY, T const & angularVelocityY); - - /// Creates a 3D 4 * 4 homogeneous derived matrix from the rotation matrix about Z-axis. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> derivedEulerAngleZ( - T const & angleZ, T const & angularVelocityZ); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXY( - T const& angleX, - T const& angleY); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYX( - T const& angleY, - T const& angleX); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZ( - T const& angleX, - T const& angleZ); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZX( - T const& angle, - T const& angleX); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZ( - T const& angleY, - T const& angleZ); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZY( - T const& angleZ, - T const& angleY); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYZ( - T const& t1, - T const& t2, - T const& t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXZ( - T const& yaw, - T const& pitch, - T const& roll); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * X). 
- /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Y * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXYX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYXY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYZ( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXZ( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (X * Z * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleXZY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * Z * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleYZX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * Y * X). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZYX( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Z * X * Y). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> eulerAngleZXY( - T const & t1, - T const & t2, - T const & t3); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, defaultp> yawPitchRoll( - T const& yaw, - T const& pitch, - T const& roll); - - /// Creates a 2D 2 * 2 rotation matrix from an euler angle. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<2, 2, T, defaultp> orientate2(T const& angle); - - /// Creates a 2D 4 * 4 homogeneous rotation matrix from an euler angle. - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<3, 3, T, defaultp> orientate3(T const& angle); - - /// Creates a 3D 3 * 3 rotation matrix from euler angles (Y * X * Z). - /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<3, 3, T, Q> orientate3(vec<3, T, Q> const& angles); - - /// Creates a 3D 4 * 4 homogeneous rotation matrix from euler angles (Y * X * Z). 
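For reference, a minimal sketch pairing the GTX_euler_angles builders with their matching extractors (illustrative only; the angle values are arbitrary):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/euler_angles.hpp>

void euler_roundtrip()
{
	float yaw = 0.3f, pitch = 0.1f, roll = 0.2f;

	// Build a rotation from Euler angles in (Y * X * Z) order...
	glm::mat4 m = glm::yawPitchRoll(yaw, pitch, roll);

	// ...and recover the same angles from the rotation matrix.
	float y = 0.0f, p = 0.0f, r = 0.0f;
	glm::extractEulerAngleYXZ(m, y, p, r);
}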
- /// @see gtx_euler_angles - template - GLM_FUNC_DECL mat<4, 4, T, Q> orientate4(vec<3, T, Q> const& angles); - - /// Extracts the (X * Y * Z) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Y * X * Z) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (X * Z * X) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (X * Y * X) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Y * X * Y) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Y * Z * Y) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Z * Y * Z) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Z * X * Z) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (X * Z * Y) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Y * Z * X) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Z * Y * X) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// Extracts the (Z * X * Y) Euler angles from the rotation matrix M - /// @see gtx_euler_angles - template - GLM_FUNC_DECL void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3); - - /// @} -}//namespace glm - -#include "euler_angles.inl" diff --git a/third_party/glm/gtx/euler_angles.inl b/third_party/glm/gtx/euler_angles.inl deleted file mode 100755 index 68c5012..0000000 --- a/third_party/glm/gtx/euler_angles.inl +++ /dev/null @@ -1,899 +0,0 @@ -/// @ref gtx_euler_angles - -#include "compatibility.hpp" // glm::atan2 - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleX - ( - T const& angleX - ) - { - T cosX = glm::cos(angleX); - T sinX = glm::sin(angleX); - - return mat<4, 4, T, defaultp>( - T(1), T(0), T(0), T(0), - T(0), cosX, sinX, T(0), - T(0),-sinX, cosX, T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleY - ( - T const& angleY - ) - { - T cosY = glm::cos(angleY); - T sinY = 
glm::sin(angleY); - - return mat<4, 4, T, defaultp>( - cosY, T(0), -sinY, T(0), - T(0), T(1), T(0), T(0), - sinY, T(0), cosY, T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZ - ( - T const& angleZ - ) - { - T cosZ = glm::cos(angleZ); - T sinZ = glm::sin(angleZ); - - return mat<4, 4, T, defaultp>( - cosZ, sinZ, T(0), T(0), - -sinZ, cosZ, T(0), T(0), - T(0), T(0), T(1), T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleX - ( - T const & angleX, - T const & angularVelocityX - ) - { - T cosX = glm::cos(angleX) * angularVelocityX; - T sinX = glm::sin(angleX) * angularVelocityX; - - return mat<4, 4, T, defaultp>( - T(0), T(0), T(0), T(0), - T(0),-sinX, cosX, T(0), - T(0),-cosX,-sinX, T(0), - T(0), T(0), T(0), T(0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleY - ( - T const & angleY, - T const & angularVelocityY - ) - { - T cosY = glm::cos(angleY) * angularVelocityY; - T sinY = glm::sin(angleY) * angularVelocityY; - - return mat<4, 4, T, defaultp>( - -sinY, T(0), -cosY, T(0), - T(0), T(0), T(0), T(0), - cosY, T(0), -sinY, T(0), - T(0), T(0), T(0), T(0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> derivedEulerAngleZ - ( - T const & angleZ, - T const & angularVelocityZ - ) - { - T cosZ = glm::cos(angleZ) * angularVelocityZ; - T sinZ = glm::sin(angleZ) * angularVelocityZ; - - return mat<4, 4, T, defaultp>( - -sinZ, cosZ, T(0), T(0), - -cosZ, -sinZ, T(0), T(0), - T(0), T(0), T(0), T(0), - T(0), T(0), T(0), T(0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXY - ( - T const& angleX, - T const& angleY - ) - { - T cosX = glm::cos(angleX); - T sinX = glm::sin(angleX); - T cosY = glm::cos(angleY); - T sinY = glm::sin(angleY); - - return mat<4, 4, T, defaultp>( - cosY, -sinX * -sinY, cosX * -sinY, T(0), - T(0), cosX, sinX, T(0), - sinY, -sinX * cosY, cosX * cosY, T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYX - ( - T const& angleY, - T const& angleX - ) - { - T cosX = glm::cos(angleX); - T sinX = glm::sin(angleX); - T cosY = glm::cos(angleY); - T sinY = glm::sin(angleY); - - return mat<4, 4, T, defaultp>( - cosY, 0, -sinY, T(0), - sinY * sinX, cosX, cosY * sinX, T(0), - sinY * cosX, -sinX, cosY * cosX, T(0), - T(0), T(0), T(0), T(1)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZ - ( - T const& angleX, - T const& angleZ - ) - { - return eulerAngleX(angleX) * eulerAngleZ(angleZ); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZX - ( - T const& angleZ, - T const& angleX - ) - { - return eulerAngleZ(angleZ) * eulerAngleX(angleX); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZ - ( - T const& angleY, - T const& angleZ - ) - { - return eulerAngleY(angleY) * eulerAngleZ(angleZ); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZY - ( - T const& angleZ, - T const& angleY - ) - { - return eulerAngleZ(angleZ) * eulerAngleY(angleY); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ - ( - T const& t1, - T const& t2, - T const& t3 - ) - { - T c1 = glm::cos(-t1); - T c2 = glm::cos(-t2); - T c3 = glm::cos(-t3); - T s1 = glm::sin(-t1); - T s2 = glm::sin(-t2); - T s3 = glm::sin(-t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2 * c3; - Result[0][1] =-c1 * s3 + s1 * s2 * c3; - Result[0][2] = s1 * s3 + c1 * s2 * c3; - Result[0][3] = 
static_cast(0); - Result[1][0] = c2 * s3; - Result[1][1] = c1 * c3 + s1 * s2 * s3; - Result[1][2] =-s1 * c3 + c1 * s2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] =-s2; - Result[2][1] = s1 * c2; - Result[2][2] = c1 * c2; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXZ - ( - T const& yaw, - T const& pitch, - T const& roll - ) - { - T tmp_ch = glm::cos(yaw); - T tmp_sh = glm::sin(yaw); - T tmp_cp = glm::cos(pitch); - T tmp_sp = glm::sin(pitch); - T tmp_cb = glm::cos(roll); - T tmp_sb = glm::sin(roll); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; - Result[0][1] = tmp_sb * tmp_cp; - Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; - Result[0][3] = static_cast(0); - Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; - Result[1][1] = tmp_cb * tmp_cp; - Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; - Result[1][3] = static_cast(0); - Result[2][0] = tmp_sh * tmp_cp; - Result[2][1] = -tmp_sp; - Result[2][2] = tmp_ch * tmp_cp; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2; - Result[0][1] = c1 * s2; - Result[0][2] = s1 * s2; - Result[0][3] = static_cast(0); - Result[1][0] =-c3 * s2; - Result[1][1] = c1 * c2 * c3 - s1 * s3; - Result[1][2] = c1 * s3 + c2 * c3 * s1; - Result[1][3] = static_cast(0); - Result[2][0] = s2 * s3; - Result[2][1] =-c3 * s1 - c1 * c2 * s3; - Result[2][2] = c1 * c3 - c2 * s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2; - Result[0][1] = s1 * s2; - Result[0][2] =-c1 * s2; - Result[0][3] = static_cast(0); - Result[1][0] = s2 * s3; - Result[1][1] = c1 * c3 - c2 * s1 * s3; - Result[1][2] = c3 * s1 + c1 * c2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] = c3 * s2; - Result[2][1] =-c1 * s3 - c2 * c3 * s1; - Result[2][2] = c1 * c2 * c3 - s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYXY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c3 - c2 * s1 * s3; - Result[0][1] = s2* s3; - Result[0][2] =-c3 * s1 - c1 * c2 * s3; - Result[0][3] = static_cast(0); - 
Result[1][0] = s1 * s2; - Result[1][1] = c2; - Result[1][2] = c1 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = c1 * s3 + c2 * c3 * s1; - Result[2][1] =-c3 * s2; - Result[2][2] = c1 * c2 * c3 - s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2 * c3 - s1 * s3; - Result[0][1] = c3 * s2; - Result[0][2] =-c1 * s3 - c2 * c3 * s1; - Result[0][3] = static_cast(0); - Result[1][0] =-c1 * s2; - Result[1][1] = c2; - Result[1][2] = s1 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = c3 * s1 + c1 * c2 * s3; - Result[2][1] = s2 * s3; - Result[2][2] = c1 * c3 - c2 * s1 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYZ - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2 * c3 - s1 * s3; - Result[0][1] = c1 * s3 + c2 * c3 * s1; - Result[0][2] =-c3 * s2; - Result[0][3] = static_cast(0); - Result[1][0] =-c3 * s1 - c1 * c2 * s3; - Result[1][1] = c1 * c3 - c2 * s1 * s3; - Result[1][2] = s2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] = c1 * s2; - Result[2][1] = s1 * s2; - Result[2][2] = c2; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXZ - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c3 - c2 * s1 * s3; - Result[0][1] = c3 * s1 + c1 * c2 * s3; - Result[0][2] = s2 *s3; - Result[0][3] = static_cast(0); - Result[1][0] =-c1 * s3 - c2 * c3 * s1; - Result[1][1] = c1 * c2 * c3 - s1 * s3; - Result[1][2] = c3 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = s1 * s2; - Result[2][1] =-c1 * s2; - Result[2][2] = c2; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXZY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c2 * c3; - Result[0][1] = s1 * s3 + c1 * c3 * s2; - Result[0][2] = c3 * s1 * s2 - c1 * s3; - Result[0][3] = static_cast(0); - Result[1][0] =-s2; - Result[1][1] = c1 * c2; - Result[1][2] = c2 * s1; - Result[1][3] = static_cast(0); - Result[2][0] = c2 * s3; - Result[2][1] = c1 * s2 * s3 - c3 * s1; - 
Result[2][2] = c1 * c3 + s1 * s2 *s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleYZX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2; - Result[0][1] = s2; - Result[0][2] =-c2 * s1; - Result[0][3] = static_cast(0); - Result[1][0] = s1 * s3 - c1 * c3 * s2; - Result[1][1] = c2 * c3; - Result[1][2] = c1 * s3 + c3 * s1 * s2; - Result[1][3] = static_cast(0); - Result[2][0] = c3 * s1 + c1 * s2 * s3; - Result[2][1] =-c2 * s3; - Result[2][2] = c1 * c3 - s1 * s2 * s3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZYX - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c2; - Result[0][1] = c2 * s1; - Result[0][2] =-s2; - Result[0][3] = static_cast(0); - Result[1][0] = c1 * s2 * s3 - c3 * s1; - Result[1][1] = c1 * c3 + s1 * s2 * s3; - Result[1][2] = c2 * s3; - Result[1][3] = static_cast(0); - Result[2][0] = s1 * s3 + c1 * c3 * s2; - Result[2][1] = c3 * s1 * s2 - c1 * s3; - Result[2][2] = c2 * c3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleZXY - ( - T const & t1, - T const & t2, - T const & t3 - ) - { - T c1 = glm::cos(t1); - T s1 = glm::sin(t1); - T c2 = glm::cos(t2); - T s2 = glm::sin(t2); - T c3 = glm::cos(t3); - T s3 = glm::sin(t3); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = c1 * c3 - s1 * s2 * s3; - Result[0][1] = c3 * s1 + c1 * s2 * s3; - Result[0][2] =-c2 * s3; - Result[0][3] = static_cast(0); - Result[1][0] =-c2 * s1; - Result[1][1] = c1 * c2; - Result[1][2] = s2; - Result[1][3] = static_cast(0); - Result[2][0] = c1 * s3 + c3 * s1 * s2; - Result[2][1] = s1 * s3 - c1 * c3 * s2; - Result[2][2] = c2 * c3; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> yawPitchRoll - ( - T const& yaw, - T const& pitch, - T const& roll - ) - { - T tmp_ch = glm::cos(yaw); - T tmp_sh = glm::sin(yaw); - T tmp_cp = glm::cos(pitch); - T tmp_sp = glm::sin(pitch); - T tmp_cb = glm::cos(roll); - T tmp_sb = glm::sin(roll); - - mat<4, 4, T, defaultp> Result; - Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb; - Result[0][1] = tmp_sb * tmp_cp; - Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb; - Result[0][3] = static_cast(0); - Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb; - Result[1][1] = tmp_cb * tmp_cp; - Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb; - Result[1][3] = static_cast(0); - Result[2][0] = tmp_sh * tmp_cp; - Result[2][1] = -tmp_sp; - Result[2][2] = 
tmp_ch * tmp_cp; - Result[2][3] = static_cast(0); - Result[3][0] = static_cast(0); - Result[3][1] = static_cast(0); - Result[3][2] = static_cast(0); - Result[3][3] = static_cast(1); - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, defaultp> orientate2 - ( - T const& angle - ) - { - T c = glm::cos(angle); - T s = glm::sin(angle); - - mat<2, 2, T, defaultp> Result; - Result[0][0] = c; - Result[0][1] = s; - Result[1][0] = -s; - Result[1][1] = c; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, defaultp> orientate3 - ( - T const& angle - ) - { - T c = glm::cos(angle); - T s = glm::sin(angle); - - mat<3, 3, T, defaultp> Result; - Result[0][0] = c; - Result[0][1] = s; - Result[0][2] = 0.0f; - Result[1][0] = -s; - Result[1][1] = c; - Result[1][2] = 0.0f; - Result[2][0] = 0.0f; - Result[2][1] = 0.0f; - Result[2][2] = 1.0f; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orientate3 - ( - vec<3, T, Q> const& angles - ) - { - return mat<3, 3, T, Q>(yawPitchRoll(angles.z, angles.x, angles.y)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientate4 - ( - vec<3, T, Q> const& angles - ) - { - return yawPitchRoll(angles.z, angles.x, angles.y); - } - - template - GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const& M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][1], M[2][2]); - T C2 = glm::sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]); - T T2 = glm::atan2(-M[2][0], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]); - t1 = -T1; - t2 = -T2; - t3 = -T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYXZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][0], M[2][2]); - T C2 = glm::sqrt(M[0][1]*M[0][1] + M[1][1]*M[1][1]); - T T2 = glm::atan2(-M[2][1], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[1][2] - C1*M[1][0], C1*M[0][0] - S1*M[0][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleXZX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[0][2], M[0][1]); - T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); - T T2 = glm::atan2(S2, M[0][0]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[1][2] - S1*M[1][1], C1*M[2][2] - S1*M[2][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleXYX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[0][1], -M[0][2]); - T S2 = glm::sqrt(M[1][0]*M[1][0] + M[2][0]*M[2][0]); - T T2 = glm::atan2(S2, M[0][0]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(-C1*M[2][1] - S1*M[2][2], C1*M[1][1] + S1*M[1][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYXY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[1][0], M[1][2]); - T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]); - T T2 = glm::atan2(S2, M[1][1]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[2][0] - S1*M[2][2], C1*M[0][0] - S1*M[0][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYZY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[1][2], -M[1][0]); - T S2 = glm::sqrt(M[0][1]*M[0][1] + M[2][1]*M[2][1]); - T T2 = glm::atan2(S2, M[1][1]); - T S1 = glm::sin(T1); - 
T C1 = glm::cos(T1); - T T3 = glm::atan2(-S1*M[0][0] - C1*M[0][2], S1*M[2][0] + C1*M[2][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZYZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][1], M[2][0]); - T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); - T T2 = glm::atan2(S2, M[2][2]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[0][1] - S1*M[0][0], C1*M[1][1] - S1*M[1][0]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZXZ(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[2][0], -M[2][1]); - T S2 = glm::sqrt(M[0][2]*M[0][2] + M[1][2]*M[1][2]); - T T2 = glm::atan2(S2, M[2][2]); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(-C1*M[1][0] - S1*M[1][1], C1*M[0][0] + S1*M[0][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleXZY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[1][2], M[1][1]); - T C2 = glm::sqrt(M[0][0]*M[0][0] + M[2][0]*M[2][0]); - T T2 = glm::atan2(-M[1][0], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[0][1] - C1*M[0][2], C1*M[2][2] - S1*M[2][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleYZX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(-M[0][2], M[0][0]); - T C2 = glm::sqrt(M[1][1]*M[1][1] + M[2][1]*M[2][1]); - T T2 = glm::atan2(M[0][1], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[1][0] + C1*M[1][2], S1*M[2][0] + C1*M[2][2]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZYX(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(M[0][1], M[0][0]); - T C2 = glm::sqrt(M[1][2]*M[1][2] + M[2][2]*M[2][2]); - T T2 = glm::atan2(-M[0][2], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(S1*M[2][0] - C1*M[2][1], C1*M[1][1] - S1*M[1][0]); - t1 = T1; - t2 = T2; - t3 = T3; - } - - template - GLM_FUNC_QUALIFIER void extractEulerAngleZXY(mat<4, 4, T, defaultp> const & M, - T & t1, - T & t2, - T & t3) - { - T T1 = glm::atan2(-M[1][0], M[1][1]); - T C2 = glm::sqrt(M[0][2]*M[0][2] + M[2][2]*M[2][2]); - T T2 = glm::atan2(M[1][2], C2); - T S1 = glm::sin(T1); - T C1 = glm::cos(T1); - T T3 = glm::atan2(C1*M[2][0] + S1*M[2][1], C1*M[0][0] + S1*M[0][1]); - t1 = T1; - t2 = T2; - t3 = T3; - } -}//namespace glm diff --git a/third_party/glm/gtx/extend.hpp b/third_party/glm/gtx/extend.hpp deleted file mode 100755 index 28b7c5c..0000000 --- a/third_party/glm/gtx/extend.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/// @ref gtx_extend -/// @file glm/gtx/extend.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_extend GLM_GTX_extend -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Extend a position from a source to a position at a defined length. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_extend extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_extend - /// @{ - - /// Extends of Length the Origin position using the (Source - Origin) direction. - /// @see gtx_extend - template - GLM_FUNC_DECL genType extend( - genType const& Origin, - genType const& Source, - typename genType::value_type const Length); - - /// @} -}//namespace glm - -#include "extend.inl" diff --git a/third_party/glm/gtx/extend.inl b/third_party/glm/gtx/extend.inl deleted file mode 100755 index 32128eb..0000000 --- a/third_party/glm/gtx/extend.inl +++ /dev/null @@ -1,48 +0,0 @@ -/// @ref gtx_extend - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType extend - ( - genType const& Origin, - genType const& Source, - genType const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> extend - ( - vec<2, T, Q> const& Origin, - vec<2, T, Q> const& Source, - T const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> extend - ( - vec<3, T, Q> const& Origin, - vec<3, T, Q> const& Source, - T const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> extend - ( - vec<4, T, Q> const& Origin, - vec<4, T, Q> const& Source, - T const& Distance - ) - { - return Origin + (Source - Origin) * Distance; - } -}//namespace glm diff --git a/third_party/glm/gtx/extended_min_max.hpp b/third_party/glm/gtx/extended_min_max.hpp deleted file mode 100755 index ad23a91..0000000 --- a/third_party/glm/gtx/extended_min_max.hpp +++ /dev/null @@ -1,182 +0,0 @@ -/// @ref gtx_extended_min_max -/// @file glm/gtx/extended_min_max.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_extended_min_max GLM_GTX_extented_min_max -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Min and max functions for 3 to 4 parameters. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_extented_min_max is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_extented_min_max extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_extended_min_max - /// @{ - - /// Return the minimum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T min( - T const& x, - T const& y, - T const& z); - - /// Return the minimum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - typename C::T const& y, - typename C::T const& z); - - /// Return the minimum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - C const& y, - C const& z); - - /// Return the minimum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T min( - T const& x, - T const& y, - T const& z, - T const& w); - - /// Return the minimum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w); - - /// Return the minimum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C min( - C const& x, - C const& y, - C const& z, - C const& w); - - /// Return the maximum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T max( - T const& x, - T const& y, - T const& z); - - /// Return the maximum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - typename C::T const& y, - typename C::T const& z); - - /// Return the maximum component-wise values of 3 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - C const& y, - C const& z); - - /// Return the maximum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL T max( - T const& x, - T const& y, - T const& z, - T const& w); - - /// Return the maximum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w); - - /// Return the maximum component-wise values of 4 inputs - /// @see gtx_extented_min_max - template class C> - GLM_FUNC_DECL C max( - C const& x, - C const& y, - C const& z, - C const& w); - - /// Returns y if y < x; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam genType Floating-point or integer; scalar or vector types. - /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL genType fmin(genType x, genType y); - - /// Returns y if x < y; otherwise, it returns x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam genType Floating-point; scalar or vector types. - /// - /// @see gtx_extented_min_max - /// @see std::fmax documentation - template - GLM_FUNC_DECL genType fmax(genType x, genType y); - - /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam genType Floating-point scalar or vector types. 
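/* A small sketch of the 3- and 4-argument min/max overloads declared above, using
 * hypothetical scalar values; fmin follows the NaN rule stated in the comments.
 * Assumes GLM_ENABLE_EXPERIMENTAL is defined. */
#define GLM_ENABLE_EXPERIMENTAL
#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtx/extended_min_max.hpp>

float extendedMinMaxSketch()
{
	float lowest = glm::min(3.0f, 1.0f, 2.0f);       // 1.0f
	float widest = glm::max(3.0f, 1.0f, 2.0f, 4.0f); // 4.0f
	float notNan = glm::fmin(NAN, 4.0f);             // 4.0f: the NaN argument is ignored
	return lowest + widest + notNan;
}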
- /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL genType fclamp(genType x, genType minVal, genType maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL vec fclamp(vec const& x, T minVal, T maxVal); - - /// Returns min(max(x, minVal), maxVal) for each component in x. If one of the two arguments is NaN, the value of the other argument is returned. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see gtx_extented_min_max - template - GLM_FUNC_DECL vec fclamp(vec const& x, vec const& minVal, vec const& maxVal); - - - /// @} -}//namespace glm - -#include "extended_min_max.inl" diff --git a/third_party/glm/gtx/extended_min_max.inl b/third_party/glm/gtx/extended_min_max.inl deleted file mode 100755 index e72d1cc..0000000 --- a/third_party/glm/gtx/extended_min_max.inl +++ /dev/null @@ -1,218 +0,0 @@ -/// @ref gtx_extended_min_max - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T min( - T const& x, - T const& y, - T const& z) - { - return glm::min(glm::min(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - typename C::T const& y, - typename C::T const& z - ) - { - return glm::min(glm::min(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - C const& y, - C const& z - ) - { - return glm::min(glm::min(x, y), z); - } - - template - GLM_FUNC_QUALIFIER T min - ( - T const& x, - T const& y, - T const& z, - T const& w - ) - { - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w - ) - { - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C min - ( - C const& x, - C const& y, - C const& z, - C const& w - ) - { - return glm::min(glm::min(x, y), glm::min(z, w)); - } - - template - GLM_FUNC_QUALIFIER T max( - T const& x, - T const& y, - T const& z) - { - return glm::max(glm::max(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - typename C::T const& y, - typename C::T const& z - ) - { - return glm::max(glm::max(x, y), z); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - C const& y, - C const& z - ) - { - return glm::max(glm::max(x, y), z); - } - - template - GLM_FUNC_QUALIFIER T max - ( - T const& x, - T const& y, - T const& z, - T const& w - ) - { - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - typename C::T const& y, - typename C::T const& z, - typename C::T const& w - ) - { - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - template class C> - GLM_FUNC_QUALIFIER C max - ( - C const& x, - C const& y, - C const& z, - C const& w - ) - { - return glm::max(glm::max(x, y), glm::max(z, w)); - } - - // fmin -# if GLM_HAS_CXX11_STL - using std::fmin; -# else - template - GLM_FUNC_QUALIFIER genType fmin(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmin' only accept floating-point 
input"); - - if (isnan(x)) - return y; - if (isnan(y)) - return x; - - return min(x, y); - } -# endif - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, T b) - { - return detail::functor2::call(fmin, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmin(vec const& a, vec const& b) - { - return detail::functor2::call(fmin, a, b); - } - - // fmax -# if GLM_HAS_CXX11_STL - using std::fmax; -# else - template - GLM_FUNC_QUALIFIER genType fmax(genType x, genType y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fmax' only accept floating-point input"); - - if (isnan(x)) - return y; - if (isnan(y)) - return x; - - return max(x, y); - } -# endif - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, T b) - { - return detail::functor2::call(fmax, a, vec(b)); - } - - template - GLM_FUNC_QUALIFIER vec fmax(vec const& a, vec const& b) - { - return detail::functor2::call(fmax, a, b); - } - - // fclamp - template - GLM_FUNC_QUALIFIER genType fclamp(genType x, genType minVal, genType maxVal) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fclamp' only accept floating-point or integer inputs"); - return fmin(fmax(x, minVal), maxVal); - } - - template - GLM_FUNC_QUALIFIER vec fclamp(vec const& x, T minVal, T maxVal) - { - return fmin(fmax(x, vec(minVal)), vec(maxVal)); - } - - template - GLM_FUNC_QUALIFIER vec fclamp(vec const& x, vec const& minVal, vec const& maxVal) - { - return fmin(fmax(x, minVal), maxVal); - } -}//namespace glm diff --git a/third_party/glm/gtx/exterior_product.hpp b/third_party/glm/gtx/exterior_product.hpp deleted file mode 100755 index 5522df7..0000000 --- a/third_party/glm/gtx/exterior_product.hpp +++ /dev/null @@ -1,45 +0,0 @@ -/// @ref gtx_exterior_product -/// @file glm/gtx/exterior_product.hpp -/// -/// @see core (dependence) -/// @see gtx_exterior_product (dependence) -/// -/// @defgroup gtx_exterior_product GLM_GTX_exterior_product -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// @brief Allow to perform bit operations on integer values - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_exterior_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_exterior_product extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_exterior_product - /// @{ - - /// Returns the cross product of x and y. 
- /// - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see Exterior product - template - GLM_FUNC_DECL T cross(vec<2, T, Q> const& v, vec<2, T, Q> const& u); - - /// @} -} //namespace glm - -#include "exterior_product.inl" diff --git a/third_party/glm/gtx/exterior_product.inl b/third_party/glm/gtx/exterior_product.inl deleted file mode 100755 index 93661fd..0000000 --- a/third_party/glm/gtx/exterior_product.inl +++ /dev/null @@ -1,26 +0,0 @@ -/// @ref gtx_exterior_product - -#include - -namespace glm { -namespace detail -{ - template - struct compute_cross_vec2 - { - GLM_FUNC_QUALIFIER static T call(vec<2, T, Q> const& v, vec<2, T, Q> const& u) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'cross' accepts only floating-point inputs"); - - return v.x * u.y - u.x * v.y; - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER T cross(vec<2, T, Q> const& x, vec<2, T, Q> const& y) - { - return detail::compute_cross_vec2::value>::call(x, y); - } -}//namespace glm - diff --git a/third_party/glm/gtx/fast_exponential.hpp b/third_party/glm/gtx/fast_exponential.hpp deleted file mode 100755 index 6fb7286..0000000 --- a/third_party/glm/gtx/fast_exponential.hpp +++ /dev/null @@ -1,95 +0,0 @@ -/// @ref gtx_fast_exponential -/// @file glm/gtx/fast_exponential.hpp -/// -/// @see core (dependence) -/// @see gtx_half_float (dependence) -/// -/// @defgroup gtx_fast_exponential GLM_GTX_fast_exponential -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Fast but less accurate implementations of exponential based functions. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_fast_exponential is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_fast_exponential extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_fast_exponential - /// @{ - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL genType fastPow(genType x, genType y); - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastPow(vec const& x, vec const& y); - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL genTypeT fastPow(genTypeT x, genTypeU y); - - /// Faster than the common pow function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastPow(vec const& x); - - /// Faster than the common exp function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastExp(T x); - - /// Faster than the common exp function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastExp(vec const& x); - - /// Faster than the common log function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastLog(T x); - - /// Faster than the common exp2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastLog(vec const& x); - - /// Faster than the common exp2 function but less accurate. 
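/* A sketch of the fast_exponential helpers declared above; inputs are hypothetical and the
 * results are approximations, per the "faster but less accurate" notes in the comments.
 * Assumes GLM_ENABLE_EXPERIMENTAL is defined. */
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/fast_exponential.hpp>

float fastExponentialSketch()
{
	float p = glm::fastPow(2.0f, 10.0f); // ~1024, computed as exp(y * log(x)) rather than std::pow
	float e = glm::fastExp(0.5f);        // truncated Taylor series, intended for small |x|
	return p + e;
}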
- /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastExp2(T x); - - /// Faster than the common exp2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastExp2(vec const& x); - - /// Faster than the common log2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL T fastLog2(T x); - - /// Faster than the common log2 function but less accurate. - /// @see gtx_fast_exponential - template - GLM_FUNC_DECL vec fastLog2(vec const& x); - - /// @} -}//namespace glm - -#include "fast_exponential.inl" diff --git a/third_party/glm/gtx/fast_exponential.inl b/third_party/glm/gtx/fast_exponential.inl deleted file mode 100755 index f139e50..0000000 --- a/third_party/glm/gtx/fast_exponential.inl +++ /dev/null @@ -1,136 +0,0 @@ -/// @ref gtx_fast_exponential - -namespace glm -{ - // fastPow: - template - GLM_FUNC_QUALIFIER genType fastPow(genType x, genType y) - { - return exp(y * log(x)); - } - - template - GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) - { - return exp(y * log(x)); - } - - template - GLM_FUNC_QUALIFIER T fastPow(T x, int y) - { - T f = static_cast(1); - for(int i = 0; i < y; ++i) - f *= x; - return f; - } - - template - GLM_FUNC_QUALIFIER vec fastPow(vec const& x, vec const& y) - { - vec Result; - for(length_t i = 0, n = x.length(); i < n; ++i) - Result[i] = fastPow(x[i], y[i]); - return Result; - } - - // fastExp - // Note: This function provides accurate results only for value between -1 and 1, else avoid it. - template - GLM_FUNC_QUALIFIER T fastExp(T x) - { - // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. - // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); - T x2 = x * x; - T x3 = x2 * x; - T x4 = x3 * x; - T x5 = x4 * x; - return T(1) + x + (x2 * T(0.5)) + (x3 * T(0.1666666667)) + (x4 * T(0.041666667)) + (x5 * T(0.008333333333)); - } - /* // Try to handle all values of float... but often shower than std::exp, glm::floor and the loop kill the performance - GLM_FUNC_QUALIFIER float fastExp(float x) - { - const float e = 2.718281828f; - const float IntegerPart = floor(x); - const float FloatPart = x - IntegerPart; - float z = 1.f; - - for(int i = 0; i < int(IntegerPart); ++i) - z *= e; - - const float x2 = FloatPart * FloatPart; - const float x3 = x2 * FloatPart; - const float x4 = x3 * FloatPart; - const float x5 = x4 * FloatPart; - return z * (1.0f + FloatPart + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)); - } - - // Increase accuracy on number bigger that 1 and smaller than -1 but it's not enough for high and negative numbers - GLM_FUNC_QUALIFIER float fastExp(float x) - { - // This has a better looking and same performance in release mode than the following code. However, in debug mode it's slower. 
- // return 1.0f + x * (1.0f + x * 0.5f * (1.0f + x * 0.3333333333f * (1.0f + x * 0.25 * (1.0f + x * 0.2f)))); - float x2 = x * x; - float x3 = x2 * x; - float x4 = x3 * x; - float x5 = x4 * x; - float x6 = x5 * x; - float x7 = x6 * x; - float x8 = x7 * x; - return 1.0f + x + (x2 * 0.5f) + (x3 * 0.1666666667f) + (x4 * 0.041666667f) + (x5 * 0.008333333333f)+ (x6 * 0.00138888888888f) + (x7 * 0.000198412698f) + (x8 * 0.0000248015873f);; - } - */ - - template - GLM_FUNC_QUALIFIER vec fastExp(vec const& x) - { - return detail::functor1::call(fastExp, x); - } - - // fastLog - template - GLM_FUNC_QUALIFIER genType fastLog(genType x) - { - return std::log(x); - } - - /* Slower than the VC7.1 function... - GLM_FUNC_QUALIFIER float fastLog(float x) - { - float y1 = (x - 1.0f) / (x + 1.0f); - float y2 = y1 * y1; - return 2.0f * y1 * (1.0f + y2 * (0.3333333333f + y2 * (0.2f + y2 * 0.1428571429f))); - } - */ - - template - GLM_FUNC_QUALIFIER vec fastLog(vec const& x) - { - return detail::functor1::call(fastLog, x); - } - - //fastExp2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType fastExp2(genType x) - { - return fastExp(0.69314718055994530941723212145818f * x); - } - - template - GLM_FUNC_QUALIFIER vec fastExp2(vec const& x) - { - return detail::functor1::call(fastExp2, x); - } - - // fastLog2, ln2 = 0.69314718055994530941723212145818f - template - GLM_FUNC_QUALIFIER genType fastLog2(genType x) - { - return fastLog(x) / 0.69314718055994530941723212145818f; - } - - template - GLM_FUNC_QUALIFIER vec fastLog2(vec const& x) - { - return detail::functor1::call(fastLog2, x); - } -}//namespace glm diff --git a/third_party/glm/gtx/fast_square_root.hpp b/third_party/glm/gtx/fast_square_root.hpp deleted file mode 100755 index 9fb3f2f..0000000 --- a/third_party/glm/gtx/fast_square_root.hpp +++ /dev/null @@ -1,92 +0,0 @@ -/// @ref gtx_fast_square_root -/// @file glm/gtx/fast_square_root.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_fast_square_root GLM_GTX_fast_square_root -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Fast but less accurate implementations of square root based functions. -/// - Sqrt optimisation based on Newton's method, -/// www.gamedev.net/community/forums/topic.asp?topic id=139956 - -#pragma once - -// Dependency: -#include "../common.hpp" -#include "../exponential.hpp" -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_fast_square_root is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_fast_square_root extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_fast_square_root - /// @{ - - /// Faster than the common sqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastSqrt(genType x); - - /// Faster than the common sqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL vec fastSqrt(vec const& x); - - /// Faster than the common inversesqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastInverseSqrt(genType x); - - /// Faster than the common inversesqrt function but less accurate. - /// - /// @see gtx_fast_square_root extension. 
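/* A sketch of the fastSqrt helper declared above, which trades accuracy for speed relative
 * to sqrt; the vector value is hypothetical and GLM_ENABLE_EXPERIMENTAL is assumed. */
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/fast_square_root.hpp>

glm::vec3 fastSquareRootSketch()
{
	glm::vec3 v(1.0f, 2.0f, 2.0f);                    // dot(v, v) == 9
	float approxLen = glm::fastSqrt(glm::dot(v, v));  // ~3.0f
	return v / approxLen;                             // roughly unit length
}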
- template - GLM_FUNC_DECL vec fastInverseSqrt(vec const& x); - - /// Faster than the common length function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastLength(genType x); - - /// Faster than the common length function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL T fastLength(vec const& x); - - /// Faster than the common distance function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastDistance(genType x, genType y); - - /// Faster than the common distance function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL T fastDistance(vec const& x, vec const& y); - - /// Faster than the common normalize function but less accurate. - /// - /// @see gtx_fast_square_root extension. - template - GLM_FUNC_DECL genType fastNormalize(genType const& x); - - /// @} -}// namespace glm - -#include "fast_square_root.inl" diff --git a/third_party/glm/gtx/fast_square_root.inl b/third_party/glm/gtx/fast_square_root.inl deleted file mode 100755 index 4e6c6de..0000000 --- a/third_party/glm/gtx/fast_square_root.inl +++ /dev/null @@ -1,75 +0,0 @@ -/// @ref gtx_fast_square_root - -namespace glm -{ - // fastSqrt - template - GLM_FUNC_QUALIFIER genType fastSqrt(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fastSqrt' only accept floating-point input"); - - return genType(1) / fastInverseSqrt(x); - } - - template - GLM_FUNC_QUALIFIER vec fastSqrt(vec const& x) - { - return detail::functor1::call(fastSqrt, x); - } - - // fastInversesqrt - template - GLM_FUNC_QUALIFIER genType fastInverseSqrt(genType x) - { - return detail::compute_inversesqrt<1, genType, lowp, detail::is_aligned::value>::call(vec<1, genType, lowp>(x)).x; - } - - template - GLM_FUNC_QUALIFIER vec fastInverseSqrt(vec const& x) - { - return detail::compute_inversesqrt::value>::call(x); - } - - // fastLength - template - GLM_FUNC_QUALIFIER genType fastLength(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fastLength' only accept floating-point inputs"); - - return abs(x); - } - - template - GLM_FUNC_QUALIFIER T fastLength(vec const& x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'fastLength' only accept floating-point inputs"); - - return fastSqrt(dot(x, x)); - } - - // fastDistance - template - GLM_FUNC_QUALIFIER genType fastDistance(genType x, genType y) - { - return fastLength(y - x); - } - - template - GLM_FUNC_QUALIFIER T fastDistance(vec const& x, vec const& y) - { - return fastLength(y - x); - } - - // fastNormalize - template - GLM_FUNC_QUALIFIER genType fastNormalize(genType x) - { - return x > genType(0) ? genType(1) : -genType(1); - } - - template - GLM_FUNC_QUALIFIER vec fastNormalize(vec const& x) - { - return x * fastInverseSqrt(dot(x, x)); - } -}//namespace glm diff --git a/third_party/glm/gtx/fast_trigonometry.hpp b/third_party/glm/gtx/fast_trigonometry.hpp deleted file mode 100755 index 2650d6e..0000000 --- a/third_party/glm/gtx/fast_trigonometry.hpp +++ /dev/null @@ -1,79 +0,0 @@ -/// @ref gtx_fast_trigonometry -/// @file glm/gtx/fast_trigonometry.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_fast_trigonometry GLM_GTX_fast_trigonometry -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Fast but less accurate implementations of trigonometric functions. 
- -#pragma once - -// Dependency: -#include "../gtc/constants.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_fast_trigonometry is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_fast_trigonometry extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_fast_trigonometry - /// @{ - - /// Wrap an angle to [0 2pi[ - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T wrapAngle(T angle); - - /// Faster than the common sin function but less accurate. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastSin(T angle); - - /// Faster than the common cos function but less accurate. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastCos(T angle); - - /// Faster than the common tan function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastTan(T angle); - - /// Faster than the common asin function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastAsin(T angle); - - /// Faster than the common acos function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastAcos(T angle); - - /// Faster than the common atan function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. - template - GLM_FUNC_DECL T fastAtan(T y, T x); - - /// Faster than the common atan function but less accurate. - /// Defined between -2pi and 2pi. - /// From GLM_GTX_fast_trigonometry extension. 
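/* A sketch of wrapAngle and fastCos as declared above; both are approximations and the
 * input angle is hypothetical. Assumes GLM_ENABLE_EXPERIMENTAL is defined. */
#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/fast_trigonometry.hpp>

float fastTrigSketch()
{
	float a = glm::wrapAngle(7.0f); // folded into [0, 2*pi), ~0.717f
	return glm::fastCos(a);         // polynomial approximation of cos(a)
}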
- template - GLM_FUNC_DECL T fastAtan(T angle); - - /// @} -}//namespace glm - -#include "fast_trigonometry.inl" diff --git a/third_party/glm/gtx/fast_trigonometry.inl b/third_party/glm/gtx/fast_trigonometry.inl deleted file mode 100755 index 1a710cb..0000000 --- a/third_party/glm/gtx/fast_trigonometry.inl +++ /dev/null @@ -1,142 +0,0 @@ -/// @ref gtx_fast_trigonometry - -namespace glm{ -namespace detail -{ - template - GLM_FUNC_QUALIFIER vec taylorCos(vec const& x) - { - return static_cast(1) - - (x * x) * (1.f / 2.f) - + ((x * x) * (x * x)) * (1.f / 24.f) - - (((x * x) * (x * x)) * (x * x)) * (1.f / 720.f) - + (((x * x) * (x * x)) * ((x * x) * (x * x))) * (1.f / 40320.f); - } - - template - GLM_FUNC_QUALIFIER T cos_52s(T x) - { - T const xx(x * x); - return (T(0.9999932946) + xx * (T(-0.4999124376) + xx * (T(0.0414877472) + xx * T(-0.0012712095)))); - } - - template - GLM_FUNC_QUALIFIER vec cos_52s(vec const& x) - { - return detail::functor1::call(cos_52s, x); - } -}//namespace detail - - // wrapAngle - template - GLM_FUNC_QUALIFIER T wrapAngle(T angle) - { - return abs(mod(angle, two_pi())); - } - - template - GLM_FUNC_QUALIFIER vec wrapAngle(vec const& x) - { - return detail::functor1::call(wrapAngle, x); - } - - // cos - template - GLM_FUNC_QUALIFIER T fastCos(T x) - { - T const angle(wrapAngle(x)); - - if(angle < half_pi()) - return detail::cos_52s(angle); - if(angle < pi()) - return -detail::cos_52s(pi() - angle); - if(angle < (T(3) * half_pi())) - return -detail::cos_52s(angle - pi()); - - return detail::cos_52s(two_pi() - angle); - } - - template - GLM_FUNC_QUALIFIER vec fastCos(vec const& x) - { - return detail::functor1::call(fastCos, x); - } - - // sin - template - GLM_FUNC_QUALIFIER T fastSin(T x) - { - return fastCos(half_pi() - x); - } - - template - GLM_FUNC_QUALIFIER vec fastSin(vec const& x) - { - return detail::functor1::call(fastSin, x); - } - - // tan - template - GLM_FUNC_QUALIFIER T fastTan(T x) - { - return x + (x * x * x * T(0.3333333333)) + (x * x * x * x * x * T(0.1333333333333)) + (x * x * x * x * x * x * x * T(0.0539682539)); - } - - template - GLM_FUNC_QUALIFIER vec fastTan(vec const& x) - { - return detail::functor1::call(fastTan, x); - } - - // asin - template - GLM_FUNC_QUALIFIER T fastAsin(T x) - { - return x + (x * x * x * T(0.166666667)) + (x * x * x * x * x * T(0.075)) + (x * x * x * x * x * x * x * T(0.0446428571)) + (x * x * x * x * x * x * x * x * x * T(0.0303819444));// + (x * x * x * x * x * x * x * x * x * x * x * T(0.022372159)); - } - - template - GLM_FUNC_QUALIFIER vec fastAsin(vec const& x) - { - return detail::functor1::call(fastAsin, x); - } - - // acos - template - GLM_FUNC_QUALIFIER T fastAcos(T x) - { - return T(1.5707963267948966192313216916398) - fastAsin(x); //(PI / 2) - } - - template - GLM_FUNC_QUALIFIER vec fastAcos(vec const& x) - { - return detail::functor1::call(fastAcos, x); - } - - // atan - template - GLM_FUNC_QUALIFIER T fastAtan(T y, T x) - { - T sgn = sign(y) * sign(x); - return abs(fastAtan(y / x)) * sgn; - } - - template - GLM_FUNC_QUALIFIER vec fastAtan(vec const& y, vec const& x) - { - return detail::functor2::call(fastAtan, y, x); - } - - template - GLM_FUNC_QUALIFIER T fastAtan(T x) - { - return x - (x * x * x * T(0.333333333333)) + (x * x * x * x * x * T(0.2)) - (x * x * x * x * x * x * x * T(0.1428571429)) + (x * x * x * x * x * x * x * x * x * T(0.111111111111)) - (x * x * x * x * x * x * x * x * x * x * x * T(0.0909090909)); - } - - template - GLM_FUNC_QUALIFIER vec fastAtan(vec const& x) - { - return 
detail::functor1::call(fastAtan, x); - } -}//namespace glm diff --git a/third_party/glm/gtx/float_notmalize.inl b/third_party/glm/gtx/float_notmalize.inl deleted file mode 100755 index 8cdbc5a..0000000 --- a/third_party/glm/gtx/float_notmalize.inl +++ /dev/null @@ -1,13 +0,0 @@ -/// @ref gtx_float_normalize - -#include - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec floatNormalize(vec const& v) - { - return vec(v) / static_cast(std::numeric_limits::max()); - } - -}//namespace glm diff --git a/third_party/glm/gtx/functions.hpp b/third_party/glm/gtx/functions.hpp deleted file mode 100755 index 9f4166c..0000000 --- a/third_party/glm/gtx/functions.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/// @ref gtx_functions -/// @file glm/gtx/functions.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_functions GLM_GTX_functions -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// List of useful common functions. - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" -#include "../detail/qualifier.hpp" -#include "../detail/type_vec2.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_functions is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_functions extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_functions - /// @{ - - /// 1D gauss function - /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL T gauss( - T x, - T ExpectedValue, - T StandardDeviation); - - /// 2D gauss function - /// - /// @see gtc_epsilon - template - GLM_FUNC_DECL T gauss( - vec<2, T, Q> const& Coord, - vec<2, T, Q> const& ExpectedValue, - vec<2, T, Q> const& StandardDeviation); - - /// @} -}//namespace glm - -#include "functions.inl" - diff --git a/third_party/glm/gtx/functions.inl b/third_party/glm/gtx/functions.inl deleted file mode 100755 index 29cbb20..0000000 --- a/third_party/glm/gtx/functions.inl +++ /dev/null @@ -1,30 +0,0 @@ -/// @ref gtx_functions - -#include "../exponential.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T gauss - ( - T x, - T ExpectedValue, - T StandardDeviation - ) - { - return exp(-((x - ExpectedValue) * (x - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation)) / (StandardDeviation * sqrt(static_cast(6.28318530717958647692528676655900576))); - } - - template - GLM_FUNC_QUALIFIER T gauss - ( - vec<2, T, Q> const& Coord, - vec<2, T, Q> const& ExpectedValue, - vec<2, T, Q> const& StandardDeviation - ) - { - vec<2, T, Q> const Squared = ((Coord - ExpectedValue) * (Coord - ExpectedValue)) / (static_cast(2) * StandardDeviation * StandardDeviation); - return exp(-(Squared.x + Squared.y)); - } -}//namespace glm - diff --git a/third_party/glm/gtx/gradient_paint.hpp b/third_party/glm/gtx/gradient_paint.hpp deleted file mode 100755 index 6f85bf4..0000000 --- a/third_party/glm/gtx/gradient_paint.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/// @ref gtx_gradient_paint -/// @file glm/gtx/gradient_paint.hpp -/// -/// @see core (dependence) -/// @see gtx_optimum_pow (dependence) -/// -/// @defgroup gtx_gradient_paint GLM_GTX_gradient_paint -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Functions that return the color of procedural gradient for specific coordinates. 
- -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/optimum_pow.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_gradient_paint is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_gradient_paint extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_gradient_paint - /// @{ - - /// Return a color from a radial gradient. - /// @see - gtx_gradient_paint - template - GLM_FUNC_DECL T radialGradient( - vec<2, T, Q> const& Center, - T const& Radius, - vec<2, T, Q> const& Focal, - vec<2, T, Q> const& Position); - - /// Return a color from a linear gradient. - /// @see - gtx_gradient_paint - template - GLM_FUNC_DECL T linearGradient( - vec<2, T, Q> const& Point0, - vec<2, T, Q> const& Point1, - vec<2, T, Q> const& Position); - - /// @} -}// namespace glm - -#include "gradient_paint.inl" diff --git a/third_party/glm/gtx/gradient_paint.inl b/third_party/glm/gtx/gradient_paint.inl deleted file mode 100755 index 4c495e6..0000000 --- a/third_party/glm/gtx/gradient_paint.inl +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtx_gradient_paint - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T radialGradient - ( - vec<2, T, Q> const& Center, - T const& Radius, - vec<2, T, Q> const& Focal, - vec<2, T, Q> const& Position - ) - { - vec<2, T, Q> F = Focal - Center; - vec<2, T, Q> D = Position - Focal; - T Radius2 = pow2(Radius); - T Fx2 = pow2(F.x); - T Fy2 = pow2(F.y); - - T Numerator = (D.x * F.x + D.y * F.y) + sqrt(Radius2 * (pow2(D.x) + pow2(D.y)) - pow2(D.x * F.y - D.y * F.x)); - T Denominator = Radius2 - (Fx2 + Fy2); - return Numerator / Denominator; - } - - template - GLM_FUNC_QUALIFIER T linearGradient - ( - vec<2, T, Q> const& Point0, - vec<2, T, Q> const& Point1, - vec<2, T, Q> const& Position - ) - { - vec<2, T, Q> Dist = Point1 - Point0; - return (Dist.x * (Position.x - Point0.x) + Dist.y * (Position.y - Point0.y)) / glm::dot(Dist, Dist); - } -}//namespace glm diff --git a/third_party/glm/gtx/handed_coordinate_space.hpp b/third_party/glm/gtx/handed_coordinate_space.hpp deleted file mode 100755 index 3c85968..0000000 --- a/third_party/glm/gtx/handed_coordinate_space.hpp +++ /dev/null @@ -1,50 +0,0 @@ -/// @ref gtx_handed_coordinate_space -/// @file glm/gtx/handed_coordinate_space.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_handed_coordinate_space GLM_GTX_handed_coordinate_space -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// To know if a set of three basis vectors defines a right or left-handed coordinate system. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_handed_coordinate_space is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_handed_coordinate_space extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_handed_coordinate_space - /// @{ - - //! Return if a trihedron right handed or not. - //! From GLM_GTX_handed_coordinate_space extension. - template - GLM_FUNC_DECL bool rightHanded( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal); - - //! 
Return if a trihedron left handed or not. - //! From GLM_GTX_handed_coordinate_space extension. - template - GLM_FUNC_DECL bool leftHanded( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal); - - /// @} -}// namespace glm - -#include "handed_coordinate_space.inl" diff --git a/third_party/glm/gtx/handed_coordinate_space.inl b/third_party/glm/gtx/handed_coordinate_space.inl deleted file mode 100755 index e43c17b..0000000 --- a/third_party/glm/gtx/handed_coordinate_space.inl +++ /dev/null @@ -1,26 +0,0 @@ -/// @ref gtx_handed_coordinate_space - -namespace glm -{ - template - GLM_FUNC_QUALIFIER bool rightHanded - ( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal - ) - { - return dot(cross(normal, tangent), binormal) > T(0); - } - - template - GLM_FUNC_QUALIFIER bool leftHanded - ( - vec<3, T, Q> const& tangent, - vec<3, T, Q> const& binormal, - vec<3, T, Q> const& normal - ) - { - return dot(cross(normal, tangent), binormal) < T(0); - } -}//namespace glm diff --git a/third_party/glm/gtx/hash.hpp b/third_party/glm/gtx/hash.hpp deleted file mode 100755 index 05dae9f..0000000 --- a/third_party/glm/gtx/hash.hpp +++ /dev/null @@ -1,142 +0,0 @@ -/// @ref gtx_hash -/// @file glm/gtx/hash.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_hash GLM_GTX_hash -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add std::hash support for glm types - -#pragma once - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_hash is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_hash extension included") -# endif -#endif - -#include - -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../gtc/vec1.hpp" - -#include "../gtc/quaternion.hpp" -#include "../gtx/dual_quaternion.hpp" - -#include "../mat2x2.hpp" -#include "../mat2x3.hpp" -#include "../mat2x4.hpp" - -#include "../mat3x2.hpp" -#include "../mat3x3.hpp" -#include "../mat3x4.hpp" - -#include "../mat4x2.hpp" -#include "../mat4x3.hpp" -#include "../mat4x4.hpp" - -#if !GLM_HAS_CXX11_STL -# error "GLM_GTX_hash requires C++11 standard library support" -#endif - -namespace std -{ - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<1, T, Q> const& v) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<2, T, Q> const& v) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<3, T, Q> const& v) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::vec<4, T, Q> const& v) const; - }; - - template - struct hash> - { - GLM_FUNC_DECL size_t operator()(glm::qua const& q) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::tdualquat const& q) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<2, 2, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<2, 3, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<2, 4, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<3, 2, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<3, 
3, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<3, 4, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<4, 2, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<4, 3, T,Q> const& m) const; - }; - - template - struct hash > - { - GLM_FUNC_DECL size_t operator()(glm::mat<4, 4, T,Q> const& m) const; - }; -} // namespace std - -#include "hash.inl" diff --git a/third_party/glm/gtx/hash.inl b/third_party/glm/gtx/hash.inl deleted file mode 100755 index ff71ca9..0000000 --- a/third_party/glm/gtx/hash.inl +++ /dev/null @@ -1,184 +0,0 @@ -/// @ref gtx_hash -/// -/// @see core (dependence) -/// -/// @defgroup gtx_hash GLM_GTX_hash -/// @ingroup gtx -/// -/// @brief Add std::hash support for glm types -/// -/// need to be included to use the features of this extension. - -namespace glm { -namespace detail -{ - GLM_INLINE void hash_combine(size_t &seed, size_t hash) - { - hash += 0x9e3779b9 + (seed << 6) + (seed >> 2); - seed ^= hash; - } -}} - -namespace std -{ - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<1, T, Q> const& v) const - { - hash hasher; - return hasher(v.x); - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<2, T, Q> const& v) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(v.x)); - glm::detail::hash_combine(seed, hasher(v.y)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<3, T, Q> const& v) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(v.x)); - glm::detail::hash_combine(seed, hasher(v.y)); - glm::detail::hash_combine(seed, hasher(v.z)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::vec<4, T, Q> const& v) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(v.x)); - glm::detail::hash_combine(seed, hasher(v.y)); - glm::detail::hash_combine(seed, hasher(v.z)); - glm::detail::hash_combine(seed, hasher(v.w)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::qua const& q) const - { - size_t seed = 0; - hash hasher; - glm::detail::hash_combine(seed, hasher(q.x)); - glm::detail::hash_combine(seed, hasher(q.y)); - glm::detail::hash_combine(seed, hasher(q.z)); - glm::detail::hash_combine(seed, hasher(q.w)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::tdualquat const& q) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(q.real)); - glm::detail::hash_combine(seed, hasher(q.dual)); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<2, 2, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<2, 3, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<2, 4, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<3, 
2, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<3, 3, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<3, 4, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<4, 2, T,Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - glm::detail::hash_combine(seed, hasher(m[3])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<4, 3, T,Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - glm::detail::hash_combine(seed, hasher(m[3])); - return seed; - } - - template - GLM_FUNC_QUALIFIER size_t hash>::operator()(glm::mat<4, 4, T, Q> const& m) const - { - size_t seed = 0; - hash> hasher; - glm::detail::hash_combine(seed, hasher(m[0])); - glm::detail::hash_combine(seed, hasher(m[1])); - glm::detail::hash_combine(seed, hasher(m[2])); - glm::detail::hash_combine(seed, hasher(m[3])); - return seed; - } -} diff --git a/third_party/glm/gtx/integer.hpp b/third_party/glm/gtx/integer.hpp deleted file mode 100755 index d0b4c61..0000000 --- a/third_party/glm/gtx/integer.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/// @ref gtx_integer -/// @file glm/gtx/integer.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_integer GLM_GTX_integer -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add support for integer for core functions - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/integer.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_integer is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_integer extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_integer - /// @{ - - //! Returns x raised to the y power. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL int pow(int x, uint y); - - //! Returns the positive square root of x. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL int sqrt(int x); - - //! Returns the floor log2 of x. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL unsigned int floor_log2(unsigned int x); - - //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL int mod(int x, int y); - - //! Return the factorial value of a number (!12 max, integer only) - //! From GLM_GTX_integer extension. 
- template - GLM_FUNC_DECL genType factorial(genType const& x); - - //! 32bit signed integer. - //! From GLM_GTX_integer extension. - typedef signed int sint; - - //! Returns x raised to the y power. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint pow(uint x, uint y); - - //! Returns the positive square root of x. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint sqrt(uint x); - - //! Modulus. Returns x - y * floor(x / y) for each component in x using the floating point value y. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint mod(uint x, uint y); - - //! Returns the number of leading zeros. - //! From GLM_GTX_integer extension. - GLM_FUNC_DECL uint nlz(uint x); - - /// @} -}//namespace glm - -#include "integer.inl" diff --git a/third_party/glm/gtx/integer.inl b/third_party/glm/gtx/integer.inl deleted file mode 100755 index 956366b..0000000 --- a/third_party/glm/gtx/integer.inl +++ /dev/null @@ -1,185 +0,0 @@ -/// @ref gtx_integer - -namespace glm -{ - // pow - GLM_FUNC_QUALIFIER int pow(int x, uint y) - { - if(y == 0) - return x >= 0 ? 1 : -1; - - int result = x; - for(uint i = 1; i < y; ++i) - result *= x; - return result; - } - - // sqrt: From Christopher J. Musial, An integer square root, Graphics Gems, 1990, page 387 - GLM_FUNC_QUALIFIER int sqrt(int x) - { - if(x <= 1) return x; - - int NextTrial = x >> 1; - int CurrentAnswer; - - do - { - CurrentAnswer = NextTrial; - NextTrial = (NextTrial + x / NextTrial) >> 1; - } while(NextTrial < CurrentAnswer); - - return CurrentAnswer; - } - -// Henry Gordon Dietz: http://aggregate.org/MAGIC/ -namespace detail -{ - GLM_FUNC_QUALIFIER unsigned int ones32(unsigned int x) - { - /* 32-bit recursive reduction using SWAR... - but first step is mapping 2-bit values - into sum of 2 1-bit values in sneaky way - */ - x -= ((x >> 1) & 0x55555555); - x = (((x >> 2) & 0x33333333) + (x & 0x33333333)); - x = (((x >> 4) + x) & 0x0f0f0f0f); - x += (x >> 8); - x += (x >> 16); - return(x & 0x0000003f); - } -}//namespace detail - - // Henry Gordon Dietz: http://aggregate.org/MAGIC/ -/* - GLM_FUNC_QUALIFIER unsigned int floor_log2(unsigned int x) - { - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); - - return _detail::ones32(x) >> 1; - } -*/ - // mod - GLM_FUNC_QUALIFIER int mod(int x, int y) - { - return ((x % y) + y) % y; - } - - // factorial (!12 max, integer only) - template - GLM_FUNC_QUALIFIER genType factorial(genType const& x) - { - genType Temp = x; - genType Result; - for(Result = 1; Temp > 1; --Temp) - Result *= Temp; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> factorial( - vec<2, T, Q> const& x) - { - return vec<2, T, Q>( - factorial(x.x), - factorial(x.y)); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> factorial( - vec<3, T, Q> const& x) - { - return vec<3, T, Q>( - factorial(x.x), - factorial(x.y), - factorial(x.z)); - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> factorial( - vec<4, T, Q> const& x) - { - return vec<4, T, Q>( - factorial(x.x), - factorial(x.y), - factorial(x.z), - factorial(x.w)); - } - - GLM_FUNC_QUALIFIER uint pow(uint x, uint y) - { - if (y == 0) - return 1u; - - uint result = x; - for(uint i = 1; i < y; ++i) - result *= x; - return result; - } - - GLM_FUNC_QUALIFIER uint sqrt(uint x) - { - if(x <= 1) return x; - - uint NextTrial = x >> 1; - uint CurrentAnswer; - - do - { - CurrentAnswer = NextTrial; - NextTrial = (NextTrial + x / NextTrial) >> 1; - } while(NextTrial < CurrentAnswer); - - return CurrentAnswer; - } - - 
GLM_FUNC_QUALIFIER uint mod(uint x, uint y) - { - return x - y * (x / y); - } - -#if(GLM_COMPILER & (GLM_COMPILER_VC | GLM_COMPILER_GCC)) - - GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x) - { - return 31u - findMSB(x); - } - -#else - - // Hackers Delight: http://www.hackersdelight.org/HDcode/nlz.c.txt - GLM_FUNC_QUALIFIER unsigned int nlz(unsigned int x) - { - int y, m, n; - - y = -int(x >> 16); // If left half of x is 0, - m = (y >> 16) & 16; // set n = 16. If left half - n = 16 - m; // is nonzero, set n = 0 and - x = x >> m; // shift x right 16. - // Now x is of the form 0000xxxx. - y = x - 0x100; // If positions 8-15 are 0, - m = (y >> 16) & 8; // add 8 to n and shift x left 8. - n = n + m; - x = x << m; - - y = x - 0x1000; // If positions 12-15 are 0, - m = (y >> 16) & 4; // add 4 to n and shift x left 4. - n = n + m; - x = x << m; - - y = x - 0x4000; // If positions 14-15 are 0, - m = (y >> 16) & 2; // add 2 to n and shift x left 2. - n = n + m; - x = x << m; - - y = x >> 14; // Set y = 0, 1, 2, or 3. - m = y & ~(y >> 1); // Set m = 0, 1, 2, or 2 resp. - return unsigned(n + 2 - m); - } - -#endif//(GLM_COMPILER) - -}//namespace glm diff --git a/third_party/glm/gtx/intersect.hpp b/third_party/glm/gtx/intersect.hpp deleted file mode 100755 index 3c78f2b..0000000 --- a/third_party/glm/gtx/intersect.hpp +++ /dev/null @@ -1,92 +0,0 @@ -/// @ref gtx_intersect -/// @file glm/gtx/intersect.hpp -/// -/// @see core (dependence) -/// @see gtx_closest_point (dependence) -/// -/// @defgroup gtx_intersect GLM_GTX_intersect -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add intersection functions - -#pragma once - -// Dependency: -#include -#include -#include "../glm.hpp" -#include "../geometric.hpp" -#include "../gtx/closest_point.hpp" -#include "../gtx/vector_query.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_closest_point is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_closest_point extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_intersect - /// @{ - - //! Compute the intersection of a ray and a plane. - //! Ray direction and plane normal must be unit length. - //! From GLM_GTX_intersect extension. - template - GLM_FUNC_DECL bool intersectRayPlane( - genType const& orig, genType const& dir, - genType const& planeOrig, genType const& planeNormal, - typename genType::value_type & intersectionDistance); - - //! Compute the intersection of a ray and a triangle. - /// Based om Tomas Möller implementation http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/raytri/ - //! From GLM_GTX_intersect extension. - template - GLM_FUNC_DECL bool intersectRayTriangle( - vec<3, T, Q> const& orig, vec<3, T, Q> const& dir, - vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, vec<3, T, Q> const& v2, - vec<2, T, Q>& baryPosition, T& distance); - - //! Compute the intersection of a line and a triangle. - //! From GLM_GTX_intersect extension. - template - GLM_FUNC_DECL bool intersectLineTriangle( - genType const& orig, genType const& dir, - genType const& vert0, genType const& vert1, genType const& vert2, - genType & position); - - //! Compute the intersection distance of a ray and a sphere. - //! The ray direction vector is unit length. - //! From GLM_GTX_intersect extension. 
- template - GLM_FUNC_DECL bool intersectRaySphere( - genType const& rayStarting, genType const& rayNormalizedDirection, - genType const& sphereCenter, typename genType::value_type const sphereRadiusSquered, - typename genType::value_type & intersectionDistance); - - //! Compute the intersection of a ray and a sphere. - //! From GLM_GTX_intersect extension. - template - GLM_FUNC_DECL bool intersectRaySphere( - genType const& rayStarting, genType const& rayNormalizedDirection, - genType const& sphereCenter, const typename genType::value_type sphereRadius, - genType & intersectionPosition, genType & intersectionNormal); - - //! Compute the intersection of a line and a sphere. - //! From GLM_GTX_intersect extension - template - GLM_FUNC_DECL bool intersectLineSphere( - genType const& point0, genType const& point1, - genType const& sphereCenter, typename genType::value_type sphereRadius, - genType & intersectionPosition1, genType & intersectionNormal1, - genType & intersectionPosition2 = genType(), genType & intersectionNormal2 = genType()); - - /// @} -}//namespace glm - -#include "intersect.inl" diff --git a/third_party/glm/gtx/intersect.inl b/third_party/glm/gtx/intersect.inl deleted file mode 100755 index 54ecb4d..0000000 --- a/third_party/glm/gtx/intersect.inl +++ /dev/null @@ -1,200 +0,0 @@ -/// @ref gtx_intersect - -namespace glm -{ - template - GLM_FUNC_QUALIFIER bool intersectRayPlane - ( - genType const& orig, genType const& dir, - genType const& planeOrig, genType const& planeNormal, - typename genType::value_type & intersectionDistance - ) - { - typename genType::value_type d = glm::dot(dir, planeNormal); - typename genType::value_type Epsilon = std::numeric_limits::epsilon(); - - if(glm::abs(d) > Epsilon) // if dir and planeNormal are not perpendicular - { - typename genType::value_type const tmp_intersectionDistance = glm::dot(planeOrig - orig, planeNormal) / d; - if (tmp_intersectionDistance > static_cast(0)) { // allow only intersections - intersectionDistance = tmp_intersectionDistance; - return true; - } - } - - return false; - } - - template - GLM_FUNC_QUALIFIER bool intersectRayTriangle - ( - vec<3, T, Q> const& orig, vec<3, T, Q> const& dir, - vec<3, T, Q> const& vert0, vec<3, T, Q> const& vert1, vec<3, T, Q> const& vert2, - vec<2, T, Q>& baryPosition, T& distance - ) - { - // find vectors for two edges sharing vert0 - vec<3, T, Q> const edge1 = vert1 - vert0; - vec<3, T, Q> const edge2 = vert2 - vert0; - - // begin calculating determinant - also used to calculate U parameter - vec<3, T, Q> const p = glm::cross(dir, edge2); - - // if determinant is near zero, ray lies in plane of triangle - T const det = glm::dot(edge1, p); - - vec<3, T, Q> Perpendicular(0); - - if(det > std::numeric_limits::epsilon()) - { - // calculate distance from vert0 to ray origin - vec<3, T, Q> const dist = orig - vert0; - - // calculate U parameter and test bounds - baryPosition.x = glm::dot(dist, p); - if(baryPosition.x < static_cast(0) || baryPosition.x > det) - return false; - - // prepare to test V parameter - Perpendicular = glm::cross(dist, edge1); - - // calculate V parameter and test bounds - baryPosition.y = glm::dot(dir, Perpendicular); - if((baryPosition.y < static_cast(0)) || ((baryPosition.x + baryPosition.y) > det)) - return false; - } - else if(det < -std::numeric_limits::epsilon()) - { - // calculate distance from vert0 to ray origin - vec<3, T, Q> const dist = orig - vert0; - - // calculate U parameter and test bounds - baryPosition.x = glm::dot(dist, p); - if((baryPosition.x > 
static_cast(0)) || (baryPosition.x < det)) - return false; - - // prepare to test V parameter - Perpendicular = glm::cross(dist, edge1); - - // calculate V parameter and test bounds - baryPosition.y = glm::dot(dir, Perpendicular); - if((baryPosition.y > static_cast(0)) || (baryPosition.x + baryPosition.y < det)) - return false; - } - else - return false; // ray is parallel to the plane of the triangle - - T inv_det = static_cast(1) / det; - - // calculate distance, ray intersects triangle - distance = glm::dot(edge2, Perpendicular) * inv_det; - baryPosition *= inv_det; - - return true; - } - - template - GLM_FUNC_QUALIFIER bool intersectLineTriangle - ( - genType const& orig, genType const& dir, - genType const& vert0, genType const& vert1, genType const& vert2, - genType & position - ) - { - typename genType::value_type Epsilon = std::numeric_limits::epsilon(); - - genType edge1 = vert1 - vert0; - genType edge2 = vert2 - vert0; - - genType Perpendicular = cross(dir, edge2); - - float det = dot(edge1, Perpendicular); - - if (det > -Epsilon && det < Epsilon) - return false; - typename genType::value_type inv_det = typename genType::value_type(1) / det; - - genType Tengant = orig - vert0; - - position.y = dot(Tengant, Perpendicular) * inv_det; - if (position.y < typename genType::value_type(0) || position.y > typename genType::value_type(1)) - return false; - - genType Cotengant = cross(Tengant, edge1); - - position.z = dot(dir, Cotengant) * inv_det; - if (position.z < typename genType::value_type(0) || position.y + position.z > typename genType::value_type(1)) - return false; - - position.x = dot(edge2, Cotengant) * inv_det; - - return true; - } - - template - GLM_FUNC_QUALIFIER bool intersectRaySphere - ( - genType const& rayStarting, genType const& rayNormalizedDirection, - genType const& sphereCenter, const typename genType::value_type sphereRadiusSquered, - typename genType::value_type & intersectionDistance - ) - { - typename genType::value_type Epsilon = std::numeric_limits::epsilon(); - genType diff = sphereCenter - rayStarting; - typename genType::value_type t0 = dot(diff, rayNormalizedDirection); - typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; - if( dSquared > sphereRadiusSquered ) - { - return false; - } - typename genType::value_type t1 = sqrt( sphereRadiusSquered - dSquared ); - intersectionDistance = t0 > t1 + Epsilon ? 
t0 - t1 : t0 + t1; - return intersectionDistance > Epsilon; - } - - template - GLM_FUNC_QUALIFIER bool intersectRaySphere - ( - genType const& rayStarting, genType const& rayNormalizedDirection, - genType const& sphereCenter, const typename genType::value_type sphereRadius, - genType & intersectionPosition, genType & intersectionNormal - ) - { - typename genType::value_type distance; - if( intersectRaySphere( rayStarting, rayNormalizedDirection, sphereCenter, sphereRadius * sphereRadius, distance ) ) - { - intersectionPosition = rayStarting + rayNormalizedDirection * distance; - intersectionNormal = (intersectionPosition - sphereCenter) / sphereRadius; - return true; - } - return false; - } - - template - GLM_FUNC_QUALIFIER bool intersectLineSphere - ( - genType const& point0, genType const& point1, - genType const& sphereCenter, typename genType::value_type sphereRadius, - genType & intersectionPoint1, genType & intersectionNormal1, - genType & intersectionPoint2, genType & intersectionNormal2 - ) - { - typename genType::value_type Epsilon = std::numeric_limits::epsilon(); - genType dir = normalize(point1 - point0); - genType diff = sphereCenter - point0; - typename genType::value_type t0 = dot(diff, dir); - typename genType::value_type dSquared = dot(diff, diff) - t0 * t0; - if( dSquared > sphereRadius * sphereRadius ) - { - return false; - } - typename genType::value_type t1 = sqrt( sphereRadius * sphereRadius - dSquared ); - if( t0 < t1 + Epsilon ) - t1 = -t1; - intersectionPoint1 = point0 + dir * (t0 - t1); - intersectionNormal1 = (intersectionPoint1 - sphereCenter) / sphereRadius; - intersectionPoint2 = point0 + dir * (t0 + t1); - intersectionNormal2 = (intersectionPoint2 - sphereCenter) / sphereRadius; - return true; - } -}//namespace glm diff --git a/third_party/glm/gtx/io.hpp b/third_party/glm/gtx/io.hpp deleted file mode 100755 index 8d974f0..0000000 --- a/third_party/glm/gtx/io.hpp +++ /dev/null @@ -1,201 +0,0 @@ -/// @ref gtx_io -/// @file glm/gtx/io.hpp -/// @author Jan P Springer (regnirpsj@gmail.com) -/// -/// @see core (dependence) -/// @see gtc_matrix_access (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_io GLM_GTX_io -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// std::[w]ostream support for glm types -/// -/// std::[w]ostream support for glm types + qualifier/width/etc. manipulators -/// based on howard hinnant's std::chrono io proposal -/// [http://home.roadrunner.com/~hinnant/bloomington/chrono_io.html] - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_io is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_io extension included") -# endif -#endif - -#include // std::basic_ostream<> (fwd) -#include // std::locale, std::locale::facet, std::locale::id -#include // std::pair<> - -namespace glm -{ - /// @addtogroup gtx_io - /// @{ - - namespace io - { - enum order_type { column_major, row_major}; - - template - class format_punct : public std::locale::facet - { - typedef CTy char_type; - - public: - - static std::locale::id id; - - bool formatted; - unsigned precision; - unsigned width; - char_type separator; - char_type delim_left; - char_type delim_right; - char_type space; - char_type newline; - order_type order; - - GLM_FUNC_DECL explicit format_punct(size_t a = 0); - GLM_FUNC_DECL explicit format_punct(format_punct const&); - }; - - template > - class basic_state_saver { - - public: - - GLM_FUNC_DECL explicit basic_state_saver(std::basic_ios&); - GLM_FUNC_DECL ~basic_state_saver(); - - private: - - typedef ::std::basic_ios state_type; - typedef typename state_type::char_type char_type; - typedef ::std::ios_base::fmtflags flags_type; - typedef ::std::streamsize streamsize_type; - typedef ::std::locale const locale_type; - - state_type& state_; - flags_type flags_; - streamsize_type precision_; - streamsize_type width_; - char_type fill_; - locale_type locale_; - - GLM_FUNC_DECL basic_state_saver& operator=(basic_state_saver const&); - }; - - typedef basic_state_saver state_saver; - typedef basic_state_saver wstate_saver; - - template > - class basic_format_saver - { - public: - - GLM_FUNC_DECL explicit basic_format_saver(std::basic_ios&); - GLM_FUNC_DECL ~basic_format_saver(); - - private: - - basic_state_saver const bss_; - - GLM_FUNC_DECL basic_format_saver& operator=(basic_format_saver const&); - }; - - typedef basic_format_saver format_saver; - typedef basic_format_saver wformat_saver; - - struct precision - { - unsigned value; - - GLM_FUNC_DECL explicit precision(unsigned); - }; - - struct width - { - unsigned value; - - GLM_FUNC_DECL explicit width(unsigned); - }; - - template - struct delimeter - { - CTy value[3]; - - GLM_FUNC_DECL explicit delimeter(CTy /* left */, CTy /* right */, CTy /* separator */ = ','); - }; - - struct order - { - order_type value; - - GLM_FUNC_DECL explicit order(order_type); - }; - - // functions, inlined (inline) - - template - FTy const& get_facet(std::basic_ios&); - template - std::basic_ios& formatted(std::basic_ios&); - template - std::basic_ios& unformattet(std::basic_ios&); - - template - std::basic_ostream& operator<<(std::basic_ostream&, precision const&); - template - std::basic_ostream& operator<<(std::basic_ostream&, width const&); - template - std::basic_ostream& operator<<(std::basic_ostream&, delimeter const&); - template - std::basic_ostream& operator<<(std::basic_ostream&, order const&); - }//namespace io - - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, qua const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<1, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<2, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, vec<4, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 2, T, Q> const&); - template - 
GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<2, 4, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 2, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<3, 4, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 2, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 3, T, Q> const&); - template - GLM_FUNC_DECL std::basic_ostream& operator<<(std::basic_ostream&, mat<4, 4, T, Q> const&); - - template - GLM_FUNC_DECL std::basic_ostream & operator<<(std::basic_ostream &, - std::pair const, mat<4, 4, T, Q> const> const&); - - /// @} -}//namespace glm - -#include "io.inl" diff --git a/third_party/glm/gtx/io.inl b/third_party/glm/gtx/io.inl deleted file mode 100755 index a3a1bb6..0000000 --- a/third_party/glm/gtx/io.inl +++ /dev/null @@ -1,440 +0,0 @@ -/// @ref gtx_io -/// @author Jan P Springer (regnirpsj@gmail.com) - -#include // std::fixed, std::setfill<>, std::setprecision, std::right, std::setw -#include // std::basic_ostream<> -#include "../gtc/matrix_access.hpp" // glm::col, glm::row -#include "../gtx/type_trait.hpp" // glm::type<> - -namespace glm{ -namespace io -{ - template - GLM_FUNC_QUALIFIER format_punct::format_punct(size_t a) - : std::locale::facet(a) - , formatted(true) - , precision(3) - , width(1 + 4 + 1 + precision) - , separator(',') - , delim_left('[') - , delim_right(']') - , space(' ') - , newline('\n') - , order(column_major) - {} - - template - GLM_FUNC_QUALIFIER format_punct::format_punct(format_punct const& a) - : std::locale::facet(0) - , formatted(a.formatted) - , precision(a.precision) - , width(a.width) - , separator(a.separator) - , delim_left(a.delim_left) - , delim_right(a.delim_right) - , space(a.space) - , newline(a.newline) - , order(a.order) - {} - - template std::locale::id format_punct::id; - - template - GLM_FUNC_QUALIFIER basic_state_saver::basic_state_saver(std::basic_ios& a) - : state_(a) - , flags_(a.flags()) - , precision_(a.precision()) - , width_(a.width()) - , fill_(a.fill()) - , locale_(a.getloc()) - {} - - template - GLM_FUNC_QUALIFIER basic_state_saver::~basic_state_saver() - { - state_.imbue(locale_); - state_.fill(fill_); - state_.width(width_); - state_.precision(precision_); - state_.flags(flags_); - } - - template - GLM_FUNC_QUALIFIER basic_format_saver::basic_format_saver(std::basic_ios& a) - : bss_(a) - { - a.imbue(std::locale(a.getloc(), new format_punct(get_facet >(a)))); - } - - template - GLM_FUNC_QUALIFIER - basic_format_saver::~basic_format_saver() - {} - - GLM_FUNC_QUALIFIER precision::precision(unsigned a) - : value(a) - {} - - GLM_FUNC_QUALIFIER width::width(unsigned a) - : value(a) - {} - - template - GLM_FUNC_QUALIFIER delimeter::delimeter(CTy a, CTy b, CTy c) - : value() - { - value[0] = a; - value[1] = b; - value[2] = c; - } - - GLM_FUNC_QUALIFIER order::order(order_type a) - : value(a) - {} - - template - GLM_FUNC_QUALIFIER FTy const& get_facet(std::basic_ios& ios) - { - if(!std::has_facet(ios.getloc())) - ios.imbue(std::locale(ios.getloc(), new FTy)); - - return std::use_facet(ios.getloc()); - } - - template - GLM_FUNC_QUALIFIER std::basic_ios& formatted(std::basic_ios& ios) - { - 
const_cast&>(get_facet >(ios)).formatted = true; - return ios; - } - - template - GLM_FUNC_QUALIFIER std::basic_ios& unformatted(std::basic_ios& ios) - { - const_cast&>(get_facet >(ios)).formatted = false; - return ios; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, precision const& a) - { - const_cast&>(get_facet >(os)).precision = a.value; - return os; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, width const& a) - { - const_cast&>(get_facet >(os)).width = a.value; - return os; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, delimeter const& a) - { - format_punct & fmt(const_cast&>(get_facet >(os))); - - fmt.delim_left = a.value[0]; - fmt.delim_right = a.value[1]; - fmt.separator = a.value[2]; - - return os; - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, order const& a) - { - const_cast&>(get_facet >(os)).order = a.value; - return os; - } -} // namespace io - -namespace detail -{ - template - GLM_FUNC_QUALIFIER std::basic_ostream& - print_vector_on(std::basic_ostream& os, V const& a) - { - typename std::basic_ostream::sentry const cerberus(os); - - if(cerberus) - { - io::format_punct const& fmt(io::get_facet >(os)); - - length_t const& components(type::components); - - if(fmt.formatted) - { - io::basic_state_saver const bss(os); - - os << std::fixed << std::right << std::setprecision(fmt.precision) << std::setfill(fmt.space) << fmt.delim_left; - - for(length_t i(0); i < components; ++i) - { - os << std::setw(fmt.width) << a[i]; - if(components-1 != i) - os << fmt.separator; - } - - os << fmt.delim_right; - } - else - { - for(length_t i(0); i < components; ++i) - { - os << a[i]; - - if(components-1 != i) - os << fmt.space; - } - } - } - - return os; - } -}//namespace detail - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, qua const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<1, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<2, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<3, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, vec<4, T, Q> const& a) - { - return detail::print_vector_on(os, a); - } - -namespace detail -{ - template class M, length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_on(std::basic_ostream& os, M const& a) - { - typename std::basic_ostream::sentry const cerberus(os); - - if(cerberus) - { - io::format_punct const& fmt(io::get_facet >(os)); - - length_t const& cols(type >::cols); - length_t const& rows(type >::rows); - - if(fmt.formatted) - { - os << fmt.newline << fmt.delim_left; - - switch(fmt.order) - { - case io::column_major: - { - for(length_t i(0); i < rows; ++i) - { - if (0 != i) - os << fmt.space; - - os << row(a, i); - - if(rows-1 != i) - os << fmt.newline; - } - } - break; - - case io::row_major: - { - for(length_t i(0); i < cols; ++i) - { - if(0 != i) - os << fmt.space; - - os << column(a, i); - - if(cols-1 != i) - os << fmt.newline; - } - } - break; - } - - os << 
fmt.delim_right; - } - else - { - switch (fmt.order) - { - case io::column_major: - { - for(length_t i(0); i < cols; ++i) - { - os << column(a, i); - - if(cols - 1 != i) - os << fmt.space; - } - } - break; - - case io::row_major: - { - for (length_t i(0); i < rows; ++i) - { - os << row(a, i); - - if (rows-1 != i) - os << fmt.space; - } - } - break; - } - } - } - - return os; - } -}//namespace detail - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 2, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 3, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<2, 4, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 2, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<(std::basic_ostream& os, mat<3, 3, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<3, 4, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 2, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 3, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - - template - GLM_FUNC_QUALIFIER std::basic_ostream & operator<<(std::basic_ostream& os, mat<4, 4, T, Q> const& a) - { - return detail::print_matrix_on(os, a); - } - -namespace detail -{ - template class M, length_t C, length_t R, typename T, qualifier Q> - GLM_FUNC_QUALIFIER std::basic_ostream& print_matrix_pair_on(std::basic_ostream& os, std::pair const, M const> const& a) - { - typename std::basic_ostream::sentry const cerberus(os); - - if(cerberus) - { - io::format_punct const& fmt(io::get_facet >(os)); - M const& ml(a.first); - M const& mr(a.second); - length_t const& cols(type >::cols); - length_t const& rows(type >::rows); - - if(fmt.formatted) - { - os << fmt.newline << fmt.delim_left; - - switch(fmt.order) - { - case io::column_major: - { - for(length_t i(0); i < rows; ++i) - { - if(0 != i) - os << fmt.space; - - os << row(ml, i) << ((rows-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? fmt.space : fmt.delim_left) << row(mr, i); - - if(rows-1 != i) - os << fmt.newline; - } - } - break; - case io::row_major: - { - for(length_t i(0); i < cols; ++i) - { - if(0 != i) - os << fmt.space; - - os << column(ml, i) << ((cols-1 != i) ? fmt.space : fmt.delim_right) << fmt.space << ((0 != i) ? 
fmt.space : fmt.delim_left) << column(mr, i); - - if(cols-1 != i) - os << fmt.newline; - } - } - break; - } - - os << fmt.delim_right; - } - else - { - os << ml << fmt.space << mr; - } - } - - return os; - } -}//namespace detail - - template - GLM_FUNC_QUALIFIER std::basic_ostream& operator<<( - std::basic_ostream & os, - std::pair const, - mat<4, 4, T, Q> const> const& a) - { - return detail::print_matrix_pair_on(os, a); - } -}//namespace glm diff --git a/third_party/glm/gtx/log_base.hpp b/third_party/glm/gtx/log_base.hpp deleted file mode 100755 index ba28c9d..0000000 --- a/third_party/glm/gtx/log_base.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/// @ref gtx_log_base -/// @file glm/gtx/log_base.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_log_base GLM_GTX_log_base -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Logarithm for any base. base can be a vector or a scalar. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_log_base is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_log_base extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_log_base - /// @{ - - /// Logarithm for any base. - /// From GLM_GTX_log_base. - template - GLM_FUNC_DECL genType log( - genType const& x, - genType const& base); - - /// Logarithm for any base. - /// From GLM_GTX_log_base. - template - GLM_FUNC_DECL vec sign( - vec const& x, - vec const& base); - - /// @} -}//namespace glm - -#include "log_base.inl" diff --git a/third_party/glm/gtx/log_base.inl b/third_party/glm/gtx/log_base.inl deleted file mode 100755 index 4bbb8e8..0000000 --- a/third_party/glm/gtx/log_base.inl +++ /dev/null @@ -1,16 +0,0 @@ -/// @ref gtx_log_base - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType log(genType const& x, genType const& base) - { - return glm::log(x) / glm::log(base); - } - - template - GLM_FUNC_QUALIFIER vec log(vec const& x, vec const& base) - { - return glm::log(x) / glm::log(base); - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_cross_product.hpp b/third_party/glm/gtx/matrix_cross_product.hpp deleted file mode 100755 index 1e585f9..0000000 --- a/third_party/glm/gtx/matrix_cross_product.hpp +++ /dev/null @@ -1,47 +0,0 @@ -/// @ref gtx_matrix_cross_product -/// @file glm/gtx/matrix_cross_product.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_matrix_cross_product GLM_GTX_matrix_cross_product -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Build cross product matrices - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_cross_product is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_cross_product extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_cross_product - /// @{ - - //! Build a cross product matrix. - //! From GLM_GTX_matrix_cross_product extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> matrixCross3( - vec<3, T, Q> const& x); - - //! 
Build a cross product matrix. - //! From GLM_GTX_matrix_cross_product extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> matrixCross4( - vec<3, T, Q> const& x); - - /// @} -}//namespace glm - -#include "matrix_cross_product.inl" diff --git a/third_party/glm/gtx/matrix_cross_product.inl b/third_party/glm/gtx/matrix_cross_product.inl deleted file mode 100755 index 3a15397..0000000 --- a/third_party/glm/gtx/matrix_cross_product.inl +++ /dev/null @@ -1,37 +0,0 @@ -/// @ref gtx_matrix_cross_product - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> matrixCross3 - ( - vec<3, T, Q> const& x - ) - { - mat<3, 3, T, Q> Result(T(0)); - Result[0][1] = x.z; - Result[1][0] = -x.z; - Result[0][2] = -x.y; - Result[2][0] = x.y; - Result[1][2] = x.x; - Result[2][1] = -x.x; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> matrixCross4 - ( - vec<3, T, Q> const& x - ) - { - mat<4, 4, T, Q> Result(T(0)); - Result[0][1] = x.z; - Result[1][0] = -x.z; - Result[0][2] = -x.y; - Result[2][0] = x.y; - Result[1][2] = x.x; - Result[2][1] = -x.x; - return Result; - } - -}//namespace glm diff --git a/third_party/glm/gtx/matrix_decompose.hpp b/third_party/glm/gtx/matrix_decompose.hpp deleted file mode 100755 index acd7a7f..0000000 --- a/third_party/glm/gtx/matrix_decompose.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/// @ref gtx_matrix_decompose -/// @file glm/gtx/matrix_decompose.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_decompose GLM_GTX_matrix_decompose -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Decomposes a model matrix to translations, rotation and scale components - -#pragma once - -// Dependencies -#include "../mat4x4.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../geometric.hpp" -#include "../gtc/quaternion.hpp" -#include "../gtc/matrix_transform.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_decompose is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_decompose extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_decompose - /// @{ - - /// Decomposes a model matrix to translations, rotation and scale components - /// @see gtx_matrix_decompose - template - GLM_FUNC_DECL bool decompose( - mat<4, 4, T, Q> const& modelMatrix, - vec<3, T, Q> & scale, qua & orientation, vec<3, T, Q> & translation, vec<3, T, Q> & skew, vec<4, T, Q> & perspective); - - /// @} -}//namespace glm - -#include "matrix_decompose.inl" diff --git a/third_party/glm/gtx/matrix_decompose.inl b/third_party/glm/gtx/matrix_decompose.inl deleted file mode 100755 index 694f5ec..0000000 --- a/third_party/glm/gtx/matrix_decompose.inl +++ /dev/null @@ -1,186 +0,0 @@ -/// @ref gtx_matrix_decompose - -#include "../gtc/constants.hpp" -#include "../gtc/epsilon.hpp" - -namespace glm{ -namespace detail -{ - /// Make a linear combination of two vectors and return the result. 
- // result = (a * ascl) + (b * bscl) - template - GLM_FUNC_QUALIFIER vec<3, T, Q> combine( - vec<3, T, Q> const& a, - vec<3, T, Q> const& b, - T ascl, T bscl) - { - return (a * ascl) + (b * bscl); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> scale(vec<3, T, Q> const& v, T desiredLength) - { - return v * desiredLength / length(v); - } -}//namespace detail - - // Matrix decompose - // http://www.opensource.apple.com/source/WebCore/WebCore-514/platform/graphics/transforms/TransformationMatrix.cpp - // Decomposes the mode matrix to translations,rotation scale components - - template - GLM_FUNC_QUALIFIER bool decompose(mat<4, 4, T, Q> const& ModelMatrix, vec<3, T, Q> & Scale, qua & Orientation, vec<3, T, Q> & Translation, vec<3, T, Q> & Skew, vec<4, T, Q> & Perspective) - { - mat<4, 4, T, Q> LocalMatrix(ModelMatrix); - - // Normalize the matrix. - if(epsilonEqual(LocalMatrix[3][3], static_cast(0), epsilon())) - return false; - - for(length_t i = 0; i < 4; ++i) - for(length_t j = 0; j < 4; ++j) - LocalMatrix[i][j] /= LocalMatrix[3][3]; - - // perspectiveMatrix is used to solve for perspective, but it also provides - // an easy way to test for singularity of the upper 3x3 component. - mat<4, 4, T, Q> PerspectiveMatrix(LocalMatrix); - - for(length_t i = 0; i < 3; i++) - PerspectiveMatrix[i][3] = static_cast(0); - PerspectiveMatrix[3][3] = static_cast(1); - - /// TODO: Fixme! - if(epsilonEqual(determinant(PerspectiveMatrix), static_cast(0), epsilon())) - return false; - - // First, isolate perspective. This is the messiest. - if( - epsilonNotEqual(LocalMatrix[0][3], static_cast(0), epsilon()) || - epsilonNotEqual(LocalMatrix[1][3], static_cast(0), epsilon()) || - epsilonNotEqual(LocalMatrix[2][3], static_cast(0), epsilon())) - { - // rightHandSide is the right hand side of the equation. - vec<4, T, Q> RightHandSide; - RightHandSide[0] = LocalMatrix[0][3]; - RightHandSide[1] = LocalMatrix[1][3]; - RightHandSide[2] = LocalMatrix[2][3]; - RightHandSide[3] = LocalMatrix[3][3]; - - // Solve the equation by inverting PerspectiveMatrix and multiplying - // rightHandSide by the inverse. (This is the easiest way, not - // necessarily the best.) - mat<4, 4, T, Q> InversePerspectiveMatrix = glm::inverse(PerspectiveMatrix);// inverse(PerspectiveMatrix, inversePerspectiveMatrix); - mat<4, 4, T, Q> TransposedInversePerspectiveMatrix = glm::transpose(InversePerspectiveMatrix);// transposeMatrix4(inversePerspectiveMatrix, transposedInversePerspectiveMatrix); - - Perspective = TransposedInversePerspectiveMatrix * RightHandSide; - // v4MulPointByMatrix(rightHandSide, transposedInversePerspectiveMatrix, perspectivePoint); - - // Clear the perspective partition - LocalMatrix[0][3] = LocalMatrix[1][3] = LocalMatrix[2][3] = static_cast(0); - LocalMatrix[3][3] = static_cast(1); - } - else - { - // No perspective. - Perspective = vec<4, T, Q>(0, 0, 0, 1); - } - - // Next take care of translation (easy). - Translation = vec<3, T, Q>(LocalMatrix[3]); - LocalMatrix[3] = vec<4, T, Q>(0, 0, 0, LocalMatrix[3].w); - - vec<3, T, Q> Row[3], Pdum3; - - // Now get scale and shear. - for(length_t i = 0; i < 3; ++i) - for(length_t j = 0; j < 3; ++j) - Row[i][j] = LocalMatrix[i][j]; - - // Compute X scale factor and normalize first row. - Scale.x = length(Row[0]);// v3Length(Row[0]); - - Row[0] = detail::scale(Row[0], static_cast(1)); - - // Compute XY shear factor and make 2nd row orthogonal to 1st. 
- Skew.z = dot(Row[0], Row[1]); - Row[1] = detail::combine(Row[1], Row[0], static_cast(1), -Skew.z); - - // Now, compute Y scale and normalize 2nd row. - Scale.y = length(Row[1]); - Row[1] = detail::scale(Row[1], static_cast(1)); - Skew.z /= Scale.y; - - // Compute XZ and YZ shears, orthogonalize 3rd row. - Skew.y = glm::dot(Row[0], Row[2]); - Row[2] = detail::combine(Row[2], Row[0], static_cast(1), -Skew.y); - Skew.x = glm::dot(Row[1], Row[2]); - Row[2] = detail::combine(Row[2], Row[1], static_cast(1), -Skew.x); - - // Next, get Z scale and normalize 3rd row. - Scale.z = length(Row[2]); - Row[2] = detail::scale(Row[2], static_cast(1)); - Skew.y /= Scale.z; - Skew.x /= Scale.z; - - // At this point, the matrix (in rows[]) is orthonormal. - // Check for a coordinate system flip. If the determinant - // is -1, then negate the matrix and the scaling factors. - Pdum3 = cross(Row[1], Row[2]); // v3Cross(row[1], row[2], Pdum3); - if(dot(Row[0], Pdum3) < 0) - { - for(length_t i = 0; i < 3; i++) - { - Scale[i] *= static_cast(-1); - Row[i] *= static_cast(-1); - } - } - - // Now, get the rotations out, as described in the gem. - - // FIXME - Add the ability to return either quaternions (which are - // easier to recompose with) or Euler angles (rx, ry, rz), which - // are easier for authors to deal with. The latter will only be useful - // when we fix https://bugs.webkit.org/show_bug.cgi?id=23799, so I - // will leave the Euler angle code here for now. - - // ret.rotateY = asin(-Row[0][2]); - // if (cos(ret.rotateY) != 0) { - // ret.rotateX = atan2(Row[1][2], Row[2][2]); - // ret.rotateZ = atan2(Row[0][1], Row[0][0]); - // } else { - // ret.rotateX = atan2(-Row[2][0], Row[1][1]); - // ret.rotateZ = 0; - // } - - int i, j, k = 0; - T root, trace = Row[0].x + Row[1].y + Row[2].z; - if(trace > static_cast(0)) - { - root = sqrt(trace + static_cast(1.0)); - Orientation.w = static_cast(0.5) * root; - root = static_cast(0.5) / root; - Orientation.x = root * (Row[1].z - Row[2].y); - Orientation.y = root * (Row[2].x - Row[0].z); - Orientation.z = root * (Row[0].y - Row[1].x); - } // End if > 0 - else - { - static int Next[3] = {1, 2, 0}; - i = 0; - if(Row[1].y > Row[0].x) i = 1; - if(Row[2].z > Row[i][i]) i = 2; - j = Next[i]; - k = Next[j]; - - root = sqrt(Row[i][i] - Row[j][j] - Row[k][k] + static_cast(1.0)); - - Orientation[i] = static_cast(0.5) * root; - root = static_cast(0.5) / root; - Orientation[j] = root * (Row[i][j] + Row[j][i]); - Orientation[k] = root * (Row[i][k] + Row[k][i]); - Orientation.w = root * (Row[j][k] - Row[k][j]); - } // End if <= 0 - - return true; - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_factorisation.hpp b/third_party/glm/gtx/matrix_factorisation.hpp deleted file mode 100755 index 5a975d6..0000000 --- a/third_party/glm/gtx/matrix_factorisation.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/// @ref gtx_matrix_factorisation -/// @file glm/gtx/matrix_factorisation.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_factorisation GLM_GTX_matrix_factorisation -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Functions to factor matrices in various forms - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_factorisation is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_factorisation extension included") -# endif -#endif - -/* -Suggestions: - - Move helper functions flipud and fliplr to another file: They may be helpful in more general circumstances. - - Implement other types of matrix factorisation, such as: QL and LQ, L(D)U, eigendecompositions, etc... -*/ - -namespace glm -{ - /// @addtogroup gtx_matrix_factorisation - /// @{ - - /// Flips the matrix rows up and down. - /// - /// From GLM_GTX_matrix_factorisation extension. - template - GLM_FUNC_DECL mat flipud(mat const& in); - - /// Flips the matrix columns right and left. - /// - /// From GLM_GTX_matrix_factorisation extension. - template - GLM_FUNC_DECL mat fliplr(mat const& in); - - /// Performs QR factorisation of a matrix. - /// Returns 2 matrices, q and r, such that the columns of q are orthonormal and span the same subspace than those of the input matrix, r is an upper triangular matrix, and q*r=in. - /// Given an n-by-m input matrix, q has dimensions min(n,m)-by-m, and r has dimensions n-by-min(n,m). - /// - /// From GLM_GTX_matrix_factorisation extension. - template - GLM_FUNC_DECL void qr_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& q, mat& r); - - /// Performs RQ factorisation of a matrix. - /// Returns 2 matrices, r and q, such that r is an upper triangular matrix, the rows of q are orthonormal and span the same subspace than those of the input matrix, and r*q=in. - /// Note that in the context of RQ factorisation, the diagonal is seen as starting in the lower-right corner of the matrix, instead of the usual upper-left. - /// Given an n-by-m input matrix, r has dimensions min(n,m)-by-m, and q has dimensions n-by-min(n,m). - /// - /// From GLM_GTX_matrix_factorisation extension. - template - GLM_FUNC_DECL void rq_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& r, mat& q); - - /// @} -} - -#include "matrix_factorisation.inl" diff --git a/third_party/glm/gtx/matrix_factorisation.inl b/third_party/glm/gtx/matrix_factorisation.inl deleted file mode 100755 index c479b8a..0000000 --- a/third_party/glm/gtx/matrix_factorisation.inl +++ /dev/null @@ -1,84 +0,0 @@ -/// @ref gtx_matrix_factorisation - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat flipud(mat const& in) - { - mat tin = transpose(in); - tin = fliplr(tin); - mat out = transpose(tin); - - return out; - } - - template - GLM_FUNC_QUALIFIER mat fliplr(mat const& in) - { - mat out; - for (length_t i = 0; i < C; i++) - { - out[i] = in[(C - i) - 1]; - } - - return out; - } - - template - GLM_FUNC_QUALIFIER void qr_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& q, mat& r) - { - // Uses modified Gram-Schmidt method - // Source: https://en.wikipedia.org/wiki/Gram–Schmidt_process - // And https://en.wikipedia.org/wiki/QR_decomposition - - //For all the linearly independs columns of the input... - // (there can be no more linearly independents columns than there are rows.) - for (length_t i = 0; i < (C < R ? C : R); i++) - { - //Copy in Q the input's i-th column. - q[i] = in[i]; - - //j = [0,i[ - // Make that column orthogonal to all the previous ones by substracting to it the non-orthogonal projection of all the previous columns. - // Also: Fill the zero elements of R - for (length_t j = 0; j < i; j++) - { - q[i] -= dot(q[i], q[j])*q[j]; - r[j][i] = 0; - } - - //Now, Q i-th column is orthogonal to all the previous columns. Normalize it. 
- q[i] = normalize(q[i]); - - //j = [i,C[ - //Finally, compute the corresponding coefficients of R by computing the projection of the resulting column on the other columns of the input. - for (length_t j = i; j < C; j++) - { - r[j][i] = dot(in[j], q[i]); - } - } - } - - template - GLM_FUNC_QUALIFIER void rq_decompose(mat const& in, mat<(C < R ? C : R), R, T, Q>& r, mat& q) - { - // From https://en.wikipedia.org/wiki/QR_decomposition: - // The RQ decomposition transforms a matrix A into the product of an upper triangular matrix R (also known as right-triangular) and an orthogonal matrix Q. The only difference from QR decomposition is the order of these matrices. - // QR decomposition is Gram–Schmidt orthogonalization of columns of A, started from the first column. - // RQ decomposition is Gram–Schmidt orthogonalization of rows of A, started from the last row. - - mat tin = transpose(in); - tin = fliplr(tin); - - mat tr; - mat<(C < R ? C : R), C, T, Q> tq; - qr_decompose(tin, tq, tr); - - tr = fliplr(tr); - r = transpose(tr); - r = fliplr(r); - - tq = fliplr(tq); - q = transpose(tq); - } -} //namespace glm diff --git a/third_party/glm/gtx/matrix_interpolation.hpp b/third_party/glm/gtx/matrix_interpolation.hpp deleted file mode 100755 index 7d5ad4c..0000000 --- a/third_party/glm/gtx/matrix_interpolation.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_matrix_interpolation -/// @file glm/gtx/matrix_interpolation.hpp -/// @author Ghenadii Ursachi (the.asteroth@gmail.com) -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_interpolation GLM_GTX_matrix_interpolation -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Allows to directly interpolate two matrices. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_interpolation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_interpolation extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_interpolation - /// @{ - - /// Get the axis and angle of the rotation from a matrix. - /// From GLM_GTX_matrix_interpolation extension. - template - GLM_FUNC_DECL void axisAngle( - mat<4, 4, T, Q> const& Mat, vec<3, T, Q> & Axis, T & Angle); - - /// Build a matrix from axis and angle. - /// From GLM_GTX_matrix_interpolation extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> axisAngleMatrix( - vec<3, T, Q> const& Axis, T const Angle); - - /// Extracts the rotation part of a matrix. - /// From GLM_GTX_matrix_interpolation extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> extractMatrixRotation( - mat<4, 4, T, Q> const& Mat); - - /// Build a interpolation of 4 * 4 matrixes. - /// From GLM_GTX_matrix_interpolation extension. - /// Warning! works only with rotation and/or translation matrixes, scale will generate unexpected results. 
- template - GLM_FUNC_DECL mat<4, 4, T, Q> interpolate( - mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const Delta); - - /// @} -}//namespace glm - -#include "matrix_interpolation.inl" diff --git a/third_party/glm/gtx/matrix_interpolation.inl b/third_party/glm/gtx/matrix_interpolation.inl deleted file mode 100755 index de40b7d..0000000 --- a/third_party/glm/gtx/matrix_interpolation.inl +++ /dev/null @@ -1,129 +0,0 @@ -/// @ref gtx_matrix_interpolation - -#include "../gtc/constants.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER void axisAngle(mat<4, 4, T, Q> const& m, vec<3, T, Q> & axis, T& angle) - { - T epsilon = static_cast(0.01); - T epsilon2 = static_cast(0.1); - - if((abs(m[1][0] - m[0][1]) < epsilon) && (abs(m[2][0] - m[0][2]) < epsilon) && (abs(m[2][1] - m[1][2]) < epsilon)) - { - if ((abs(m[1][0] + m[0][1]) < epsilon2) && (abs(m[2][0] + m[0][2]) < epsilon2) && (abs(m[2][1] + m[1][2]) < epsilon2) && (abs(m[0][0] + m[1][1] + m[2][2] - static_cast(3.0)) < epsilon2)) - { - angle = static_cast(0.0); - axis.x = static_cast(1.0); - axis.y = static_cast(0.0); - axis.z = static_cast(0.0); - return; - } - angle = static_cast(3.1415926535897932384626433832795); - T xx = (m[0][0] + static_cast(1.0)) * static_cast(0.5); - T yy = (m[1][1] + static_cast(1.0)) * static_cast(0.5); - T zz = (m[2][2] + static_cast(1.0)) * static_cast(0.5); - T xy = (m[1][0] + m[0][1]) * static_cast(0.25); - T xz = (m[2][0] + m[0][2]) * static_cast(0.25); - T yz = (m[2][1] + m[1][2]) * static_cast(0.25); - if((xx > yy) && (xx > zz)) - { - if(xx < epsilon) - { - axis.x = static_cast(0.0); - axis.y = static_cast(0.7071); - axis.z = static_cast(0.7071); - } - else - { - axis.x = sqrt(xx); - axis.y = xy / axis.x; - axis.z = xz / axis.x; - } - } - else if (yy > zz) - { - if(yy < epsilon) - { - axis.x = static_cast(0.7071); - axis.y = static_cast(0.0); - axis.z = static_cast(0.7071); - } - else - { - axis.y = sqrt(yy); - axis.x = xy / axis.y; - axis.z = yz / axis.y; - } - } - else - { - if (zz < epsilon) - { - axis.x = static_cast(0.7071); - axis.y = static_cast(0.7071); - axis.z = static_cast(0.0); - } - else - { - axis.z = sqrt(zz); - axis.x = xz / axis.z; - axis.y = yz / axis.z; - } - } - return; - } - T s = sqrt((m[2][1] - m[1][2]) * (m[2][1] - m[1][2]) + (m[2][0] - m[0][2]) * (m[2][0] - m[0][2]) + (m[1][0] - m[0][1]) * (m[1][0] - m[0][1])); - if (glm::abs(s) < T(0.001)) - s = static_cast(1); - T const angleCos = (m[0][0] + m[1][1] + m[2][2] - static_cast(1)) * static_cast(0.5); - if(angleCos - static_cast(1) < epsilon) - angle = pi() * static_cast(0.25); - else - angle = acos(angleCos); - axis.x = (m[1][2] - m[2][1]) / s; - axis.y = (m[2][0] - m[0][2]) / s; - axis.z = (m[0][1] - m[1][0]) / s; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> axisAngleMatrix(vec<3, T, Q> const& axis, T const angle) - { - T c = cos(angle); - T s = sin(angle); - T t = static_cast(1) - c; - vec<3, T, Q> n = normalize(axis); - - return mat<4, 4, T, Q>( - t * n.x * n.x + c, t * n.x * n.y + n.z * s, t * n.x * n.z - n.y * s, static_cast(0.0), - t * n.x * n.y - n.z * s, t * n.y * n.y + c, t * n.y * n.z + n.x * s, static_cast(0.0), - t * n.x * n.z + n.y * s, t * n.y * n.z - n.x * s, t * n.z * n.z + c, static_cast(0.0), - static_cast(0.0), static_cast(0.0), static_cast(0.0), static_cast(1.0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> extractMatrixRotation(mat<4, 4, T, Q> const& m) - { - return mat<4, 4, T, Q>( - m[0][0], m[0][1], m[0][2], static_cast(0.0), - m[1][0], m[1][1], m[1][2], static_cast(0.0), - 
m[2][0], m[2][1], m[2][2], static_cast(0.0), - static_cast(0.0), static_cast(0.0), static_cast(0.0), static_cast(1.0)); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> interpolate(mat<4, 4, T, Q> const& m1, mat<4, 4, T, Q> const& m2, T const delta) - { - mat<4, 4, T, Q> m1rot = extractMatrixRotation(m1); - mat<4, 4, T, Q> dltRotation = m2 * transpose(m1rot); - vec<3, T, Q> dltAxis; - T dltAngle; - axisAngle(dltRotation, dltAxis, dltAngle); - mat<4, 4, T, Q> out = axisAngleMatrix(dltAxis, dltAngle * delta) * m1rot; - out[3][0] = m1[3][0] + delta * (m2[3][0] - m1[3][0]); - out[3][1] = m1[3][1] + delta * (m2[3][1] - m1[3][1]); - out[3][2] = m1[3][2] + delta * (m2[3][2] - m1[3][2]); - return out; - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_major_storage.hpp b/third_party/glm/gtx/matrix_major_storage.hpp deleted file mode 100755 index 8c6bc22..0000000 --- a/third_party/glm/gtx/matrix_major_storage.hpp +++ /dev/null @@ -1,119 +0,0 @@ -/// @ref gtx_matrix_major_storage -/// @file glm/gtx/matrix_major_storage.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_matrix_major_storage GLM_GTX_matrix_major_storage -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Build matrices with specific matrix order, row or column - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_major_storage is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_major_storage extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_major_storage - /// @{ - - //! Build a row major matrix from row vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( - vec<2, T, Q> const& v1, - vec<2, T, Q> const& v2); - - //! Build a row major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> rowMajor2( - mat<2, 2, T, Q> const& m); - - //! Build a row major matrix from row vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3); - - //! Build a row major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> rowMajor3( - mat<3, 3, T, Q> const& m); - - //! Build a row major matrix from row vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( - vec<4, T, Q> const& v1, - vec<4, T, Q> const& v2, - vec<4, T, Q> const& v3, - vec<4, T, Q> const& v4); - - //! Build a row major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> rowMajor4( - mat<4, 4, T, Q> const& m); - - //! Build a column major matrix from column vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( - vec<2, T, Q> const& v1, - vec<2, T, Q> const& v2); - - //! Build a column major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> colMajor2( - mat<2, 2, T, Q> const& m); - - //! 
Build a column major matrix from column vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3); - - //! Build a column major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> colMajor3( - mat<3, 3, T, Q> const& m); - - //! Build a column major matrix from column vectors. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( - vec<4, T, Q> const& v1, - vec<4, T, Q> const& v2, - vec<4, T, Q> const& v3, - vec<4, T, Q> const& v4); - - //! Build a column major matrix from other matrix. - //! From GLM_GTX_matrix_major_storage extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> colMajor4( - mat<4, 4, T, Q> const& m); - - /// @} -}//namespace glm - -#include "matrix_major_storage.inl" diff --git a/third_party/glm/gtx/matrix_major_storage.inl b/third_party/glm/gtx/matrix_major_storage.inl deleted file mode 100755 index 279dd34..0000000 --- a/third_party/glm/gtx/matrix_major_storage.inl +++ /dev/null @@ -1,166 +0,0 @@ -/// @ref gtx_matrix_major_storage - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2 - ( - vec<2, T, Q> const& v1, - vec<2, T, Q> const& v2 - ) - { - mat<2, 2, T, Q> Result; - Result[0][0] = v1.x; - Result[1][0] = v1.y; - Result[0][1] = v2.x; - Result[1][1] = v2.y; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> rowMajor2( - const mat<2, 2, T, Q>& m) - { - mat<2, 2, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( - const vec<3, T, Q>& v1, - const vec<3, T, Q>& v2, - const vec<3, T, Q>& v3) - { - mat<3, 3, T, Q> Result; - Result[0][0] = v1.x; - Result[1][0] = v1.y; - Result[2][0] = v1.z; - Result[0][1] = v2.x; - Result[1][1] = v2.y; - Result[2][1] = v2.z; - Result[0][2] = v3.x; - Result[1][2] = v3.y; - Result[2][2] = v3.z; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rowMajor3( - const mat<3, 3, T, Q>& m) - { - mat<3, 3, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( - const vec<4, T, Q>& v1, - const vec<4, T, Q>& v2, - const vec<4, T, Q>& v3, - const vec<4, T, Q>& v4) - { - mat<4, 4, T, Q> Result; - Result[0][0] = v1.x; - Result[1][0] = v1.y; - Result[2][0] = v1.z; - Result[3][0] = v1.w; - Result[0][1] = v2.x; - Result[1][1] = v2.y; - Result[2][1] = v2.z; - Result[3][1] = v2.w; - Result[0][2] = v3.x; - Result[1][2] = v3.y; - Result[2][2] = v3.z; - Result[3][2] = v3.w; - Result[0][3] = v4.x; - Result[1][3] = v4.y; - Result[2][3] = v4.z; - Result[3][3] = v4.w; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rowMajor4( - const mat<4, 4, T, Q>& m) - { - mat<4, 4, T, Q> Result; - Result[0][0] = m[0][0]; - Result[0][1] = m[1][0]; - Result[0][2] = m[2][0]; - Result[0][3] = m[3][0]; - Result[1][0] = m[0][1]; - Result[1][1] = m[1][1]; - Result[1][2] = m[2][1]; - Result[1][3] = m[3][1]; - Result[2][0] = m[0][2]; - Result[2][1] = m[1][2]; - Result[2][2] = m[2][2]; - Result[2][3] = m[3][2]; - Result[3][0] = m[0][3]; - 
Result[3][1] = m[1][3]; - Result[3][2] = m[2][3]; - Result[3][3] = m[3][3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( - const vec<2, T, Q>& v1, - const vec<2, T, Q>& v2) - { - return mat<2, 2, T, Q>(v1, v2); - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> colMajor2( - const mat<2, 2, T, Q>& m) - { - return mat<2, 2, T, Q>(m); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( - const vec<3, T, Q>& v1, - const vec<3, T, Q>& v2, - const vec<3, T, Q>& v3) - { - return mat<3, 3, T, Q>(v1, v2, v3); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> colMajor3( - const mat<3, 3, T, Q>& m) - { - return mat<3, 3, T, Q>(m); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( - const vec<4, T, Q>& v1, - const vec<4, T, Q>& v2, - const vec<4, T, Q>& v3, - const vec<4, T, Q>& v4) - { - return mat<4, 4, T, Q>(v1, v2, v3, v4); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> colMajor4( - const mat<4, 4, T, Q>& m) - { - return mat<4, 4, T, Q>(m); - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_operation.hpp b/third_party/glm/gtx/matrix_operation.hpp deleted file mode 100755 index de6ff1f..0000000 --- a/third_party/glm/gtx/matrix_operation.hpp +++ /dev/null @@ -1,103 +0,0 @@ -/// @ref gtx_matrix_operation -/// @file glm/gtx/matrix_operation.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_operation GLM_GTX_matrix_operation -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Build diagonal matrices from vectors. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_operation is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_operation extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_operation - /// @{ - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> diagonal2x2( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 3, T, Q> diagonal2x3( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 4, T, Q> diagonal2x4( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 2, T, Q> diagonal3x2( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> diagonal3x3( - vec<3, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 4, T, Q> diagonal3x4( - vec<3, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<4, 2, T, Q> diagonal4x2( - vec<2, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<4, 3, T, Q> diagonal4x3( - vec<3, T, Q> const& v); - - //! Build a diagonal matrix. - //! From GLM_GTX_matrix_operation extension. 
- template - GLM_FUNC_DECL mat<4, 4, T, Q> diagonal4x4( - vec<4, T, Q> const& v); - - /// Build an adjugate matrix. - /// From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m); - - /// Build an adjugate matrix. - /// From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m); - - /// Build an adjugate matrix. - /// From GLM_GTX_matrix_operation extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m); - - /// @} -}//namespace glm - -#include "matrix_operation.inl" diff --git a/third_party/glm/gtx/matrix_operation.inl b/third_party/glm/gtx/matrix_operation.inl deleted file mode 100755 index 9de83f8..0000000 --- a/third_party/glm/gtx/matrix_operation.inl +++ /dev/null @@ -1,176 +0,0 @@ -/// @ref gtx_matrix_operation - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> diagonal2x2 - ( - vec<2, T, Q> const& v - ) - { - mat<2, 2, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 3, T, Q> diagonal2x3 - ( - vec<2, T, Q> const& v - ) - { - mat<2, 3, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 4, T, Q> diagonal2x4 - ( - vec<2, T, Q> const& v - ) - { - mat<2, 4, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 2, T, Q> diagonal3x2 - ( - vec<2, T, Q> const& v - ) - { - mat<3, 2, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> diagonal3x3 - ( - vec<3, T, Q> const& v - ) - { - mat<3, 3, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 4, T, Q> diagonal3x4 - ( - vec<3, T, Q> const& v - ) - { - mat<3, 4, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> diagonal4x4 - ( - vec<4, T, Q> const& v - ) - { - mat<4, 4, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - Result[3][3] = v[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 3, T, Q> diagonal4x3 - ( - vec<3, T, Q> const& v - ) - { - mat<4, 3, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - Result[2][2] = v[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 2, T, Q> diagonal4x2 - ( - vec<2, T, Q> const& v - ) - { - mat<4, 2, T, Q> Result(static_cast(1)); - Result[0][0] = v[0]; - Result[1][1] = v[1]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<2, 2, T, Q> adjugate(mat<2, 2, T, Q> const& m) - { - return mat<2, 2, T, Q>( - +m[1][1], -m[1][0], - -m[0][1], +m[0][0]); - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> adjugate(mat<3, 3, T, Q> const& m) - { - T const m00 = determinant(mat<2, 2, T, Q>(m[1][1], m[2][1], m[1][2], m[2][2])); - T const m01 = determinant(mat<2, 2, T, Q>(m[0][1], m[2][1], m[0][2], m[2][2])); - T const m02 = determinant(mat<2, 2, T, Q>(m[0][1], m[1][1], m[0][2], m[1][2])); - - T const m10 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][2], m[2][2])); - T const m11 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][2], m[2][2])); - T const 
m12 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][2], m[1][2])); - - T const m20 = determinant(mat<2, 2, T, Q>(m[1][0], m[2][0], m[1][1], m[2][1])); - T const m21 = determinant(mat<2, 2, T, Q>(m[0][0], m[2][0], m[0][1], m[2][1])); - T const m22 = determinant(mat<2, 2, T, Q>(m[0][0], m[1][0], m[0][1], m[1][1])); - - return mat<3, 3, T, Q>( - +m00, -m01, +m02, - -m10, +m11, -m12, - +m20, -m21, +m22); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> adjugate(mat<4, 4, T, Q> const& m) - { - T const m00 = determinant(mat<3, 3, T, Q>(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); - T const m01 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); - T const m02 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][1], m[3][3])); - T const m03 = determinant(mat<3, 3, T, Q>(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); - - T const m10 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3])); - T const m11 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3])); - T const m12 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3])); - T const m13 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2])); - - T const m20 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3])); - T const m21 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3])); - T const m22 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3])); - T const m23 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2])); - - T const m30 = determinant(mat<3, 3, T, Q>(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3])); - T const m31 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3])); - T const m32 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3])); - T const m33 = determinant(mat<3, 3, T, Q>(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2])); - - return mat<4, 4, T, Q>( - +m00, -m01, +m02, -m03, - -m10, +m11, -m12, +m13, - +m20, -m21, +m22, -m23, - -m30, +m31, -m32, +m33); - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_query.hpp b/third_party/glm/gtx/matrix_query.hpp deleted file mode 100755 index 8011b2b..0000000 --- a/third_party/glm/gtx/matrix_query.hpp +++ /dev/null @@ -1,77 +0,0 @@ -/// @ref gtx_matrix_query -/// @file glm/gtx/matrix_query.hpp -/// -/// @see core (dependence) -/// @see gtx_vector_query (dependence) -/// -/// @defgroup gtx_matrix_query GLM_GTX_matrix_query -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Query to evaluate matrix properties - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/vector_query.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_query is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_query extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_query - /// @{ - - /// Return whether a matrix a null matrix. - /// From GLM_GTX_matrix_query extension. - template - GLM_FUNC_DECL bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon); - - /// Return whether a matrix a null matrix. - /// From GLM_GTX_matrix_query extension. - template - GLM_FUNC_DECL bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon); - - /// Return whether a matrix is a null matrix. - /// From GLM_GTX_matrix_query extension. - template - GLM_FUNC_DECL bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon); - - /// Return whether a matrix is an identity matrix. - /// From GLM_GTX_matrix_query extension. - template class matType> - GLM_FUNC_DECL bool isIdentity(matType const& m, T const& epsilon); - - /// Return whether a matrix is a normalized matrix. - /// From GLM_GTX_matrix_query extension. - template - GLM_FUNC_DECL bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon); - - /// Return whether a matrix is a normalized matrix. - /// From GLM_GTX_matrix_query extension. - template - GLM_FUNC_DECL bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon); - - /// Return whether a matrix is a normalized matrix. - /// From GLM_GTX_matrix_query extension. - template - GLM_FUNC_DECL bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon); - - /// Return whether a matrix is an orthonormalized matrix. - /// From GLM_GTX_matrix_query extension. - template class matType> - GLM_FUNC_DECL bool isOrthogonal(matType const& m, T const& epsilon); - - /// @} -}//namespace glm - -#include "matrix_query.inl" diff --git a/third_party/glm/gtx/matrix_query.inl b/third_party/glm/gtx/matrix_query.inl deleted file mode 100755 index 77bd231..0000000 --- a/third_party/glm/gtx/matrix_query.inl +++ /dev/null @@ -1,113 +0,0 @@ -/// @ref gtx_matrix_query - -namespace glm -{ - template - GLM_FUNC_QUALIFIER bool isNull(mat<2, 2, T, Q> const& m, T const& epsilon) - { - bool result = true; - for(length_t i = 0; result && i < m.length() ; ++i) - result = isNull(m[i], epsilon); - return result; - } - - template - GLM_FUNC_QUALIFIER bool isNull(mat<3, 3, T, Q> const& m, T const& epsilon) - { - bool result = true; - for(length_t i = 0; result && i < m.length() ; ++i) - result = isNull(m[i], epsilon); - return result; - } - - template - GLM_FUNC_QUALIFIER bool isNull(mat<4, 4, T, Q> const& m, T const& epsilon) - { - bool result = true; - for(length_t i = 0; result && i < m.length() ; ++i) - result = isNull(m[i], epsilon); - return result; - } - - template - GLM_FUNC_QUALIFIER bool isIdentity(mat const& m, T const& epsilon) - { - bool result = true; - for(length_t i = 0; result && i < m[0].length() ; ++i) - { - for(length_t j = 0; result && j < i ; ++j) - result = abs(m[i][j]) <= epsilon; - if(result) - result = abs(m[i][i] - 1) <= epsilon; - for(length_t j = i + 1; result && j < m.length(); ++j) - result = abs(m[i][j]) <= epsilon; - } - return result; - } - - template - GLM_FUNC_QUALIFIER bool isNormalized(mat<2, 2, T, Q> const& m, T const& epsilon) - { - bool result(true); - for(length_t i = 0; result && i < m.length(); ++i) - result = isNormalized(m[i], epsilon); - for(length_t i = 0; result && i < m.length(); ++i) - { - typename mat<2, 2, T, Q>::col_type v; - for(length_t j = 0; j < m.length(); ++j) - v[j] = m[j][i]; - result = isNormalized(v, epsilon); - 
} - return result; - } - - template - GLM_FUNC_QUALIFIER bool isNormalized(mat<3, 3, T, Q> const& m, T const& epsilon) - { - bool result(true); - for(length_t i = 0; result && i < m.length(); ++i) - result = isNormalized(m[i], epsilon); - for(length_t i = 0; result && i < m.length(); ++i) - { - typename mat<3, 3, T, Q>::col_type v; - for(length_t j = 0; j < m.length(); ++j) - v[j] = m[j][i]; - result = isNormalized(v, epsilon); - } - return result; - } - - template - GLM_FUNC_QUALIFIER bool isNormalized(mat<4, 4, T, Q> const& m, T const& epsilon) - { - bool result(true); - for(length_t i = 0; result && i < m.length(); ++i) - result = isNormalized(m[i], epsilon); - for(length_t i = 0; result && i < m.length(); ++i) - { - typename mat<4, 4, T, Q>::col_type v; - for(length_t j = 0; j < m.length(); ++j) - v[j] = m[j][i]; - result = isNormalized(v, epsilon); - } - return result; - } - - template - GLM_FUNC_QUALIFIER bool isOrthogonal(mat const& m, T const& epsilon) - { - bool result = true; - for(length_t i(0); result && i < m.length() - 1; ++i) - for(length_t j(i + 1); result && j < m.length(); ++j) - result = areOrthogonal(m[i], m[j], epsilon); - - if(result) - { - mat tmp = transpose(m); - for(length_t i(0); result && i < m.length() - 1 ; ++i) - for(length_t j(i + 1); result && j < m.length(); ++j) - result = areOrthogonal(tmp[i], tmp[j], epsilon); - } - return result; - } -}//namespace glm diff --git a/third_party/glm/gtx/matrix_transform_2d.hpp b/third_party/glm/gtx/matrix_transform_2d.hpp deleted file mode 100755 index 5f9c540..0000000 --- a/third_party/glm/gtx/matrix_transform_2d.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/// @ref gtx_matrix_transform_2d -/// @file glm/gtx/matrix_transform_2d.hpp -/// @author Miguel Ãngel Pérez Martínez -/// -/// @see core (dependence) -/// -/// @defgroup gtx_matrix_transform_2d GLM_GTX_matrix_transform_2d -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines functions that generate common 2d transformation matrices. - -#pragma once - -// Dependency: -#include "../mat3x3.hpp" -#include "../vec2.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_matrix_transform_2d is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_matrix_transform_2d extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_matrix_transform_2d - /// @{ - - /// Builds a translation 3 * 3 matrix created from a vector of 2 components. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param v Coordinates of a translation vector. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v); - - /// Builds a rotation 3 * 3 matrix created from an angle. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param angle Rotation angle expressed in radians. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( - mat<3, 3, T, Q> const& m, - T angle); - - /// Builds a scale 3 * 3 matrix created from a vector of 2 components. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param v Coordinates of a scale vector. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v); - - /// Builds an horizontal (parallel to the x axis) shear 3 * 3 matrix. 
- /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param y Shear factor. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( - mat<3, 3, T, Q> const& m, - T y); - - /// Builds a vertical (parallel to the y axis) shear 3 * 3 matrix. - /// - /// @param m Input matrix multiplied by this translation matrix. - /// @param x Shear factor. - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( - mat<3, 3, T, Q> const& m, - T x); - - /// @} -}//namespace glm - -#include "matrix_transform_2d.inl" diff --git a/third_party/glm/gtx/matrix_transform_2d.inl b/third_party/glm/gtx/matrix_transform_2d.inl deleted file mode 100755 index a68d24d..0000000 --- a/third_party/glm/gtx/matrix_transform_2d.inl +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtx_matrix_transform_2d -/// @author Miguel Ãngel Pérez Martínez - -#include "../trigonometric.hpp" - -namespace glm -{ - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> translate( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v) - { - mat<3, 3, T, Q> Result(m); - Result[2] = m[0] * v[0] + m[1] * v[1] + m[2]; - return Result; - } - - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> rotate( - mat<3, 3, T, Q> const& m, - T angle) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - - mat<3, 3, T, Q> Result; - Result[0] = m[0] * c + m[1] * s; - Result[1] = m[0] * -s + m[1] * c; - Result[2] = m[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> scale( - mat<3, 3, T, Q> const& m, - vec<2, T, Q> const& v) - { - mat<3, 3, T, Q> Result; - Result[0] = m[0] * v[0]; - Result[1] = m[1] * v[1]; - Result[2] = m[2]; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX( - mat<3, 3, T, Q> const& m, - T y) - { - mat<3, 3, T, Q> Result(1); - Result[0][1] = y; - return m * Result; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY( - mat<3, 3, T, Q> const& m, - T x) - { - mat<3, 3, T, Q> Result(1); - Result[1][0] = x; - return m * Result; - } - -}//namespace glm diff --git a/third_party/glm/gtx/mixed_product.hpp b/third_party/glm/gtx/mixed_product.hpp deleted file mode 100755 index b242e35..0000000 --- a/third_party/glm/gtx/mixed_product.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/// @ref gtx_mixed_product -/// @file glm/gtx/mixed_product.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_mixed_product GLM_GTX_mixed_producte -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Mixed product of 3 vectors. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_mixed_product is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_mixed_product extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_mixed_product - /// @{ - - /// @brief Mixed product of 3 vectors (from GLM_GTX_mixed_product extension) - template - GLM_FUNC_DECL T mixedProduct( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3); - - /// @} -}// namespace glm - -#include "mixed_product.inl" diff --git a/third_party/glm/gtx/mixed_product.inl b/third_party/glm/gtx/mixed_product.inl deleted file mode 100755 index e5cdbdb..0000000 --- a/third_party/glm/gtx/mixed_product.inl +++ /dev/null @@ -1,15 +0,0 @@ -/// @ref gtx_mixed_product - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T mixedProduct - ( - vec<3, T, Q> const& v1, - vec<3, T, Q> const& v2, - vec<3, T, Q> const& v3 - ) - { - return dot(cross(v1, v2), v3); - } -}//namespace glm diff --git a/third_party/glm/gtx/norm.hpp b/third_party/glm/gtx/norm.hpp deleted file mode 100755 index dfaebb7..0000000 --- a/third_party/glm/gtx/norm.hpp +++ /dev/null @@ -1,88 +0,0 @@ -/// @ref gtx_norm -/// @file glm/gtx/norm.hpp -/// -/// @see core (dependence) -/// @see gtx_quaternion (dependence) -/// @see gtx_component_wise (dependence) -/// -/// @defgroup gtx_norm GLM_GTX_norm -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Various ways to compute vector norms. - -#pragma once - -// Dependency: -#include "../geometric.hpp" -#include "../gtx/quaternion.hpp" -#include "../gtx/component_wise.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_norm is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_norm extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_norm - /// @{ - - /// Returns the squared length of x. - /// From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T length2(vec const& x); - - /// Returns the squared distance between p0 and p1, i.e., length2(p0 - p1). - /// From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T distance2(vec const& p0, vec const& p1); - - //! Returns the L1 norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - //! Returns the L1 norm of v. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l1Norm(vec<3, T, Q> const& v); - - //! Returns the L2 norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - //! Returns the L2 norm of v. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T l2Norm(vec<3, T, Q> const& x); - - //! Returns the L norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth); - - //! Returns the L norm of v. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T lxNorm(vec<3, T, Q> const& x, unsigned int Depth); - - //! Returns the LMax norm between x and y. - //! From GLM_GTX_norm extension. - template - GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - //! Returns the LMax norm of v. - //! From GLM_GTX_norm extension. 
- template - GLM_FUNC_DECL T lMaxNorm(vec<3, T, Q> const& x); - - /// @} -}//namespace glm - -#include "norm.inl" diff --git a/third_party/glm/gtx/norm.inl b/third_party/glm/gtx/norm.inl deleted file mode 100755 index 6db561b..0000000 --- a/third_party/glm/gtx/norm.inl +++ /dev/null @@ -1,95 +0,0 @@ -/// @ref gtx_norm - -#include "../detail/qualifier.hpp" - -namespace glm{ -namespace detail -{ - template - struct compute_length2 - { - GLM_FUNC_QUALIFIER static T call(vec const& v) - { - return dot(v, v); - } - }; -}//namespace detail - - template - GLM_FUNC_QUALIFIER genType length2(genType x) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length2' accepts only floating-point inputs"); - return x * x; - } - - template - GLM_FUNC_QUALIFIER T length2(vec const& v) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'length2' accepts only floating-point inputs"); - return detail::compute_length2::value>::call(v); - } - - template - GLM_FUNC_QUALIFIER T distance2(T p0, T p1) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance2' accepts only floating-point inputs"); - return length2(p1 - p0); - } - - template - GLM_FUNC_QUALIFIER T distance2(vec const& p0, vec const& p1) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'distance2' accepts only floating-point inputs"); - return length2(p1 - p0); - } - - template - GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) - { - return abs(b.x - a.x) + abs(b.y - a.y) + abs(b.z - a.z); - } - - template - GLM_FUNC_QUALIFIER T l1Norm(vec<3, T, Q> const& v) - { - return abs(v.x) + abs(v.y) + abs(v.z); - } - - template - GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& a, vec<3, T, Q> const& b - ) - { - return length(b - a); - } - - template - GLM_FUNC_QUALIFIER T l2Norm(vec<3, T, Q> const& v) - { - return length(v); - } - - template - GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& x, vec<3, T, Q> const& y, unsigned int Depth) - { - return pow(pow(abs(y.x - x.x), T(Depth)) + pow(abs(y.y - x.y), T(Depth)) + pow(abs(y.z - x.z), T(Depth)), T(1) / T(Depth)); - } - - template - GLM_FUNC_QUALIFIER T lxNorm(vec<3, T, Q> const& v, unsigned int Depth) - { - return pow(pow(abs(v.x), T(Depth)) + pow(abs(v.y), T(Depth)) + pow(abs(v.z), T(Depth)), T(1) / T(Depth)); - } - - template - GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& a, vec<3, T, Q> const& b) - { - return compMax(abs(b - a)); - } - - template - GLM_FUNC_QUALIFIER T lMaxNorm(vec<3, T, Q> const& v) - { - return compMax(abs(v)); - } - -}//namespace glm diff --git a/third_party/glm/gtx/normal.hpp b/third_party/glm/gtx/normal.hpp deleted file mode 100755 index 068682f..0000000 --- a/third_party/glm/gtx/normal.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/// @ref gtx_normal -/// @file glm/gtx/normal.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_normal GLM_GTX_normal -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Compute the normal of a triangle. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_normal is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_normal extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_normal - /// @{ - - /// Computes triangle normal from triangle points. - /// - /// @see gtx_normal - template - GLM_FUNC_DECL vec<3, T, Q> triangleNormal(vec<3, T, Q> const& p1, vec<3, T, Q> const& p2, vec<3, T, Q> const& p3); - - /// @} -}//namespace glm - -#include "normal.inl" diff --git a/third_party/glm/gtx/normal.inl b/third_party/glm/gtx/normal.inl deleted file mode 100755 index 74f9fc9..0000000 --- a/third_party/glm/gtx/normal.inl +++ /dev/null @@ -1,15 +0,0 @@ -/// @ref gtx_normal - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> triangleNormal - ( - vec<3, T, Q> const& p1, - vec<3, T, Q> const& p2, - vec<3, T, Q> const& p3 - ) - { - return normalize(cross(p1 - p2, p1 - p3)); - } -}//namespace glm diff --git a/third_party/glm/gtx/normalize_dot.hpp b/third_party/glm/gtx/normalize_dot.hpp deleted file mode 100755 index 5195802..0000000 --- a/third_party/glm/gtx/normalize_dot.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref gtx_normalize_dot -/// @file glm/gtx/normalize_dot.hpp -/// -/// @see core (dependence) -/// @see gtx_fast_square_root (dependence) -/// -/// @defgroup gtx_normalize_dot GLM_GTX_normalize_dot -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Dot product of vectors that need to be normalize with a single square root. - -#pragma once - -// Dependency: -#include "../gtx/fast_square_root.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_normalize_dot is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_normalize_dot extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_normalize_dot - /// @{ - - /// Normalize parameters and returns the dot product of x and y. - /// It's faster that dot(normalize(x), normalize(y)). - /// - /// @see gtx_normalize_dot extension. - template - GLM_FUNC_DECL T normalizeDot(vec const& x, vec const& y); - - /// Normalize parameters and returns the dot product of x and y. - /// Faster that dot(fastNormalize(x), fastNormalize(y)). - /// - /// @see gtx_normalize_dot extension. 
- template - GLM_FUNC_DECL T fastNormalizeDot(vec const& x, vec const& y); - - /// @} -}//namespace glm - -#include "normalize_dot.inl" diff --git a/third_party/glm/gtx/normalize_dot.inl b/third_party/glm/gtx/normalize_dot.inl deleted file mode 100755 index 7bcd9a5..0000000 --- a/third_party/glm/gtx/normalize_dot.inl +++ /dev/null @@ -1,16 +0,0 @@ -/// @ref gtx_normalize_dot - -namespace glm -{ - template - GLM_FUNC_QUALIFIER T normalizeDot(vec const& x, vec const& y) - { - return glm::dot(x, y) * glm::inversesqrt(glm::dot(x, x) * glm::dot(y, y)); - } - - template - GLM_FUNC_QUALIFIER T fastNormalizeDot(vec const& x, vec const& y) - { - return glm::dot(x, y) * glm::fastInverseSqrt(glm::dot(x, x) * glm::dot(y, y)); - } -}//namespace glm diff --git a/third_party/glm/gtx/number_precision.hpp b/third_party/glm/gtx/number_precision.hpp deleted file mode 100755 index 3a606bd..0000000 --- a/third_party/glm/gtx/number_precision.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/// @ref gtx_number_precision -/// @file glm/gtx/number_precision.hpp -/// -/// @see core (dependence) -/// @see gtc_type_precision (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_number_precision GLM_GTX_number_precision -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defined size types. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/type_precision.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_number_precision is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_number_precision extension included") -# endif -#endif - -namespace glm{ -namespace gtx -{ - ///////////////////////////// - // Unsigned int vector types - - /// @addtogroup gtx_number_precision - /// @{ - - typedef u8 u8vec1; //!< \brief 8bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - typedef u16 u16vec1; //!< \brief 16bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - typedef u32 u32vec1; //!< \brief 32bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - typedef u64 u64vec1; //!< \brief 64bit unsigned integer scalar. (from GLM_GTX_number_precision extension) - - ////////////////////// - // Float vector types - - typedef f32 f32vec1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f64 f64vec1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - - ////////////////////// - // Float matrix types - - typedef f32 f32mat1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f32 f32mat1x1; //!< \brief Single-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f64 f64mat1; //!< \brief Double-qualifier floating-point scalar. (from GLM_GTX_number_precision extension) - typedef f64 f64mat1x1; //!< \brief Double-qualifier floating-point scalar. 
(from GLM_GTX_number_precision extension) - - /// @} -}//namespace gtx -}//namespace glm - -#include "number_precision.inl" diff --git a/third_party/glm/gtx/number_precision.inl b/third_party/glm/gtx/number_precision.inl deleted file mode 100755 index b39d71c..0000000 --- a/third_party/glm/gtx/number_precision.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref gtx_number_precision - -namespace glm -{ - -} diff --git a/third_party/glm/gtx/optimum_pow.hpp b/third_party/glm/gtx/optimum_pow.hpp deleted file mode 100755 index 9284a47..0000000 --- a/third_party/glm/gtx/optimum_pow.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/// @ref gtx_optimum_pow -/// @file glm/gtx/optimum_pow.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_optimum_pow GLM_GTX_optimum_pow -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Integer exponentiation of power functions. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_optimum_pow is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_optimum_pow extension included") -# endif -#endif - -namespace glm{ -namespace gtx -{ - /// @addtogroup gtx_optimum_pow - /// @{ - - /// Returns x raised to the power of 2. - /// - /// @see gtx_optimum_pow - template - GLM_FUNC_DECL genType pow2(genType const& x); - - /// Returns x raised to the power of 3. - /// - /// @see gtx_optimum_pow - template - GLM_FUNC_DECL genType pow3(genType const& x); - - /// Returns x raised to the power of 4. - /// - /// @see gtx_optimum_pow - template - GLM_FUNC_DECL genType pow4(genType const& x); - - /// @} -}//namespace gtx -}//namespace glm - -#include "optimum_pow.inl" diff --git a/third_party/glm/gtx/optimum_pow.inl b/third_party/glm/gtx/optimum_pow.inl deleted file mode 100755 index a26c19c..0000000 --- a/third_party/glm/gtx/optimum_pow.inl +++ /dev/null @@ -1,22 +0,0 @@ -/// @ref gtx_optimum_pow - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType pow2(genType const& x) - { - return x * x; - } - - template - GLM_FUNC_QUALIFIER genType pow3(genType const& x) - { - return x * x * x; - } - - template - GLM_FUNC_QUALIFIER genType pow4(genType const& x) - { - return (x * x) * (x * x); - } -}//namespace glm diff --git a/third_party/glm/gtx/orthonormalize.hpp b/third_party/glm/gtx/orthonormalize.hpp deleted file mode 100755 index 3e004fb..0000000 --- a/third_party/glm/gtx/orthonormalize.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/// @ref gtx_orthonormalize -/// @file glm/gtx/orthonormalize.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_orthonormalize GLM_GTX_orthonormalize -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Orthonormalize matrices. - -#pragma once - -// Dependency: -#include "../vec3.hpp" -#include "../mat3x3.hpp" -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_orthonormalize is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_orthonormalize extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_orthonormalize - /// @{ - - /// Returns the orthonormalized matrix of m. - /// - /// @see gtx_orthonormalize - template - GLM_FUNC_DECL mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m); - - /// Orthonormalizes x according y. - /// - /// @see gtx_orthonormalize - template - GLM_FUNC_DECL vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y); - - /// @} -}//namespace glm - -#include "orthonormalize.inl" diff --git a/third_party/glm/gtx/orthonormalize.inl b/third_party/glm/gtx/orthonormalize.inl deleted file mode 100755 index cb553ba..0000000 --- a/third_party/glm/gtx/orthonormalize.inl +++ /dev/null @@ -1,29 +0,0 @@ -/// @ref gtx_orthonormalize - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> orthonormalize(mat<3, 3, T, Q> const& m) - { - mat<3, 3, T, Q> r = m; - - r[0] = normalize(r[0]); - - T d0 = dot(r[0], r[1]); - r[1] -= r[0] * d0; - r[1] = normalize(r[1]); - - T d1 = dot(r[1], r[2]); - d0 = dot(r[0], r[2]); - r[2] -= r[0] * d0 + r[1] * d1; - r[2] = normalize(r[2]); - - return r; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> orthonormalize(vec<3, T, Q> const& x, vec<3, T, Q> const& y) - { - return normalize(x - y * dot(y, x)); - } -}//namespace glm diff --git a/third_party/glm/gtx/perpendicular.hpp b/third_party/glm/gtx/perpendicular.hpp deleted file mode 100755 index 72b77b6..0000000 --- a/third_party/glm/gtx/perpendicular.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/// @ref gtx_perpendicular -/// @file glm/gtx/perpendicular.hpp -/// -/// @see core (dependence) -/// @see gtx_projection (dependence) -/// -/// @defgroup gtx_perpendicular GLM_GTX_perpendicular -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Perpendicular of a vector from other one - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/projection.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_perpendicular is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_perpendicular extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_perpendicular - /// @{ - - //! Projects x a perpendicular axis of Normal. - //! From GLM_GTX_perpendicular extension. 
- template - GLM_FUNC_DECL genType perp(genType const& x, genType const& Normal); - - /// @} -}//namespace glm - -#include "perpendicular.inl" diff --git a/third_party/glm/gtx/perpendicular.inl b/third_party/glm/gtx/perpendicular.inl deleted file mode 100755 index 1e72f33..0000000 --- a/third_party/glm/gtx/perpendicular.inl +++ /dev/null @@ -1,10 +0,0 @@ -/// @ref gtx_perpendicular - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType perp(genType const& x, genType const& Normal) - { - return x - proj(x, Normal); - } -}//namespace glm diff --git a/third_party/glm/gtx/polar_coordinates.hpp b/third_party/glm/gtx/polar_coordinates.hpp deleted file mode 100755 index b399112..0000000 --- a/third_party/glm/gtx/polar_coordinates.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/// @ref gtx_polar_coordinates -/// @file glm/gtx/polar_coordinates.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_polar_coordinates GLM_GTX_polar_coordinates -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Conversion from Euclidean space to polar space and revert. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_polar_coordinates is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_polar_coordinates extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_polar_coordinates - /// @{ - - /// Convert Euclidean to Polar coordinates, x is the xz distance, y, the latitude and z the longitude. - /// - /// @see gtx_polar_coordinates - template - GLM_FUNC_DECL vec<3, T, Q> polar( - vec<3, T, Q> const& euclidean); - - /// Convert Polar to Euclidean coordinates. - /// - /// @see gtx_polar_coordinates - template - GLM_FUNC_DECL vec<3, T, Q> euclidean( - vec<2, T, Q> const& polar); - - /// @} -}//namespace glm - -#include "polar_coordinates.inl" diff --git a/third_party/glm/gtx/polar_coordinates.inl b/third_party/glm/gtx/polar_coordinates.inl deleted file mode 100755 index 371c8dd..0000000 --- a/third_party/glm/gtx/polar_coordinates.inl +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtx_polar_coordinates - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> polar - ( - vec<3, T, Q> const& euclidean - ) - { - T const Length(length(euclidean)); - vec<3, T, Q> const tmp(euclidean / Length); - T const xz_dist(sqrt(tmp.x * tmp.x + tmp.z * tmp.z)); - - return vec<3, T, Q>( - asin(tmp.y), // latitude - atan(tmp.x, tmp.z), // longitude - xz_dist); // xz distance - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> euclidean - ( - vec<2, T, Q> const& polar - ) - { - T const latitude(polar.x); - T const longitude(polar.y); - - return vec<3, T, Q>( - cos(latitude) * sin(longitude), - sin(latitude), - cos(latitude) * cos(longitude)); - } - -}//namespace glm diff --git a/third_party/glm/gtx/projection.hpp b/third_party/glm/gtx/projection.hpp deleted file mode 100755 index 678f3ad..0000000 --- a/third_party/glm/gtx/projection.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/// @ref gtx_projection -/// @file glm/gtx/projection.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_projection GLM_GTX_projection -/// @ingroup gtx -/// -/// Include to use the features of this extension. 
-/// -/// Projection of a vector to other one - -#pragma once - -// Dependency: -#include "../geometric.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_projection is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_projection extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_projection - /// @{ - - /// Projects x on Normal. - /// - /// @param[in] x A vector to project - /// @param[in] Normal A normal that doesn't need to be of unit length. - /// - /// @see gtx_projection - template - GLM_FUNC_DECL genType proj(genType const& x, genType const& Normal); - - /// @} -}//namespace glm - -#include "projection.inl" diff --git a/third_party/glm/gtx/projection.inl b/third_party/glm/gtx/projection.inl deleted file mode 100755 index f23f884..0000000 --- a/third_party/glm/gtx/projection.inl +++ /dev/null @@ -1,10 +0,0 @@ -/// @ref gtx_projection - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType proj(genType const& x, genType const& Normal) - { - return glm::dot(x, Normal) / glm::dot(Normal, Normal) * Normal; - } -}//namespace glm diff --git a/third_party/glm/gtx/quaternion.hpp b/third_party/glm/gtx/quaternion.hpp deleted file mode 100755 index 5c2b5ad..0000000 --- a/third_party/glm/gtx/quaternion.hpp +++ /dev/null @@ -1,174 +0,0 @@ -/// @ref gtx_quaternion -/// @file glm/gtx/quaternion.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_quaternion GLM_GTX_quaternion -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Extented quaternion types and functions - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/constants.hpp" -#include "../gtc/quaternion.hpp" -#include "../ext/quaternion_exponential.hpp" -#include "../gtx/norm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_quaternion is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_quaternion extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_quaternion - /// @{ - - /// Create an identity quaternion. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL GLM_CONSTEXPR qua quat_identity(); - - /// Compute a cross product between a quaternion and a vector. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL vec<3, T, Q> cross( - qua const& q, - vec<3, T, Q> const& v); - - //! Compute a cross product between a vector and a quaternion. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL vec<3, T, Q> cross( - vec<3, T, Q> const& v, - qua const& q); - - //! Compute a point on a path according squad equation. - //! q1 and q2 are control points; s1 and s2 are intermediate control points. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL qua squad( - qua const& q1, - qua const& q2, - qua const& s1, - qua const& s2, - T const& h); - - //! Returns an intermediate control point for squad interpolation. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL qua intermediate( - qua const& prev, - qua const& curr, - qua const& next); - - //! Returns quarternion square root. 
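The removed proj()/perp() pair decomposes a vector against an axis: proj keeps the component along Normal (Normal need not be unit length, hence the division by dot(N, N)), and perp keeps whatever is left. A minimal standalone sketch with a hypothetical Vec3 helper:

struct Vec3 { float x, y, z; };

static float dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static Vec3  mul(Vec3 a, float s) { return {a.x * s, a.y * s, a.z * s}; }
static Vec3  sub(Vec3 a, Vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }

// Component of x along n, as computed by the deleted proj().
static Vec3 proj(Vec3 x, Vec3 n) { return mul(n, dot(x, n) / dot(n, n)); }

// Component of x perpendicular to n, as computed by the deleted perp().
static Vec3 perp(Vec3 x, Vec3 n) { return sub(x, proj(x, n)); }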
- /// - /// @see gtx_quaternion - //template - //qua sqrt( - // qua const& q); - - //! Rotates a 3 components vector by a quaternion. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL vec<3, T, Q> rotate( - qua const& q, - vec<3, T, Q> const& v); - - /// Rotates a 4 components vector by a quaternion. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL vec<4, T, Q> rotate( - qua const& q, - vec<4, T, Q> const& v); - - /// Extract the real component of a quaternion. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL T extractRealComponent( - qua const& q); - - /// Converts a quaternion to a 3 * 3 matrix. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL mat<3, 3, T, Q> toMat3( - qua const& x){return mat3_cast(x);} - - /// Converts a quaternion to a 4 * 4 matrix. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL mat<4, 4, T, Q> toMat4( - qua const& x){return mat4_cast(x);} - - /// Converts a 3 * 3 matrix to a quaternion. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL qua toQuat( - mat<3, 3, T, Q> const& x){return quat_cast(x);} - - /// Converts a 4 * 4 matrix to a quaternion. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL qua toQuat( - mat<4, 4, T, Q> const& x){return quat_cast(x);} - - /// Quaternion interpolation using the rotation short path. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL qua shortMix( - qua const& x, - qua const& y, - T const& a); - - /// Quaternion normalized linear interpolation. - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL qua fastMix( - qua const& x, - qua const& y, - T const& a); - - /// Compute the rotation between two vectors. - /// @param orig vector, needs to be normalized - /// @param dest vector, needs to be normalized - /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL qua rotation( - vec<3, T, Q> const& orig, - vec<3, T, Q> const& dest); - - /// Returns the squared length of x. 
- /// - /// @see gtx_quaternion - template - GLM_FUNC_DECL GLM_CONSTEXPR T length2(qua const& q); - - /// @} -}//namespace glm - -#include "quaternion.inl" diff --git a/third_party/glm/gtx/quaternion.inl b/third_party/glm/gtx/quaternion.inl deleted file mode 100755 index d125bcc..0000000 --- a/third_party/glm/gtx/quaternion.inl +++ /dev/null @@ -1,159 +0,0 @@ -/// @ref gtx_quaternion - -#include -#include "../gtc/constants.hpp" - -namespace glm -{ - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua quat_identity() - { - return qua(static_cast(1), static_cast(0), static_cast(0), static_cast(0)); - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> cross(vec<3, T, Q> const& v, qua const& q) - { - return inverse(q) * v; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> cross(qua const& q, vec<3, T, Q> const& v) - { - return q * v; - } - - template - GLM_FUNC_QUALIFIER qua squad - ( - qua const& q1, - qua const& q2, - qua const& s1, - qua const& s2, - T const& h) - { - return mix(mix(q1, q2, h), mix(s1, s2, h), static_cast(2) * (static_cast(1) - h) * h); - } - - template - GLM_FUNC_QUALIFIER qua intermediate - ( - qua const& prev, - qua const& curr, - qua const& next - ) - { - qua invQuat = inverse(curr); - return exp((log(next * invQuat) + log(prev * invQuat)) / static_cast(-4)) * curr; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotate(qua const& q, vec<3, T, Q> const& v) - { - return q * v; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotate(qua const& q, vec<4, T, Q> const& v) - { - return q * v; - } - - template - GLM_FUNC_QUALIFIER T extractRealComponent(qua const& q) - { - T w = static_cast(1) - q.x * q.x - q.y * q.y - q.z * q.z; - if(w < T(0)) - return T(0); - else - return -sqrt(w); - } - - template - GLM_FUNC_QUALIFIER GLM_CONSTEXPR T length2(qua const& q) - { - return q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w; - } - - template - GLM_FUNC_QUALIFIER qua shortMix(qua const& x, qua const& y, T const& a) - { - if(a <= static_cast(0)) return x; - if(a >= static_cast(1)) return y; - - T fCos = dot(x, y); - qua y2(y); //BUG!!! qua y2; - if(fCos < static_cast(0)) - { - y2 = -y; - fCos = -fCos; - } - - //if(fCos > 1.0f) // problem - T k0, k1; - if(fCos > (static_cast(1) - epsilon())) - { - k0 = static_cast(1) - a; - k1 = static_cast(0) + a; //BUG!!! 1.0f + a; - } - else - { - T fSin = sqrt(T(1) - fCos * fCos); - T fAngle = atan(fSin, fCos); - T fOneOverSin = static_cast(1) / fSin; - k0 = sin((static_cast(1) - a) * fAngle) * fOneOverSin; - k1 = sin((static_cast(0) + a) * fAngle) * fOneOverSin; - } - - return qua( - k0 * x.w + k1 * y2.w, - k0 * x.x + k1 * y2.x, - k0 * x.y + k1 * y2.y, - k0 * x.z + k1 * y2.z); - } - - template - GLM_FUNC_QUALIFIER qua fastMix(qua const& x, qua const& y, T const& a) - { - return glm::normalize(x * (static_cast(1) - a) + (y * a)); - } - - template - GLM_FUNC_QUALIFIER qua rotation(vec<3, T, Q> const& orig, vec<3, T, Q> const& dest) - { - T cosTheta = dot(orig, dest); - vec<3, T, Q> rotationAxis; - - if(cosTheta >= static_cast(1) - epsilon()) { - // orig and dest point in the same direction - return quat_identity(); - } - - if(cosTheta < static_cast(-1) + epsilon()) - { - // special case when vectors in opposite directions : - // there is no "ideal" rotation axis - // So guess one; any will do as long as it's perpendicular to start - // This implementation favors a rotation around the Up axis (Y), - // since it's often what you want to do. 
- rotationAxis = cross(vec<3, T, Q>(0, 0, 1), orig); - if(length2(rotationAxis) < epsilon()) // bad luck, they were parallel, try again! - rotationAxis = cross(vec<3, T, Q>(1, 0, 0), orig); - - rotationAxis = normalize(rotationAxis); - return angleAxis(pi(), rotationAxis); - } - - // Implementation from Stan Melax's Game Programming Gems 1 article - rotationAxis = cross(orig, dest); - - T s = sqrt((T(1) + cosTheta) * static_cast(2)); - T invs = static_cast(1) / s; - - return qua( - s * static_cast(0.5f), - rotationAxis.x * invs, - rotationAxis.y * invs, - rotationAxis.z * invs); - } -}//namespace glm diff --git a/third_party/glm/gtx/range.hpp b/third_party/glm/gtx/range.hpp deleted file mode 100755 index 93bcb9a..0000000 --- a/third_party/glm/gtx/range.hpp +++ /dev/null @@ -1,98 +0,0 @@ -/// @ref gtx_range -/// @file glm/gtx/range.hpp -/// @author Joshua Moerman -/// -/// @defgroup gtx_range GLM_GTX_range -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines begin and end for vectors and matrices. Useful for range-based for loop. -/// The range is defined over the elements, not over columns or rows (e.g. mat4 has 16 elements). - -#pragma once - -// Dependencies -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_range is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_range extension included") -# endif -#endif - -#include "../gtc/type_ptr.hpp" -#include "../gtc/vec1.hpp" - -namespace glm -{ - /// @addtogroup gtx_range - /// @{ - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(push) -# pragma warning(disable : 4100) // unreferenced formal parameter -# endif - - template - inline length_t components(vec<1, T, Q> const& v) - { - return v.length(); - } - - template - inline length_t components(vec<2, T, Q> const& v) - { - return v.length(); - } - - template - inline length_t components(vec<3, T, Q> const& v) - { - return v.length(); - } - - template - inline length_t components(vec<4, T, Q> const& v) - { - return v.length(); - } - - template - inline length_t components(genType const& m) - { - return m.length() * m[0].length(); - } - - template - inline typename genType::value_type const * begin(genType const& v) - { - return value_ptr(v); - } - - template - inline typename genType::value_type const * end(genType const& v) - { - return begin(v) + components(v); - } - - template - inline typename genType::value_type * begin(genType& v) - { - return value_ptr(v); - } - - template - inline typename genType::value_type * end(genType& v) - { - return begin(v) + components(v); - } - -# if GLM_COMPILER & GLM_COMPILER_VC -# pragma warning(pop) -# endif - - /// @} -}//namespace glm diff --git a/third_party/glm/gtx/raw_data.hpp b/third_party/glm/gtx/raw_data.hpp deleted file mode 100755 index 86cbe77..0000000 --- a/third_party/glm/gtx/raw_data.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/// @ref gtx_raw_data -/// @file glm/gtx/raw_data.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_raw_data GLM_GTX_raw_data -/// @ingroup gtx -/// -/// Include to use the features of this extension. 
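The rotation() function deleted above builds the shortest-arc quaternion that rotates one unit vector onto another (the Stan Melax construction its comment cites). A standalone sketch of the general branch only; the Vec3/Quat helpers are stand-ins, and the parallel and anti-parallel special cases handled by the original are deliberately omitted here:

#include <cmath>

struct Vec3 { float x, y, z; };
struct Quat { float w, x, y, z; };   // w first, matching the deleted constructor order

static float dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static Vec3 cross(Vec3 a, Vec3 b)
{
    return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}

// Shortest-arc quaternion rotating unit vector orig onto unit vector dest:
// s = sqrt(2 * (1 + cos(theta))), axis = orig x dest, as in the deleted code.
static Quat rotationBetween(Vec3 orig, Vec3 dest)
{
    float cosTheta = dot(orig, dest);
    Vec3  axis = cross(orig, dest);
    float s = std::sqrt((1.0f + cosTheta) * 2.0f);
    float invs = 1.0f / s;
    return {s * 0.5f, axis.x * invs, axis.y * invs, axis.z * invs};
}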
-/// -/// Projection of a vector to other one - -#pragma once - -// Dependencies -#include "../ext/scalar_uint_sized.hpp" -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_raw_data is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_raw_data extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_raw_data - /// @{ - - //! Type for byte numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint8 byte; - - //! Type for word numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint16 word; - - //! Type for dword numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint32 dword; - - //! Type for qword numbers. - //! From GLM_GTX_raw_data extension. - typedef detail::uint64 qword; - - /// @} -}// namespace glm - -#include "raw_data.inl" diff --git a/third_party/glm/gtx/raw_data.inl b/third_party/glm/gtx/raw_data.inl deleted file mode 100755 index c740317..0000000 --- a/third_party/glm/gtx/raw_data.inl +++ /dev/null @@ -1,2 +0,0 @@ -/// @ref gtx_raw_data - diff --git a/third_party/glm/gtx/rotate_normalized_axis.hpp b/third_party/glm/gtx/rotate_normalized_axis.hpp deleted file mode 100755 index 2103ca0..0000000 --- a/third_party/glm/gtx/rotate_normalized_axis.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtx_rotate_normalized_axis -/// @file glm/gtx/rotate_normalized_axis.hpp -/// -/// @see core (dependence) -/// @see gtc_matrix_transform -/// @see gtc_quaternion -/// -/// @defgroup gtx_rotate_normalized_axis GLM_GTX_rotate_normalized_axis -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Quaternions and matrices rotations around normalized axis. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/epsilon.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_rotate_normalized_axis is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_rotate_normalized_axis extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_rotate_normalized_axis - /// @{ - - /// Builds a rotation 4 * 4 matrix created from a normalized axis and an angle. - /// - /// @param m Input matrix multiplied by this rotation matrix. - /// @param angle Rotation angle expressed in radians. - /// @param axis Rotation axis, must be normalized. - /// @tparam T Value type used to build the matrix. Currently supported: half (not recommended), float or double. - /// - /// @see gtx_rotate_normalized_axis - /// @see - rotate(T angle, T x, T y, T z) - /// @see - rotate(mat<4, 4, T, Q> const& m, T angle, T x, T y, T z) - /// @see - rotate(T angle, vec<3, T, Q> const& v) - template - GLM_FUNC_DECL mat<4, 4, T, Q> rotateNormalizedAxis( - mat<4, 4, T, Q> const& m, - T const& angle, - vec<3, T, Q> const& axis); - - /// Rotates a quaternion from a vector of 3 components normalized axis and an angle. - /// - /// @param q Source orientation - /// @param angle Angle expressed in radians. - /// @param axis Normalized axis of the rotation, must be normalized. 
- /// - /// @see gtx_rotate_normalized_axis - template - GLM_FUNC_DECL qua rotateNormalizedAxis( - qua const& q, - T const& angle, - vec<3, T, Q> const& axis); - - /// @} -}//namespace glm - -#include "rotate_normalized_axis.inl" diff --git a/third_party/glm/gtx/rotate_normalized_axis.inl b/third_party/glm/gtx/rotate_normalized_axis.inl deleted file mode 100755 index b2e9278..0000000 --- a/third_party/glm/gtx/rotate_normalized_axis.inl +++ /dev/null @@ -1,58 +0,0 @@ -/// @ref gtx_rotate_normalized_axis - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotateNormalizedAxis - ( - mat<4, 4, T, Q> const& m, - T const& angle, - vec<3, T, Q> const& v - ) - { - T const a = angle; - T const c = cos(a); - T const s = sin(a); - - vec<3, T, Q> const axis(v); - - vec<3, T, Q> const temp((static_cast(1) - c) * axis); - - mat<4, 4, T, Q> Rotate; - Rotate[0][0] = c + temp[0] * axis[0]; - Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; - Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; - - Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; - Rotate[1][1] = c + temp[1] * axis[1]; - Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; - - Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; - Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; - Rotate[2][2] = c + temp[2] * axis[2]; - - mat<4, 4, T, Q> Result; - Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; - Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; - Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; - Result[3] = m[3]; - return Result; - } - - template - GLM_FUNC_QUALIFIER qua rotateNormalizedAxis - ( - qua const& q, - T const& angle, - vec<3, T, Q> const& v - ) - { - vec<3, T, Q> const Tmp(v); - - T const AngleRad(angle); - T const Sin = sin(AngleRad * T(0.5)); - - return q * qua(cos(AngleRad * static_cast(0.5)), Tmp.x * Sin, Tmp.y * Sin, Tmp.z * Sin); - //return gtc::quaternion::cross(q, tquat(cos(AngleRad * T(0.5)), Tmp.x * fSin, Tmp.y * fSin, Tmp.z * fSin)); - } -}//namespace glm diff --git a/third_party/glm/gtx/rotate_vector.hpp b/third_party/glm/gtx/rotate_vector.hpp deleted file mode 100755 index dcd5b95..0000000 --- a/third_party/glm/gtx/rotate_vector.hpp +++ /dev/null @@ -1,123 +0,0 @@ -/// @ref gtx_rotate_vector -/// @file glm/gtx/rotate_vector.hpp -/// -/// @see core (dependence) -/// @see gtx_transform (dependence) -/// -/// @defgroup gtx_rotate_vector GLM_GTX_rotate_vector -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Function to directly rotate a vector - -#pragma once - -// Dependency: -#include "../gtx/transform.hpp" -#include "../gtc/epsilon.hpp" -#include "../ext/vector_relational.hpp" -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_rotate_vector is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_rotate_vector extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_rotate_vector - /// @{ - - /// Returns Spherical interpolation between two vectors - /// - /// @param x A first vector - /// @param y A second vector - /// @param a Interpolation factor. The interpolation is defined beyond the range [0, 1]. 
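The quaternion overload of rotateNormalizedAxis() removed above skips the axis normalization that the regular GLM rotate() performs and directly builds the axis-angle quaternion. A short sketch of that construction (Quat is a stand-in type for illustration); the deleted overload then right-multiplies the source orientation q by this quaternion:

#include <cmath>

struct Quat { float w, x, y, z; };

// Axis-angle to quaternion, with the axis assumed to already be unit length,
// which is exactly the precondition the deleted rotateNormalizedAxis() relies on.
static Quat fromNormalizedAxisAngle(float angleRad, float ax, float ay, float az)
{
    float s = std::sin(angleRad * 0.5f);
    return {std::cos(angleRad * 0.5f), ax * s, ay * s, az * s};
}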
- /// - /// @see gtx_rotate_vector - template - GLM_FUNC_DECL vec<3, T, Q> slerp( - vec<3, T, Q> const& x, - vec<3, T, Q> const& y, - T const& a); - - //! Rotate a two dimensional vector. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<2, T, Q> rotate( - vec<2, T, Q> const& v, - T const& angle); - - //! Rotate a three dimensional vector around an axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotate( - vec<3, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal); - - //! Rotate a four dimensional vector around an axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotate( - vec<4, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal); - - //! Rotate a three dimensional vector around the X axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotateX( - vec<3, T, Q> const& v, - T const& angle); - - //! Rotate a three dimensional vector around the Y axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotateY( - vec<3, T, Q> const& v, - T const& angle); - - //! Rotate a three dimensional vector around the Z axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<3, T, Q> rotateZ( - vec<3, T, Q> const& v, - T const& angle); - - //! Rotate a four dimensional vector around the X axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotateX( - vec<4, T, Q> const& v, - T const& angle); - - //! Rotate a four dimensional vector around the Y axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotateY( - vec<4, T, Q> const& v, - T const& angle); - - //! Rotate a four dimensional vector around the Z axis. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL vec<4, T, Q> rotateZ( - vec<4, T, Q> const& v, - T const& angle); - - //! Build a rotation matrix from a normal and a up vector. - //! From GLM_GTX_rotate_vector extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> orientation( - vec<3, T, Q> const& Normal, - vec<3, T, Q> const& Up); - - /// @} -}//namespace glm - -#include "rotate_vector.inl" diff --git a/third_party/glm/gtx/rotate_vector.inl b/third_party/glm/gtx/rotate_vector.inl deleted file mode 100755 index f8136e7..0000000 --- a/third_party/glm/gtx/rotate_vector.inl +++ /dev/null @@ -1,187 +0,0 @@ -/// @ref gtx_rotate_vector - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec<3, T, Q> slerp - ( - vec<3, T, Q> const& x, - vec<3, T, Q> const& y, - T const& a - ) - { - // get cosine of angle between vectors (-1 -> 1) - T CosAlpha = dot(x, y); - // get angle (0 -> pi) - T Alpha = acos(CosAlpha); - // get sine of angle between vectors (0 -> 1) - T SinAlpha = sin(Alpha); - // this breaks down when SinAlpha = 0, i.e. 
Alpha = 0 or pi - T t1 = sin((static_cast(1) - a) * Alpha) / SinAlpha; - T t2 = sin(a * Alpha) / SinAlpha; - - // interpolate src vectors - return x * t1 + y * t2; - } - - template - GLM_FUNC_QUALIFIER vec<2, T, Q> rotate - ( - vec<2, T, Q> const& v, - T const& angle - ) - { - vec<2, T, Q> Result; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos - v.y * Sin; - Result.y = v.x * Sin + v.y * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotate - ( - vec<3, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal - ) - { - return mat<3, 3, T, Q>(glm::rotate(angle, normal)) * v; - } - /* - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateGTX( - const vec<3, T, Q>& x, - T angle, - const vec<3, T, Q>& normal) - { - const T Cos = cos(radians(angle)); - const T Sin = sin(radians(angle)); - return x * Cos + ((x * normal) * (T(1) - Cos)) * normal + cross(x, normal) * Sin; - } - */ - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotate - ( - vec<4, T, Q> const& v, - T const& angle, - vec<3, T, Q> const& normal - ) - { - return rotate(angle, normal) * v; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateX - ( - vec<3, T, Q> const& v, - T const& angle - ) - { - vec<3, T, Q> Result(v); - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.y = v.y * Cos - v.z * Sin; - Result.z = v.y * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateY - ( - vec<3, T, Q> const& v, - T const& angle - ) - { - vec<3, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos + v.z * Sin; - Result.z = -v.x * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<3, T, Q> rotateZ - ( - vec<3, T, Q> const& v, - T const& angle - ) - { - vec<3, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos - v.y * Sin; - Result.y = v.x * Sin + v.y * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotateX - ( - vec<4, T, Q> const& v, - T const& angle - ) - { - vec<4, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.y = v.y * Cos - v.z * Sin; - Result.z = v.y * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotateY - ( - vec<4, T, Q> const& v, - T const& angle - ) - { - vec<4, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos + v.z * Sin; - Result.z = -v.x * Sin + v.z * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER vec<4, T, Q> rotateZ - ( - vec<4, T, Q> const& v, - T const& angle - ) - { - vec<4, T, Q> Result = v; - T const Cos(cos(angle)); - T const Sin(sin(angle)); - - Result.x = v.x * Cos - v.y * Sin; - Result.y = v.x * Sin + v.y * Cos; - return Result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> orientation - ( - vec<3, T, Q> const& Normal, - vec<3, T, Q> const& Up - ) - { - if(all(equal(Normal, Up, epsilon()))) - return mat<4, 4, T, Q>(static_cast(1)); - - vec<3, T, Q> RotationAxis = cross(Up, Normal); - T Angle = acos(dot(Normal, Up)); - - return rotate(Angle, RotationAxis); - } -}//namespace glm diff --git a/third_party/glm/gtx/scalar_multiplication.hpp b/third_party/glm/gtx/scalar_multiplication.hpp deleted file mode 100755 index 496ba19..0000000 --- a/third_party/glm/gtx/scalar_multiplication.hpp +++ /dev/null @@ -1,75 +0,0 @@ -/// @ref gtx -/// @file glm/gtx/scalar_multiplication.hpp -/// @author Joshua Moerman -/// -/// Include to use the features 
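The slerp() implementation deleted above notes that it breaks down when SinAlpha is 0 (the vectors are parallel or opposite). A standalone sketch of the same interpolation; the clamp and the small-angle fallback to a plain lerp are additions here to guard that breakdown, they are not part of the deleted code, and the Vec3 helper is a stand-in:

#include <algorithm>
#include <cmath>

struct Vec3 { float x, y, z; };

static float dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

// Spherical interpolation between two unit vectors, following the deleted slerp().
static Vec3 slerpVec(Vec3 x, Vec3 y, float a)
{
    float cosAlpha = std::clamp(dot(x, y), -1.0f, 1.0f);
    float alpha    = std::acos(cosAlpha);
    float sinAlpha = std::sin(alpha);
    float t1, t2;
    if (sinAlpha < 1e-6f) { t1 = 1.0f - a; t2 = a; }   // near-parallel: fall back to lerp
    else
    {
        t1 = std::sin((1.0f - a) * alpha) / sinAlpha;
        t2 = std::sin(a * alpha) / sinAlpha;
    }
    return {x.x * t1 + y.x * t2, x.y * t1 + y.y * t2, x.z * t1 + y.z * t2};
}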
of this extension. -/// -/// Enables scalar multiplication for all types -/// -/// Since GLSL is very strict about types, the following (often used) combinations do not work: -/// double * vec4 -/// int * vec4 -/// vec4 / int -/// So we'll fix that! Of course "float * vec4" should remain the same (hence the enable_if magic) - -#pragma once - -#include "../detail/setup.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_scalar_multiplication is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_scalar_multiplication extension included") -# endif -#endif - -#include "../vec2.hpp" -#include "../vec3.hpp" -#include "../vec4.hpp" -#include "../mat2x2.hpp" -#include - -namespace glm -{ - template - using return_type_scalar_multiplication = typename std::enable_if< - !std::is_same::value // T may not be a float - && std::is_arithmetic::value, Vec // But it may be an int or double (no vec3 or mat3, ...) - >::type; - -#define GLM_IMPLEMENT_SCAL_MULT(Vec) \ - template \ - return_type_scalar_multiplication \ - operator*(T const& s, Vec rh){ \ - return rh *= static_cast(s); \ - } \ - \ - template \ - return_type_scalar_multiplication \ - operator*(Vec lh, T const& s){ \ - return lh *= static_cast(s); \ - } \ - \ - template \ - return_type_scalar_multiplication \ - operator/(Vec lh, T const& s){ \ - return lh *= 1.0f / static_cast(s); \ - } - -GLM_IMPLEMENT_SCAL_MULT(vec2) -GLM_IMPLEMENT_SCAL_MULT(vec3) -GLM_IMPLEMENT_SCAL_MULT(vec4) - -GLM_IMPLEMENT_SCAL_MULT(mat2) -GLM_IMPLEMENT_SCAL_MULT(mat2x3) -GLM_IMPLEMENT_SCAL_MULT(mat2x4) -GLM_IMPLEMENT_SCAL_MULT(mat3x2) -GLM_IMPLEMENT_SCAL_MULT(mat3) -GLM_IMPLEMENT_SCAL_MULT(mat3x4) -GLM_IMPLEMENT_SCAL_MULT(mat4x2) -GLM_IMPLEMENT_SCAL_MULT(mat4x3) -GLM_IMPLEMENT_SCAL_MULT(mat4) - -#undef GLM_IMPLEMENT_SCAL_MULT -} // namespace glm diff --git a/third_party/glm/gtx/scalar_relational.hpp b/third_party/glm/gtx/scalar_relational.hpp deleted file mode 100755 index 8be9c57..0000000 --- a/third_party/glm/gtx/scalar_relational.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/// @ref gtx_scalar_relational -/// @file glm/gtx/scalar_relational.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_scalar_relational GLM_GTX_scalar_relational -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Extend a position from a source to a position at a defined length. - -#pragma once - -// Dependency: -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_extend is an experimental extension and may change in the future. 
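The scalar_multiplication header removed above explains its own trick: enable_if admits any arithmetic scalar except float (double * vec4, int * vec4, vec4 / int), while the plain float overloads stay untouched. A minimal standalone sketch of the same SFINAE pattern, using a hypothetical Vec4 stand-in instead of the glm types:

#include <type_traits>

struct Vec4
{
    float x, y, z, w;
    Vec4& operator*=(float s) { x *= s; y *= s; z *= s; w *= s; return *this; }
};

// Accept any arithmetic scalar except float and funnel it through the float
// path after a cast, as the deleted GLM_IMPLEMENT_SCAL_MULT macro does.
template<typename T>
using enable_scalar = typename std::enable_if<
    !std::is_same<T, float>::value && std::is_arithmetic<T>::value, Vec4>::type;

template<typename T>
enable_scalar<T> operator*(T const& s, Vec4 v) { return v *= static_cast<float>(s); }

template<typename T>
enable_scalar<T> operator*(Vec4 v, T const& s) { return v *= static_cast<float>(s); }

template<typename T>
enable_scalar<T> operator/(Vec4 v, T const& s) { return v *= 1.0f / static_cast<float>(s); }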
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_extend extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_scalar_relational - /// @{ - - - - /// @} -}//namespace glm - -#include "scalar_relational.inl" diff --git a/third_party/glm/gtx/scalar_relational.inl b/third_party/glm/gtx/scalar_relational.inl deleted file mode 100755 index c2a121c..0000000 --- a/third_party/glm/gtx/scalar_relational.inl +++ /dev/null @@ -1,88 +0,0 @@ -/// @ref gtx_scalar_relational - -namespace glm -{ - template - GLM_FUNC_QUALIFIER bool lessThan - ( - T const& x, - T const& y - ) - { - return x < y; - } - - template - GLM_FUNC_QUALIFIER bool lessThanEqual - ( - T const& x, - T const& y - ) - { - return x <= y; - } - - template - GLM_FUNC_QUALIFIER bool greaterThan - ( - T const& x, - T const& y - ) - { - return x > y; - } - - template - GLM_FUNC_QUALIFIER bool greaterThanEqual - ( - T const& x, - T const& y - ) - { - return x >= y; - } - - template - GLM_FUNC_QUALIFIER bool equal - ( - T const& x, - T const& y - ) - { - return detail::compute_equal::is_iec559>::call(x, y); - } - - template - GLM_FUNC_QUALIFIER bool notEqual - ( - T const& x, - T const& y - ) - { - return !detail::compute_equal::is_iec559>::call(x, y); - } - - GLM_FUNC_QUALIFIER bool any - ( - bool const& x - ) - { - return x; - } - - GLM_FUNC_QUALIFIER bool all - ( - bool const& x - ) - { - return x; - } - - GLM_FUNC_QUALIFIER bool not_ - ( - bool const& x - ) - { - return !x; - } -}//namespace glm diff --git a/third_party/glm/gtx/spline.hpp b/third_party/glm/gtx/spline.hpp deleted file mode 100755 index 731c979..0000000 --- a/third_party/glm/gtx/spline.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/// @ref gtx_spline -/// @file glm/gtx/spline.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_spline GLM_GTX_spline -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Spline functions - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/optimum_pow.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_spline is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_spline extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_spline - /// @{ - - /// Return a point from a catmull rom curve. - /// @see gtx_spline extension. - template - GLM_FUNC_DECL genType catmullRom( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s); - - /// Return a point from a hermite curve. - /// @see gtx_spline extension. - template - GLM_FUNC_DECL genType hermite( - genType const& v1, - genType const& t1, - genType const& v2, - genType const& t2, - typename genType::value_type const& s); - - /// Return a point from a cubic curve. - /// @see gtx_spline extension. 
- template - GLM_FUNC_DECL genType cubic( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s); - - /// @} -}//namespace glm - -#include "spline.inl" diff --git a/third_party/glm/gtx/spline.inl b/third_party/glm/gtx/spline.inl deleted file mode 100755 index c3fd056..0000000 --- a/third_party/glm/gtx/spline.inl +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_spline - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType catmullRom - ( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s - ) - { - typename genType::value_type s2 = pow2(s); - typename genType::value_type s3 = pow3(s); - - typename genType::value_type f1 = -s3 + typename genType::value_type(2) * s2 - s; - typename genType::value_type f2 = typename genType::value_type(3) * s3 - typename genType::value_type(5) * s2 + typename genType::value_type(2); - typename genType::value_type f3 = typename genType::value_type(-3) * s3 + typename genType::value_type(4) * s2 + s; - typename genType::value_type f4 = s3 - s2; - - return (f1 * v1 + f2 * v2 + f3 * v3 + f4 * v4) / typename genType::value_type(2); - - } - - template - GLM_FUNC_QUALIFIER genType hermite - ( - genType const& v1, - genType const& t1, - genType const& v2, - genType const& t2, - typename genType::value_type const& s - ) - { - typename genType::value_type s2 = pow2(s); - typename genType::value_type s3 = pow3(s); - - typename genType::value_type f1 = typename genType::value_type(2) * s3 - typename genType::value_type(3) * s2 + typename genType::value_type(1); - typename genType::value_type f2 = typename genType::value_type(-2) * s3 + typename genType::value_type(3) * s2; - typename genType::value_type f3 = s3 - typename genType::value_type(2) * s2 + s; - typename genType::value_type f4 = s3 - s2; - - return f1 * v1 + f2 * v2 + f3 * t1 + f4 * t2; - } - - template - GLM_FUNC_QUALIFIER genType cubic - ( - genType const& v1, - genType const& v2, - genType const& v3, - genType const& v4, - typename genType::value_type const& s - ) - { - return ((v1 * s + v2) * s + v3) * s + v4; - } -}//namespace glm diff --git a/third_party/glm/gtx/std_based_type.hpp b/third_party/glm/gtx/std_based_type.hpp deleted file mode 100755 index cd3be8c..0000000 --- a/third_party/glm/gtx/std_based_type.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/// @ref gtx_std_based_type -/// @file glm/gtx/std_based_type.hpp -/// -/// @see core (dependence) -/// @see gtx_extented_min_max (dependence) -/// -/// @defgroup gtx_std_based_type GLM_GTX_std_based_type -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Adds vector types based on STL value types. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_std_based_type is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_std_based_type extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_std_based_type - /// @{ - - /// Vector type based of one std::size_t component. - /// @see GLM_GTX_std_based_type - typedef vec<1, std::size_t, defaultp> size1; - - /// Vector type based of two std::size_t components. 
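The catmullRom() evaluator deleted above combines four control points with a fixed set of cubic basis polynomials; v2 and v3 are the segment endpoints and s runs over [0, 1]. A scalar-valued sketch using the same basis as the removed .inl:

// Catmull-Rom evaluation for scalars, same basis polynomials as the deleted code.
static float catmullRom(float v1, float v2, float v3, float v4, float s)
{
    float s2 = s * s;
    float s3 = s2 * s;
    float f1 = -s3 + 2.0f * s2 - s;
    float f2 =  3.0f * s3 - 5.0f * s2 + 2.0f;
    float f3 = -3.0f * s3 + 4.0f * s2 + s;
    float f4 =  s3 - s2;
    return (f1 * v1 + f2 * v2 + f3 * v3 + f4 * v4) * 0.5f;
}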
- /// @see GLM_GTX_std_based_type - typedef vec<2, std::size_t, defaultp> size2; - - /// Vector type based of three std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<3, std::size_t, defaultp> size3; - - /// Vector type based of four std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<4, std::size_t, defaultp> size4; - - /// Vector type based of one std::size_t component. - /// @see GLM_GTX_std_based_type - typedef vec<1, std::size_t, defaultp> size1_t; - - /// Vector type based of two std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<2, std::size_t, defaultp> size2_t; - - /// Vector type based of three std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<3, std::size_t, defaultp> size3_t; - - /// Vector type based of four std::size_t components. - /// @see GLM_GTX_std_based_type - typedef vec<4, std::size_t, defaultp> size4_t; - - /// @} -}//namespace glm - -#include "std_based_type.inl" diff --git a/third_party/glm/gtx/std_based_type.inl b/third_party/glm/gtx/std_based_type.inl deleted file mode 100755 index 9c34bdb..0000000 --- a/third_party/glm/gtx/std_based_type.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref gtx_std_based_type - -namespace glm -{ - -} diff --git a/third_party/glm/gtx/string_cast.hpp b/third_party/glm/gtx/string_cast.hpp deleted file mode 100755 index 27846bf..0000000 --- a/third_party/glm/gtx/string_cast.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/// @ref gtx_string_cast -/// @file glm/gtx/string_cast.hpp -/// -/// @see core (dependence) -/// @see gtx_integer (dependence) -/// @see gtx_quaternion (dependence) -/// -/// @defgroup gtx_string_cast GLM_GTX_string_cast -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Setup strings for GLM type values -/// -/// This extension is not supported with CUDA - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/type_precision.hpp" -#include "../gtc/quaternion.hpp" -#include "../gtx/dual_quaternion.hpp" -#include -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_string_cast is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_string_cast extension included") -# endif -#endif - -#if(GLM_COMPILER & GLM_COMPILER_CUDA) -# error "GLM_GTX_string_cast is not supported on CUDA compiler" -#endif - -namespace glm -{ - /// @addtogroup gtx_string_cast - /// @{ - - /// Create a string from a GLM vector or matrix typed variable. - /// @see gtx_string_cast extension. - template - GLM_FUNC_DECL std::string to_string(genType const& x); - - /// @} -}//namespace glm - -#include "string_cast.inl" diff --git a/third_party/glm/gtx/string_cast.inl b/third_party/glm/gtx/string_cast.inl deleted file mode 100755 index f67751d..0000000 --- a/third_party/glm/gtx/string_cast.inl +++ /dev/null @@ -1,492 +0,0 @@ -/// @ref gtx_string_cast - -#include -#include - -namespace glm{ -namespace detail -{ - template - struct cast - { - typedef T value_type; - }; - - template <> - struct cast - { - typedef double value_type; - }; - - GLM_FUNC_QUALIFIER std::string format(const char* msg, ...) 
- { - std::size_t const STRING_BUFFER(4096); - char text[STRING_BUFFER]; - va_list list; - - if(msg == GLM_NULLPTR) - return std::string(); - - va_start(list, msg); -# if (GLM_COMPILER & GLM_COMPILER_VC) - vsprintf_s(text, STRING_BUFFER, msg, list); -# else// - std::vsprintf(text, msg, list); -# endif// - va_end(list); - - return std::string(text); - } - - static const char* LabelTrue = "true"; - static const char* LabelFalse = "false"; - - template - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%d";} - }; - - template - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%f";} - }; - -# if GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC - template<> - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} - }; - - template<> - struct literal - { - GLM_FUNC_QUALIFIER static char const * value() {return "%lld";} - }; -# endif//GLM_MODEL == GLM_MODEL_32 && GLM_COMPILER && GLM_COMPILER_VC - - template - struct prefix{}; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "d";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "b";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u8";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i8";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u16";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i16";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "u64";} - }; - - template<> - struct prefix - { - GLM_FUNC_QUALIFIER static char const * value() {return "i64";} - }; - - template - struct compute_to_string - {}; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<1, bool, Q> const& x) - { - return detail::format("bvec1(%s)", - x[0] ? detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<2, bool, Q> const& x) - { - return detail::format("bvec2(%s, %s)", - x[0] ? detail::LabelTrue : detail::LabelFalse, - x[1] ? detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<3, bool, Q> const& x) - { - return detail::format("bvec3(%s, %s, %s)", - x[0] ? detail::LabelTrue : detail::LabelFalse, - x[1] ? detail::LabelTrue : detail::LabelFalse, - x[2] ? detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<4, bool, Q> const& x) - { - return detail::format("bvec4(%s, %s, %s, %s)", - x[0] ? detail::LabelTrue : detail::LabelFalse, - x[1] ? detail::LabelTrue : detail::LabelFalse, - x[2] ? detail::LabelTrue : detail::LabelFalse, - x[3] ? 
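The detail::format() helper deleted above is a varargs printf-into-std::string wrapper built around a 4096-byte stack buffer. A standalone sketch of the same idea; vsnprintf is used here instead of the original's unbounded vsprintf so the buffer cannot be overrun, which is a deliberate change, not the deleted behaviour:

#include <cstdarg>
#include <cstdio>
#include <string>

// printf-style formatting into std::string, in the spirit of the deleted detail::format().
static std::string format(const char* msg, ...)
{
    if (msg == nullptr)
        return std::string();
    char text[4096];
    va_list list;
    va_start(list, msg);
    std::vsnprintf(text, sizeof(text), msg, list);
    va_end(list);
    return std::string(text);
}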
detail::LabelTrue : detail::LabelFalse); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<1, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec1(%s)", - PrefixStr, - LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec2(%s, %s)", - PrefixStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0]), - static_cast::value_type>(x[1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec3(%s, %s, %s)", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0]), - static_cast::value_type>(x[1]), - static_cast::value_type>(x[2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(vec<4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%svec4(%s, %s, %s, %s)", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0]), - static_cast::value_type>(x[1]), - static_cast::value_type>(x[2]), - static_cast::value_type>(x[3])); - } - }; - - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<2, 2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat2x2((%s, %s), (%s, %s))", - PrefixStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<2, 3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat2x3((%s, %s, %s), (%s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<2, 4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat2x4((%s, %s, %s, %s), (%s, %s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, 
LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<3, 2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat3x2((%s, %s), (%s, %s), (%s, %s))", - PrefixStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<3, 3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat3x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<3, 4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat3x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), static_cast::value_type>(x[2][3])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<4, 2, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat4x2((%s, %s), (%s, %s), (%s, %s), (%s, %s))", - PrefixStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr, - LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), - static_cast::value_type>(x[3][0]), 
static_cast::value_type>(x[3][1])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<4, 3, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat4x3((%s, %s, %s), (%s, %s, %s), (%s, %s, %s), (%s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), - static_cast::value_type>(x[3][0]), static_cast::value_type>(x[3][1]), static_cast::value_type>(x[3][2])); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(mat<4, 4, T, Q> const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%smat4x4((%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s), (%s, %s, %s, %s))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x[0][0]), static_cast::value_type>(x[0][1]), static_cast::value_type>(x[0][2]), static_cast::value_type>(x[0][3]), - static_cast::value_type>(x[1][0]), static_cast::value_type>(x[1][1]), static_cast::value_type>(x[1][2]), static_cast::value_type>(x[1][3]), - static_cast::value_type>(x[2][0]), static_cast::value_type>(x[2][1]), static_cast::value_type>(x[2][2]), static_cast::value_type>(x[2][3]), - static_cast::value_type>(x[3][0]), static_cast::value_type>(x[3][1]), static_cast::value_type>(x[3][2]), static_cast::value_type>(x[3][3])); - } - }; - - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(qua const& q) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%squat(%s, {%s, %s, %s})", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(q.w), - static_cast::value_type>(q.x), - static_cast::value_type>(q.y), - static_cast::value_type>(q.z)); - } - }; - - template - struct compute_to_string > - { - GLM_FUNC_QUALIFIER static std::string call(tdualquat const& x) - { - char const * PrefixStr = prefix::value(); - char const * LiteralStr = literal::is_iec559>::value(); - std::string FormatStr(detail::format("%sdualquat((%s, {%s, %s, %s}), (%s, {%s, %s, %s}))", - PrefixStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr, - LiteralStr, LiteralStr, LiteralStr, LiteralStr)); - - return detail::format(FormatStr.c_str(), - static_cast::value_type>(x.real.w), - static_cast::value_type>(x.real.x), - static_cast::value_type>(x.real.y), - static_cast::value_type>(x.real.z), - static_cast::value_type>(x.dual.w), - static_cast::value_type>(x.dual.x), - static_cast::value_type>(x.dual.y), - static_cast::value_type>(x.dual.z)); - } - }; - -}//namespace detail - 
-template -GLM_FUNC_QUALIFIER std::string to_string(matType const& x) -{ - return detail::compute_to_string::call(x); -} - -}//namespace glm diff --git a/third_party/glm/gtx/texture.hpp b/third_party/glm/gtx/texture.hpp deleted file mode 100755 index 20585e6..0000000 --- a/third_party/glm/gtx/texture.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/// @ref gtx_texture -/// @file glm/gtx/texture.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_texture GLM_GTX_texture -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Wrapping mode of texture coordinates. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/integer.hpp" -#include "../gtx/component_wise.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_texture is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_texture extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_texture - /// @{ - - /// Compute the number of mipmaps levels necessary to create a mipmap complete texture - /// - /// @param Extent Extent of the texture base level mipmap - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - template - T levels(vec const& Extent); - - /// @} -}// namespace glm - -#include "texture.inl" - diff --git a/third_party/glm/gtx/texture.inl b/third_party/glm/gtx/texture.inl deleted file mode 100755 index 593c826..0000000 --- a/third_party/glm/gtx/texture.inl +++ /dev/null @@ -1,17 +0,0 @@ -/// @ref gtx_texture - -namespace glm -{ - template - inline T levels(vec const& Extent) - { - return glm::log2(compMax(Extent)) + static_cast(1); - } - - template - inline T levels(T Extent) - { - return vec<1, T, defaultp>(Extent).x; - } -}//namespace glm - diff --git a/third_party/glm/gtx/transform.hpp b/third_party/glm/gtx/transform.hpp deleted file mode 100755 index 0279fc8..0000000 --- a/third_party/glm/gtx/transform.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/// @ref gtx_transform -/// @file glm/gtx/transform.hpp -/// -/// @see core (dependence) -/// @see gtc_matrix_transform (dependence) -/// @see gtx_transform -/// @see gtx_transform2 -/// -/// @defgroup gtx_transform GLM_GTX_transform -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add transformation matrices - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/matrix_transform.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_transform is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_transform extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_transform - /// @{ - - /// Transforms a matrix with a translation 4 * 4 matrix created from 3 scalars. - /// @see gtc_matrix_transform - /// @see gtx_transform - template - GLM_FUNC_DECL mat<4, 4, T, Q> translate( - vec<3, T, Q> const& v); - - /// Builds a rotation 4 * 4 matrix created from an axis of 3 scalars and an angle expressed in radians. 
- /// @see gtc_matrix_transform - /// @see gtx_transform - template - GLM_FUNC_DECL mat<4, 4, T, Q> rotate( - T angle, - vec<3, T, Q> const& v); - - /// Transforms a matrix with a scale 4 * 4 matrix created from a vector of 3 components. - /// @see gtc_matrix_transform - /// @see gtx_transform - template - GLM_FUNC_DECL mat<4, 4, T, Q> scale( - vec<3, T, Q> const& v); - - /// @} -}// namespace glm - -#include "transform.inl" diff --git a/third_party/glm/gtx/transform.inl b/third_party/glm/gtx/transform.inl deleted file mode 100755 index 48ee680..0000000 --- a/third_party/glm/gtx/transform.inl +++ /dev/null @@ -1,23 +0,0 @@ -/// @ref gtx_transform - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> translate(vec<3, T, Q> const& v) - { - return translate(mat<4, 4, T, Q>(static_cast(1)), v); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> rotate(T angle, vec<3, T, Q> const& v) - { - return rotate(mat<4, 4, T, Q>(static_cast(1)), angle, v); - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scale(vec<3, T, Q> const& v) - { - return scale(mat<4, 4, T, Q>(static_cast(1)), v); - } - -}//namespace glm diff --git a/third_party/glm/gtx/transform2.hpp b/third_party/glm/gtx/transform2.hpp deleted file mode 100755 index 0d8ba9d..0000000 --- a/third_party/glm/gtx/transform2.hpp +++ /dev/null @@ -1,89 +0,0 @@ -/// @ref gtx_transform2 -/// @file glm/gtx/transform2.hpp -/// -/// @see core (dependence) -/// @see gtx_transform (dependence) -/// -/// @defgroup gtx_transform2 GLM_GTX_transform2 -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Add extra transformation matrices - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtx/transform.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_transform2 is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_transform2 extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_transform2 - /// @{ - - //! Transforms a matrix with a shearing on X axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T y); - - //! Transforms a matrix with a shearing on Y axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T x); - - //! Transforms a matrix with a shearing on X axis - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T y, T z); - - //! Transforms a matrix with a shearing on Y axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T x, T z); - - //! Transforms a matrix with a shearing on Z axis. - //! From GLM_GTX_transform2 extension. 
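The transform helpers deleted above (translate, rotate, scale taking only a vector) just apply the matching GTC builder to an identity matrix. A short sketch of what the translate and scale results look like when written out by hand, using a column-major array as a stand-in for mat4 (GLM stores the translation in the last column):

#include <array>

using Mat4 = std::array<std::array<float, 4>, 4>;   // column-major: m[col][row]

// Equivalent of the deleted translate(vec3): identity with the offset in column 3.
static Mat4 makeTranslate(float x, float y, float z)
{
    Mat4 m{};
    m[0][0] = m[1][1] = m[2][2] = m[3][3] = 1.0f;
    m[3][0] = x; m[3][1] = y; m[3][2] = z;
    return m;
}

// Equivalent of the deleted scale(vec3): the factors sit on the diagonal.
static Mat4 makeScale(float x, float y, float z)
{
    Mat4 m{};
    m[0][0] = x; m[1][1] = y; m[2][2] = z; m[3][3] = 1.0f;
    return m;
}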
- template - GLM_FUNC_DECL mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T x, T y); - - //template GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shear(const mat<4, 4, T, Q> & m, shearPlane, planePoint, angle) - // Identity + tan(angle) * cross(Normal, OnPlaneVector) 0 - // - dot(PointOnPlane, normal) * OnPlaneVector 1 - - // Reflect functions seem to don't work - //template mat<3, 3, T, Q> reflect2D(const mat<3, 3, T, Q> & m, const vec<3, T, Q>& normal){return reflect2DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension) - //template mat<4, 4, T, Q> reflect3D(const mat<4, 4, T, Q> & m, const vec<3, T, Q>& normal){return reflect3DGTX(m, normal);} //!< \brief Build a reflection matrix (from GLM_GTX_transform2 extension) - - //! Build planar projection matrix along normal axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<3, 3, T, Q> proj2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal); - - //! Build planar projection matrix along normal axis. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> proj3D(mat<4, 4, T, Q> const & m, vec<3, T, Q> const& normal); - - //! Build a scale bias matrix. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(T scale, T bias); - - //! Build a scale bias matrix. - //! From GLM_GTX_transform2 extension. - template - GLM_FUNC_DECL mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias); - - /// @} -}// namespace glm - -#include "transform2.inl" diff --git a/third_party/glm/gtx/transform2.inl b/third_party/glm/gtx/transform2.inl deleted file mode 100755 index 2b53198..0000000 --- a/third_party/glm/gtx/transform2.inl +++ /dev/null @@ -1,125 +0,0 @@ -/// @ref gtx_transform2 - -namespace glm -{ - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearX2D(mat<3, 3, T, Q> const& m, T s) - { - mat<3, 3, T, Q> r(1); - r[1][0] = s; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> shearY2D(mat<3, 3, T, Q> const& m, T s) - { - mat<3, 3, T, Q> r(1); - r[0][1] = s; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearX3D(mat<4, 4, T, Q> const& m, T s, T t) - { - mat<4, 4, T, Q> r(1); - r[0][1] = s; - r[0][2] = t; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearY3D(mat<4, 4, T, Q> const& m, T s, T t) - { - mat<4, 4, T, Q> r(1); - r[1][0] = s; - r[1][2] = t; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> shearZ3D(mat<4, 4, T, Q> const& m, T s, T t) - { - mat<4, 4, T, Q> r(1); - r[2][0] = s; - r[2][1] = t; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> reflect2D(mat<3, 3, T, Q> const& m, vec<3, T, Q> const& normal) - { - mat<3, 3, T, Q> r(static_cast(1)); - r[0][0] = static_cast(1) - static_cast(2) * normal.x * normal.x; - r[0][1] = -static_cast(2) * normal.x * normal.y; - r[1][0] = -static_cast(2) * normal.x * normal.y; - r[1][1] = static_cast(1) - static_cast(2) * normal.y * normal.y; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> reflect3D(mat<4, 4, T, Q> const& m, vec<3, T, Q> const& normal) - { - mat<4, 4, T, Q> r(static_cast(1)); - r[0][0] = static_cast(1) - static_cast(2) * normal.x * normal.x; - r[0][1] = -static_cast(2) * normal.x * normal.y; - r[0][2] = -static_cast(2) * normal.x * normal.z; - - r[1][0] = -static_cast(2) * normal.x * normal.y; - r[1][1] = static_cast(1) - static_cast(2) * normal.y * normal.y; - r[1][2] = -static_cast(2) * normal.y * normal.z; - - r[2][0] = 
-static_cast(2) * normal.x * normal.z; - r[2][1] = -static_cast(2) * normal.y * normal.z; - r[2][2] = static_cast(1) - static_cast(2) * normal.z * normal.z; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<3, 3, T, Q> proj2D( - const mat<3, 3, T, Q>& m, - const vec<3, T, Q>& normal) - { - mat<3, 3, T, Q> r(static_cast(1)); - r[0][0] = static_cast(1) - normal.x * normal.x; - r[0][1] = - normal.x * normal.y; - r[1][0] = - normal.x * normal.y; - r[1][1] = static_cast(1) - normal.y * normal.y; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> proj3D( - const mat<4, 4, T, Q>& m, - const vec<3, T, Q>& normal) - { - mat<4, 4, T, Q> r(static_cast(1)); - r[0][0] = static_cast(1) - normal.x * normal.x; - r[0][1] = - normal.x * normal.y; - r[0][2] = - normal.x * normal.z; - r[1][0] = - normal.x * normal.y; - r[1][1] = static_cast(1) - normal.y * normal.y; - r[1][2] = - normal.y * normal.z; - r[2][0] = - normal.x * normal.z; - r[2][1] = - normal.y * normal.z; - r[2][2] = static_cast(1) - normal.z * normal.z; - return m * r; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(T scale, T bias) - { - mat<4, 4, T, Q> result; - result[3] = vec<4, T, Q>(vec<3, T, Q>(bias), static_cast(1)); - result[0][0] = scale; - result[1][1] = scale; - result[2][2] = scale; - return result; - } - - template - GLM_FUNC_QUALIFIER mat<4, 4, T, Q> scaleBias(mat<4, 4, T, Q> const& m, T scale, T bias) - { - return m * scaleBias(scale, bias); - } -}//namespace glm - diff --git a/third_party/glm/gtx/type_aligned.hpp b/third_party/glm/gtx/type_aligned.hpp deleted file mode 100755 index 2ae522c..0000000 --- a/third_party/glm/gtx/type_aligned.hpp +++ /dev/null @@ -1,982 +0,0 @@ -/// @ref gtx_type_aligned -/// @file glm/gtx/type_aligned.hpp -/// -/// @see core (dependence) -/// @see gtc_quaternion (dependence) -/// -/// @defgroup gtx_type_aligned GLM_GTX_type_aligned -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines aligned types. - -#pragma once - -// Dependency: -#include "../gtc/type_precision.hpp" -#include "../gtc/quaternion.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_type_aligned is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_type_aligned extension included") -# endif -#endif - -namespace glm -{ - /////////////////////////// - // Signed int vector types - - /// @addtogroup gtx_type_aligned - /// @{ - - /// Low qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int8, aligned_lowp_int8, 1); - - /// Low qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int16, aligned_lowp_int16, 2); - - /// Low qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int32, aligned_lowp_int32, 4); - - /// Low qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int64, aligned_lowp_int64, 8); - - - /// Low qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int8_t, aligned_lowp_int8_t, 1); - - /// Low qualifier 16 bit signed integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int16_t, aligned_lowp_int16_t, 2); - - /// Low qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int32_t, aligned_lowp_int32_t, 4); - - /// Low qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_int64_t, aligned_lowp_int64_t, 8); - - - /// Low qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i8, aligned_lowp_i8, 1); - - /// Low qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i16, aligned_lowp_i16, 2); - - /// Low qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i32, aligned_lowp_i32, 4); - - /// Low qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_i64, aligned_lowp_i64, 8); - - - /// Medium qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int8, aligned_mediump_int8, 1); - - /// Medium qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int16, aligned_mediump_int16, 2); - - /// Medium qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int32, aligned_mediump_int32, 4); - - /// Medium qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int64, aligned_mediump_int64, 8); - - - /// Medium qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int8_t, aligned_mediump_int8_t, 1); - - /// Medium qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int16_t, aligned_mediump_int16_t, 2); - - /// Medium qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int32_t, aligned_mediump_int32_t, 4); - - /// Medium qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_int64_t, aligned_mediump_int64_t, 8); - - - /// Medium qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i8, aligned_mediump_i8, 1); - - /// Medium qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i16, aligned_mediump_i16, 2); - - /// Medium qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i32, aligned_mediump_i32, 4); - - /// Medium qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_i64, aligned_mediump_i64, 8); - - - /// High qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int8, aligned_highp_int8, 1); - - /// High qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int16, aligned_highp_int16, 2); - - /// High qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int32, aligned_highp_int32, 4); - - /// High qualifier 64 bit signed integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int64, aligned_highp_int64, 8); - - - /// High qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int8_t, aligned_highp_int8_t, 1); - - /// High qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int16_t, aligned_highp_int16_t, 2); - - /// High qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int32_t, aligned_highp_int32_t, 4); - - /// High qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_int64_t, aligned_highp_int64_t, 8); - - - /// High qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i8, aligned_highp_i8, 1); - - /// High qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i16, aligned_highp_i16, 2); - - /// High qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i32, aligned_highp_i32, 4); - - /// High qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_i64, aligned_highp_i64, 8); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int8, aligned_int8, 1); - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int16, aligned_int16, 2); - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int32, aligned_int32, 4); - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int64, aligned_int64, 8); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int8_t, aligned_int8_t, 1); - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int16_t, aligned_int16_t, 2); - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int32_t, aligned_int32_t, 4); - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(int64_t, aligned_int64_t, 8); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8, aligned_i8, 1); - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16, aligned_i16, 2); - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32, aligned_i32, 4); - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64, aligned_i64, 8); - - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec1, aligned_ivec1, 4); - - /// Default qualifier 32 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec2, aligned_ivec2, 8); - - /// Default qualifier 32 bit signed integer aligned vector of 3 components type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec3, aligned_ivec3, 16); - - /// Default qualifier 32 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(ivec4, aligned_ivec4, 16); - - - /// Default qualifier 8 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec1, aligned_i8vec1, 1); - - /// Default qualifier 8 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec2, aligned_i8vec2, 2); - - /// Default qualifier 8 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec3, aligned_i8vec3, 4); - - /// Default qualifier 8 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i8vec4, aligned_i8vec4, 4); - - - /// Default qualifier 16 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec1, aligned_i16vec1, 2); - - /// Default qualifier 16 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec2, aligned_i16vec2, 4); - - /// Default qualifier 16 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec3, aligned_i16vec3, 8); - - /// Default qualifier 16 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i16vec4, aligned_i16vec4, 8); - - - /// Default qualifier 32 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec1, aligned_i32vec1, 4); - - /// Default qualifier 32 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec2, aligned_i32vec2, 8); - - /// Default qualifier 32 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec3, aligned_i32vec3, 16); - - /// Default qualifier 32 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i32vec4, aligned_i32vec4, 16); - - - /// Default qualifier 64 bit signed integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec1, aligned_i64vec1, 8); - - /// Default qualifier 64 bit signed integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec2, aligned_i64vec2, 16); - - /// Default qualifier 64 bit signed integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec3, aligned_i64vec3, 32); - - /// Default qualifier 64 bit signed integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(i64vec4, aligned_i64vec4, 32); - - - ///////////////////////////// - // Unsigned int vector types - - /// Low qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint8, aligned_lowp_uint8, 1); - - /// Low qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint16, aligned_lowp_uint16, 2); - - /// Low qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint32, aligned_lowp_uint32, 4); - - /// Low qualifier 64 bit unsigned integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint64, aligned_lowp_uint64, 8); - - - /// Low qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint8_t, aligned_lowp_uint8_t, 1); - - /// Low qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint16_t, aligned_lowp_uint16_t, 2); - - /// Low qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint32_t, aligned_lowp_uint32_t, 4); - - /// Low qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_uint64_t, aligned_lowp_uint64_t, 8); - - - /// Low qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u8, aligned_lowp_u8, 1); - - /// Low qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u16, aligned_lowp_u16, 2); - - /// Low qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u32, aligned_lowp_u32, 4); - - /// Low qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(lowp_u64, aligned_lowp_u64, 8); - - - /// Medium qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint8, aligned_mediump_uint8, 1); - - /// Medium qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint16, aligned_mediump_uint16, 2); - - /// Medium qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint32, aligned_mediump_uint32, 4); - - /// Medium qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint64, aligned_mediump_uint64, 8); - - - /// Medium qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint8_t, aligned_mediump_uint8_t, 1); - - /// Medium qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint16_t, aligned_mediump_uint16_t, 2); - - /// Medium qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint32_t, aligned_mediump_uint32_t, 4); - - /// Medium qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_uint64_t, aligned_mediump_uint64_t, 8); - - - /// Medium qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u8, aligned_mediump_u8, 1); - - /// Medium qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u16, aligned_mediump_u16, 2); - - /// Medium qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u32, aligned_mediump_u32, 4); - - /// Medium qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mediump_u64, aligned_mediump_u64, 8); - - - /// High qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint8, aligned_highp_uint8, 1); - - /// High qualifier 16 bit unsigned integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint16, aligned_highp_uint16, 2); - - /// High qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint32, aligned_highp_uint32, 4); - - /// High qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint64, aligned_highp_uint64, 8); - - - /// High qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint8_t, aligned_highp_uint8_t, 1); - - /// High qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint16_t, aligned_highp_uint16_t, 2); - - /// High qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint32_t, aligned_highp_uint32_t, 4); - - /// High qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_uint64_t, aligned_highp_uint64_t, 8); - - - /// High qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u8, aligned_highp_u8, 1); - - /// High qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u16, aligned_highp_u16, 2); - - /// High qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u32, aligned_highp_u32, 4); - - /// High qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(highp_u64, aligned_highp_u64, 8); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint8, aligned_uint8, 1); - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint16, aligned_uint16, 2); - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint32, aligned_uint32, 4); - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint64, aligned_uint64, 8); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint8_t, aligned_uint8_t, 1); - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint16_t, aligned_uint16_t, 2); - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint32_t, aligned_uint32_t, 4); - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uint64_t, aligned_uint64_t, 8); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8, aligned_u8, 1); - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16, aligned_u16, 2); - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32, aligned_u32, 4); - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64, aligned_u64, 8); - - - /// Default qualifier 32 bit unsigned integer aligned scalar type. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec1, aligned_uvec1, 4); - - /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec2, aligned_uvec2, 8); - - /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec3, aligned_uvec3, 16); - - /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(uvec4, aligned_uvec4, 16); - - - /// Default qualifier 8 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec1, aligned_u8vec1, 1); - - /// Default qualifier 8 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec2, aligned_u8vec2, 2); - - /// Default qualifier 8 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec3, aligned_u8vec3, 4); - - /// Default qualifier 8 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u8vec4, aligned_u8vec4, 4); - - - /// Default qualifier 16 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec1, aligned_u16vec1, 2); - - /// Default qualifier 16 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec2, aligned_u16vec2, 4); - - /// Default qualifier 16 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec3, aligned_u16vec3, 8); - - /// Default qualifier 16 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u16vec4, aligned_u16vec4, 8); - - - /// Default qualifier 32 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec1, aligned_u32vec1, 4); - - /// Default qualifier 32 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec2, aligned_u32vec2, 8); - - /// Default qualifier 32 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec3, aligned_u32vec3, 16); - - /// Default qualifier 32 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u32vec4, aligned_u32vec4, 16); - - - /// Default qualifier 64 bit unsigned integer aligned scalar type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec1, aligned_u64vec1, 8); - - /// Default qualifier 64 bit unsigned integer aligned vector of 2 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec2, aligned_u64vec2, 16); - - /// Default qualifier 64 bit unsigned integer aligned vector of 3 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec3, aligned_u64vec3, 32); - - /// Default qualifier 64 bit unsigned integer aligned vector of 4 components type. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(u64vec4, aligned_u64vec4, 32); - - - ////////////////////// - // Float vector types - - /// 32 bit single-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float32, aligned_float32, 4); - - /// 32 bit single-qualifier floating-point aligned scalar. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float32_t, aligned_float32_t, 4); - - /// 32 bit single-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float32, aligned_f32, 4); - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// 64 bit double-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float64, aligned_float64, 8); - - /// 64 bit double-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float64_t, aligned_float64_t, 8); - - /// 64 bit double-qualifier floating-point aligned scalar. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(float64, aligned_f64, 8); - -# endif//GLM_FORCE_SINGLE_ONLY - - - /// Single-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec1, aligned_vec1, 4); - - /// Single-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec2, aligned_vec2, 8); - - /// Single-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec3, aligned_vec3, 16); - - /// Single-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(vec4, aligned_vec4, 16); - - - /// Single-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec1, aligned_fvec1, 4); - - /// Single-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec2, aligned_fvec2, 8); - - /// Single-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec3, aligned_fvec3, 16); - - /// Single-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fvec4, aligned_fvec4, 16); - - - /// Single-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec1, aligned_f32vec1, 4); - - /// Single-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec2, aligned_f32vec2, 8); - - /// Single-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec3, aligned_f32vec3, 16); - - /// Single-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32vec4, aligned_f32vec4, 16); - - - /// Double-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec1, aligned_dvec1, 8); - - /// Double-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec2, aligned_dvec2, 16); - - /// Double-qualifier floating-point aligned vector of 3 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec3, aligned_dvec3, 32); - - /// Double-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dvec4, aligned_dvec4, 32); - - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point aligned vector of 1 component. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec1, aligned_f64vec1, 8); - - /// Double-qualifier floating-point aligned vector of 2 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec2, aligned_f64vec2, 16); - - /// Double-qualifier floating-point aligned vector of 3 components. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec3, aligned_f64vec3, 32); - - /// Double-qualifier floating-point aligned vector of 4 components. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64vec4, aligned_f64vec4, 32); - -# endif//GLM_FORCE_SINGLE_ONLY - - ////////////////////// - // Float matrix types - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1 mat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat2, aligned_mat2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat3, aligned_mat3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat4, aligned_mat4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 mat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat2x2, aligned_mat2x2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat3x3, aligned_mat3x3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(mat4x4, aligned_mat4x4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 fmat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef f32 fmat1x1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x2, aligned_fmat2x2, 16); - - /// Single-qualifier floating-point aligned 2x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x3, aligned_fmat2x3, 16); - - /// Single-qualifier floating-point aligned 2x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat2x4, aligned_fmat2x4, 16); - - /// Single-qualifier floating-point aligned 3x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x2, aligned_fmat3x2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x3, aligned_fmat3x3, 16); - - /// Single-qualifier floating-point aligned 3x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat3x4, aligned_fmat3x4, 16); - - /// Single-qualifier floating-point aligned 4x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x2, aligned_fmat4x2, 16); - - /// Single-qualifier floating-point aligned 4x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x3, aligned_fmat4x3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(fmat4x4, aligned_fmat4x4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 f32mat1; - - /// Single-qualifier floating-point aligned 2x2 matrix. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4, 16); - - - /// Single-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef f32 f32mat1x1; - - /// Single-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x2, aligned_f32mat2x2, 16); - - /// Single-qualifier floating-point aligned 2x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x3, aligned_f32mat2x3, 16); - - /// Single-qualifier floating-point aligned 2x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat2x4, aligned_f32mat2x4, 16); - - /// Single-qualifier floating-point aligned 3x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x2, aligned_f32mat3x2, 16); - - /// Single-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x3, aligned_f32mat3x3, 16); - - /// Single-qualifier floating-point aligned 3x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat3x4, aligned_f32mat3x4, 16); - - /// Single-qualifier floating-point aligned 4x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x2, aligned_f32mat4x2, 16); - - /// Single-qualifier floating-point aligned 4x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x3, aligned_f32mat4x3, 16); - - /// Single-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32mat4x4, aligned_f32mat4x4, 16); - - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef detail::tmat1x1 f64mat1; - - /// Double-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2, 32); - - /// Double-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3, 32); - - /// Double-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4, 32); - - - /// Double-qualifier floating-point aligned 1x1 matrix. - /// @see gtx_type_aligned - //typedef f64 f64mat1x1; - - /// Double-qualifier floating-point aligned 2x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x2, aligned_f64mat2x2, 32); - - /// Double-qualifier floating-point aligned 2x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x3, aligned_f64mat2x3, 32); - - /// Double-qualifier floating-point aligned 2x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat2x4, aligned_f64mat2x4, 32); - - /// Double-qualifier floating-point aligned 3x2 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x2, aligned_f64mat3x2, 32); - - /// Double-qualifier floating-point aligned 3x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x3, aligned_f64mat3x3, 32); - - /// Double-qualifier floating-point aligned 3x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat3x4, aligned_f64mat3x4, 32); - - /// Double-qualifier floating-point aligned 4x2 matrix. 
- /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x2, aligned_f64mat4x2, 32); - - /// Double-qualifier floating-point aligned 4x3 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x3, aligned_f64mat4x3, 32); - - /// Double-qualifier floating-point aligned 4x4 matrix. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64mat4x4, aligned_f64mat4x4, 32); - -# endif//GLM_FORCE_SINGLE_ONLY - - - ////////////////////////// - // Quaternion types - - /// Single-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(quat, aligned_quat, 16); - - /// Single-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(quat, aligned_fquat, 16); - - /// Double-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(dquat, aligned_dquat, 32); - - /// Single-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f32quat, aligned_f32quat, 16); - -# ifndef GLM_FORCE_SINGLE_ONLY - - /// Double-qualifier floating-point aligned quaternion. - /// @see gtx_type_aligned - GLM_ALIGNED_TYPEDEF(f64quat, aligned_f64quat, 32); - -# endif//GLM_FORCE_SINGLE_ONLY - - /// @} -}//namespace glm - -#include "type_aligned.inl" diff --git a/third_party/glm/gtx/type_aligned.inl b/third_party/glm/gtx/type_aligned.inl deleted file mode 100755 index 54c1b81..0000000 --- a/third_party/glm/gtx/type_aligned.inl +++ /dev/null @@ -1,6 +0,0 @@ -/// @ref gtc_type_aligned - -namespace glm -{ - -} diff --git a/third_party/glm/gtx/type_trait.hpp b/third_party/glm/gtx/type_trait.hpp deleted file mode 100755 index 56685c8..0000000 --- a/third_party/glm/gtx/type_trait.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/// @ref gtx_type_trait -/// @file glm/gtx/type_trait.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_type_trait GLM_GTX_type_trait -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Defines traits for each type. - -#pragma once - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_type_trait is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_type_trait extension included") -# endif -#endif - -// Dependency: -#include "../detail/qualifier.hpp" -#include "../gtc/quaternion.hpp" -#include "../gtx/dual_quaternion.hpp" - -namespace glm -{ - /// @addtogroup gtx_type_trait - /// @{ - - template - struct type - { - static bool const is_vec = false; - static bool const is_mat = false; - static bool const is_quat = false; - static length_t const components = 0; - static length_t const cols = 0; - static length_t const rows = 0; - }; - - template - struct type > - { - static bool const is_vec = true; - static bool const is_mat = false; - static bool const is_quat = false; - static length_t const components = L; - }; - - template - struct type > - { - static bool const is_vec = false; - static bool const is_mat = true; - static bool const is_quat = false; - static length_t const components = C; - static length_t const cols = C; - static length_t const rows = R; - }; - - template - struct type > - { - static bool const is_vec = false; - static bool const is_mat = false; - static bool const is_quat = true; - static length_t const components = 4; - }; - - template - struct type > - { - static bool const is_vec = false; - static bool const is_mat = false; - static bool const is_quat = true; - static length_t const components = 8; - }; - - /// @} -}//namespace glm - -#include "type_trait.inl" diff --git a/third_party/glm/gtx/type_trait.inl b/third_party/glm/gtx/type_trait.inl deleted file mode 100755 index 045de95..0000000 --- a/third_party/glm/gtx/type_trait.inl +++ /dev/null @@ -1,61 +0,0 @@ -/// @ref gtx_type_trait - -namespace glm -{ - template - bool const type::is_vec; - template - bool const type::is_mat; - template - bool const type::is_quat; - template - length_t const type::components; - template - length_t const type::cols; - template - length_t const type::rows; - - // vec - template - bool const type >::is_vec; - template - bool const type >::is_mat; - template - bool const type >::is_quat; - template - length_t const type >::components; - - // mat - template - bool const type >::is_vec; - template - bool const type >::is_mat; - template - bool const type >::is_quat; - template - length_t const type >::components; - template - length_t const type >::cols; - template - length_t const type >::rows; - - // tquat - template - bool const type >::is_vec; - template - bool const type >::is_mat; - template - bool const type >::is_quat; - template - length_t const type >::components; - - // tdualquat - template - bool const type >::is_vec; - template - bool const type >::is_mat; - template - bool const type >::is_quat; - template - length_t const type >::components; -}//namespace glm diff --git a/third_party/glm/gtx/vec_swizzle.hpp b/third_party/glm/gtx/vec_swizzle.hpp deleted file mode 100755 index 1c49abc..0000000 --- a/third_party/glm/gtx/vec_swizzle.hpp +++ /dev/null @@ -1,2782 +0,0 @@ -/// @ref gtx_vec_swizzle -/// @file glm/gtx/vec_swizzle.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_vec_swizzle GLM_GTX_vec_swizzle -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Functions to perform swizzle operation. - -#pragma once - -#include "../glm.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_vec_swizzle is an experimental extension and may change in the future. 
Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_vec_swizzle extension included") -# endif -#endif - -namespace glm { - // xx - template - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<1, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - template - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - template - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - template - GLM_INLINE glm::vec<2, T, Q> xx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.x); - } - - // xy - template - GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.y); - } - - template - GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.y); - } - - template - GLM_INLINE glm::vec<2, T, Q> xy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.y); - } - - // xz - template - GLM_INLINE glm::vec<2, T, Q> xz(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.z); - } - - template - GLM_INLINE glm::vec<2, T, Q> xz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.z); - } - - // xw - template - GLM_INLINE glm::vec<2, T, Q> xw(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.x, v.w); - } - - // yx - template - GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.x); - } - - template - GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.x); - } - - template - GLM_INLINE glm::vec<2, T, Q> yx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.x); - } - - // yy - template - GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<2, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.y); - } - - template - GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.y); - } - - template - GLM_INLINE glm::vec<2, T, Q> yy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.y); - } - - // yz - template - GLM_INLINE glm::vec<2, T, Q> yz(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.z); - } - - template - GLM_INLINE glm::vec<2, T, Q> yz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.z); - } - - // yw - template - GLM_INLINE glm::vec<2, T, Q> yw(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.y, v.w); - } - - // zx - template - GLM_INLINE glm::vec<2, T, Q> zx(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.x); - } - - template - GLM_INLINE glm::vec<2, T, Q> zx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.x); - } - - // zy - template - GLM_INLINE glm::vec<2, T, Q> zy(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.y); - } - - template - GLM_INLINE glm::vec<2, T, Q> zy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.y); - } - - // zz - template - GLM_INLINE glm::vec<2, T, Q> zz(const glm::vec<3, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.z); - } - - template - GLM_INLINE glm::vec<2, T, Q> zz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.z); - } - - // zw - template - GLM_INLINE glm::vec<2, T, Q> zw(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.z, v.w); - } - - // wx - template - GLM_INLINE glm::vec<2, T, Q> wx(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.x); - } - - // wy - template - GLM_INLINE 
glm::vec<2, T, Q> wy(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.y); - } - - // wz - template - GLM_INLINE glm::vec<2, T, Q> wz(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.z); - } - - // ww - template - GLM_INLINE glm::vec<2, T, Q> ww(const glm::vec<4, T, Q> &v) { - return glm::vec<2, T, Q>(v.w, v.w); - } - - // xxx - template - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<1, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> xxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.x); - } - - // xxy - template - GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> xxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.y); - } - - // xxz - template - GLM_INLINE glm::vec<3, T, Q> xxz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> xxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.z); - } - - // xxw - template - GLM_INLINE glm::vec<3, T, Q> xxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.x, v.w); - } - - // xyx - template - GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> xyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.x); - } - - // xyy - template - GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> xyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.y); - } - - // xyz - template - GLM_INLINE glm::vec<3, T, Q> xyz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> xyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.z); - } - - // xyw - template - GLM_INLINE glm::vec<3, T, Q> xyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.y, v.w); - } - - // xzx - template - GLM_INLINE glm::vec<3, T, Q> xzx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> xzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.x); - } - - // xzy - template - GLM_INLINE glm::vec<3, T, Q> xzy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> xzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.y); - } - - // xzz - template - GLM_INLINE glm::vec<3, T, Q> xzz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> xzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, 
Q>(v.x, v.z, v.z); - } - - // xzw - template - GLM_INLINE glm::vec<3, T, Q> xzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.z, v.w); - } - - // xwx - template - GLM_INLINE glm::vec<3, T, Q> xwx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.x); - } - - // xwy - template - GLM_INLINE glm::vec<3, T, Q> xwy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.y); - } - - // xwz - template - GLM_INLINE glm::vec<3, T, Q> xwz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.z); - } - - // xww - template - GLM_INLINE glm::vec<3, T, Q> xww(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.x, v.w, v.w); - } - - // yxx - template - GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> yxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.x); - } - - // yxy - template - GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> yxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.y); - } - - // yxz - template - GLM_INLINE glm::vec<3, T, Q> yxz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> yxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.z); - } - - // yxw - template - GLM_INLINE glm::vec<3, T, Q> yxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.x, v.w); - } - - // yyx - template - GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> yyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.x); - } - - // yyy - template - GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<2, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> yyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.y); - } - - // yyz - template - GLM_INLINE glm::vec<3, T, Q> yyz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> yyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.z); - } - - // yyw - template - GLM_INLINE glm::vec<3, T, Q> yyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.y, v.w); - } - - // yzx - template - GLM_INLINE glm::vec<3, T, Q> yzx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> yzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.x); - } - - // yzy - template - GLM_INLINE glm::vec<3, T, Q> yzy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> yzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.y); - } - - // yzz - template 
- GLM_INLINE glm::vec<3, T, Q> yzz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> yzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.z); - } - - // yzw - template - GLM_INLINE glm::vec<3, T, Q> yzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.z, v.w); - } - - // ywx - template - GLM_INLINE glm::vec<3, T, Q> ywx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.x); - } - - // ywy - template - GLM_INLINE glm::vec<3, T, Q> ywy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.y); - } - - // ywz - template - GLM_INLINE glm::vec<3, T, Q> ywz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.z); - } - - // yww - template - GLM_INLINE glm::vec<3, T, Q> yww(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.y, v.w, v.w); - } - - // zxx - template - GLM_INLINE glm::vec<3, T, Q> zxx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> zxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.x); - } - - // zxy - template - GLM_INLINE glm::vec<3, T, Q> zxy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> zxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.y); - } - - // zxz - template - GLM_INLINE glm::vec<3, T, Q> zxz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> zxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.z); - } - - // zxw - template - GLM_INLINE glm::vec<3, T, Q> zxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.x, v.w); - } - - // zyx - template - GLM_INLINE glm::vec<3, T, Q> zyx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> zyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.x); - } - - // zyy - template - GLM_INLINE glm::vec<3, T, Q> zyy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> zyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.y); - } - - // zyz - template - GLM_INLINE glm::vec<3, T, Q> zyz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> zyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.z); - } - - // zyw - template - GLM_INLINE glm::vec<3, T, Q> zyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.y, v.w); - } - - // zzx - template - GLM_INLINE glm::vec<3, T, Q> zzx(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.x); - } - - template - GLM_INLINE glm::vec<3, T, Q> zzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.x); - } - - // zzy - template - GLM_INLINE glm::vec<3, T, Q> zzy(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.y); - } - - template - GLM_INLINE glm::vec<3, T, Q> zzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.y); - } - - // zzz - template - GLM_INLINE glm::vec<3, T, Q> zzz(const glm::vec<3, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.z); - } - - template - GLM_INLINE glm::vec<3, T, Q> zzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.z); - } - - // zzw - template - GLM_INLINE glm::vec<3, 
T, Q> zzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.z, v.w); - } - - // zwx - template - GLM_INLINE glm::vec<3, T, Q> zwx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.x); - } - - // zwy - template - GLM_INLINE glm::vec<3, T, Q> zwy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.y); - } - - // zwz - template - GLM_INLINE glm::vec<3, T, Q> zwz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.z); - } - - // zww - template - GLM_INLINE glm::vec<3, T, Q> zww(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.z, v.w, v.w); - } - - // wxx - template - GLM_INLINE glm::vec<3, T, Q> wxx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.x); - } - - // wxy - template - GLM_INLINE glm::vec<3, T, Q> wxy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.y); - } - - // wxz - template - GLM_INLINE glm::vec<3, T, Q> wxz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.z); - } - - // wxw - template - GLM_INLINE glm::vec<3, T, Q> wxw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.x, v.w); - } - - // wyx - template - GLM_INLINE glm::vec<3, T, Q> wyx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.x); - } - - // wyy - template - GLM_INLINE glm::vec<3, T, Q> wyy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.y); - } - - // wyz - template - GLM_INLINE glm::vec<3, T, Q> wyz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.z); - } - - // wyw - template - GLM_INLINE glm::vec<3, T, Q> wyw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.y, v.w); - } - - // wzx - template - GLM_INLINE glm::vec<3, T, Q> wzx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.x); - } - - // wzy - template - GLM_INLINE glm::vec<3, T, Q> wzy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.y); - } - - // wzz - template - GLM_INLINE glm::vec<3, T, Q> wzz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.z); - } - - // wzw - template - GLM_INLINE glm::vec<3, T, Q> wzw(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.z, v.w); - } - - // wwx - template - GLM_INLINE glm::vec<3, T, Q> wwx(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.x); - } - - // wwy - template - GLM_INLINE glm::vec<3, T, Q> wwy(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.y); - } - - // wwz - template - GLM_INLINE glm::vec<3, T, Q> wwz(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.z); - } - - // www - template - GLM_INLINE glm::vec<3, T, Q> www(const glm::vec<4, T, Q> &v) { - return glm::vec<3, T, Q>(v.w, v.w, v.w); - } - - // xxxx - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<1, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.x); - } - - // xxxy - template - GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, 
v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.y); - } - - // xxxz - template - GLM_INLINE glm::vec<4, T, Q> xxxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.z); - } - - // xxxw - template - GLM_INLINE glm::vec<4, T, Q> xxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.x, v.w); - } - - // xxyx - template - GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.x); - } - - // xxyy - template - GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.y); - } - - // xxyz - template - GLM_INLINE glm::vec<4, T, Q> xxyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.z); - } - - // xxyw - template - GLM_INLINE glm::vec<4, T, Q> xxyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.y, v.w); - } - - // xxzx - template - GLM_INLINE glm::vec<4, T, Q> xxzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.x); - } - - // xxzy - template - GLM_INLINE glm::vec<4, T, Q> xxzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.y); - } - - // xxzz - template - GLM_INLINE glm::vec<4, T, Q> xxzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.z); - } - - // xxzw - template - GLM_INLINE glm::vec<4, T, Q> xxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.z, v.w); - } - - // xxwx - template - GLM_INLINE glm::vec<4, T, Q> xxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.x); - } - - // xxwy - template - GLM_INLINE glm::vec<4, T, Q> xxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.y); - } - - // xxwz - template - GLM_INLINE glm::vec<4, T, Q> xxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.z); - } - - // xxww - template - GLM_INLINE glm::vec<4, T, Q> xxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.x, v.w, v.w); - } - - // xyxx - template - GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, 
Q>(v.x, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.x); - } - - // xyxy - template - GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.y); - } - - // xyxz - template - GLM_INLINE glm::vec<4, T, Q> xyxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.z); - } - - // xyxw - template - GLM_INLINE glm::vec<4, T, Q> xyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.x, v.w); - } - - // xyyx - template - GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.x); - } - - // xyyy - template - GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.y); - } - - // xyyz - template - GLM_INLINE glm::vec<4, T, Q> xyyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.z); - } - - // xyyw - template - GLM_INLINE glm::vec<4, T, Q> xyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.y, v.w); - } - - // xyzx - template - GLM_INLINE glm::vec<4, T, Q> xyzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.x); - } - - // xyzy - template - GLM_INLINE glm::vec<4, T, Q> xyzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.y); - } - - // xyzz - template - GLM_INLINE glm::vec<4, T, Q> xyzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.z); - } - - // xyzw - template - GLM_INLINE glm::vec<4, T, Q> xyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.z, v.w); - } - - // xywx - template - GLM_INLINE glm::vec<4, T, Q> xywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.w, v.x); - } - - // xywy - template - GLM_INLINE glm::vec<4, T, Q> xywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.w, v.y); - } - - // xywz - template - GLM_INLINE glm::vec<4, T, Q> xywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, 
v.y, v.w, v.z); - } - - // xyww - template - GLM_INLINE glm::vec<4, T, Q> xyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.y, v.w, v.w); - } - - // xzxx - template - GLM_INLINE glm::vec<4, T, Q> xzxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.x); - } - - // xzxy - template - GLM_INLINE glm::vec<4, T, Q> xzxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.y); - } - - // xzxz - template - GLM_INLINE glm::vec<4, T, Q> xzxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.z); - } - - // xzxw - template - GLM_INLINE glm::vec<4, T, Q> xzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.x, v.w); - } - - // xzyx - template - GLM_INLINE glm::vec<4, T, Q> xzyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.x); - } - - // xzyy - template - GLM_INLINE glm::vec<4, T, Q> xzyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.y); - } - - // xzyz - template - GLM_INLINE glm::vec<4, T, Q> xzyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.z); - } - - // xzyw - template - GLM_INLINE glm::vec<4, T, Q> xzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.y, v.w); - } - - // xzzx - template - GLM_INLINE glm::vec<4, T, Q> xzzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.x); - } - - // xzzy - template - GLM_INLINE glm::vec<4, T, Q> xzzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.y); - } - - // xzzz - template - GLM_INLINE glm::vec<4, T, Q> xzzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> xzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.z); - } - - // xzzw - template - GLM_INLINE glm::vec<4, T, Q> xzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.z, v.w); - } - - // xzwx - template - GLM_INLINE glm::vec<4, T, Q> xzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.w, v.x); - } - - // xzwy - template - GLM_INLINE glm::vec<4, T, Q> xzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.w, v.y); - } - - // xzwz - template - GLM_INLINE glm::vec<4, T, Q> xzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.z, v.w, v.z); - } - - // xzww - template - GLM_INLINE glm::vec<4, T, Q> xzww(const glm::vec<4, T, Q> &v) { - return 
glm::vec<4, T, Q>(v.x, v.z, v.w, v.w); - } - - // xwxx - template - GLM_INLINE glm::vec<4, T, Q> xwxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.x); - } - - // xwxy - template - GLM_INLINE glm::vec<4, T, Q> xwxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.y); - } - - // xwxz - template - GLM_INLINE glm::vec<4, T, Q> xwxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.z); - } - - // xwxw - template - GLM_INLINE glm::vec<4, T, Q> xwxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.x, v.w); - } - - // xwyx - template - GLM_INLINE glm::vec<4, T, Q> xwyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.x); - } - - // xwyy - template - GLM_INLINE glm::vec<4, T, Q> xwyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.y); - } - - // xwyz - template - GLM_INLINE glm::vec<4, T, Q> xwyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.z); - } - - // xwyw - template - GLM_INLINE glm::vec<4, T, Q> xwyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.y, v.w); - } - - // xwzx - template - GLM_INLINE glm::vec<4, T, Q> xwzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.x); - } - - // xwzy - template - GLM_INLINE glm::vec<4, T, Q> xwzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.y); - } - - // xwzz - template - GLM_INLINE glm::vec<4, T, Q> xwzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.z); - } - - // xwzw - template - GLM_INLINE glm::vec<4, T, Q> xwzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.z, v.w); - } - - // xwwx - template - GLM_INLINE glm::vec<4, T, Q> xwwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.x); - } - - // xwwy - template - GLM_INLINE glm::vec<4, T, Q> xwwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.y); - } - - // xwwz - template - GLM_INLINE glm::vec<4, T, Q> xwwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.z); - } - - // xwww - template - GLM_INLINE glm::vec<4, T, Q> xwww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.x, v.w, v.w, v.w); - } - - // yxxx - template - GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.x); - } - - // yxxy - template - GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.y); - } - - // yxxz - template - GLM_INLINE glm::vec<4, T, Q> yxxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.z); - } - - // yxxw - template - GLM_INLINE glm::vec<4, T, Q> yxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.x, v.w); - } - - // yxyx - template - GLM_INLINE 
glm::vec<4, T, Q> yxyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.x); - } - - // yxyy - template - GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.y); - } - - // yxyz - template - GLM_INLINE glm::vec<4, T, Q> yxyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.z); - } - - // yxyw - template - GLM_INLINE glm::vec<4, T, Q> yxyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.y, v.w); - } - - // yxzx - template - GLM_INLINE glm::vec<4, T, Q> yxzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.x); - } - - // yxzy - template - GLM_INLINE glm::vec<4, T, Q> yxzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.y); - } - - // yxzz - template - GLM_INLINE glm::vec<4, T, Q> yxzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.z); - } - - // yxzw - template - GLM_INLINE glm::vec<4, T, Q> yxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.z, v.w); - } - - // yxwx - template - GLM_INLINE glm::vec<4, T, Q> yxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.x); - } - - // yxwy - template - GLM_INLINE glm::vec<4, T, Q> yxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.y); - } - - // yxwz - template - GLM_INLINE glm::vec<4, T, Q> yxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.z); - } - - // yxww - template - GLM_INLINE glm::vec<4, T, Q> yxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.x, v.w, v.w); - } - - // yyxx - template - GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.x); - } - - // yyxy - template - GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.y); - } - - // yyxz - template - GLM_INLINE 
glm::vec<4, T, Q> yyxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.z); - } - - // yyxw - template - GLM_INLINE glm::vec<4, T, Q> yyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.x, v.w); - } - - // yyyx - template - GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.x); - } - - // yyyy - template - GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<2, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.y); - } - - // yyyz - template - GLM_INLINE glm::vec<4, T, Q> yyyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.z); - } - - // yyyw - template - GLM_INLINE glm::vec<4, T, Q> yyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.y, v.w); - } - - // yyzx - template - GLM_INLINE glm::vec<4, T, Q> yyzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.x); - } - - // yyzy - template - GLM_INLINE glm::vec<4, T, Q> yyzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.y); - } - - // yyzz - template - GLM_INLINE glm::vec<4, T, Q> yyzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.z); - } - - // yyzw - template - GLM_INLINE glm::vec<4, T, Q> yyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.z, v.w); - } - - // yywx - template - GLM_INLINE glm::vec<4, T, Q> yywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.x); - } - - // yywy - template - GLM_INLINE glm::vec<4, T, Q> yywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.y); - } - - // yywz - template - GLM_INLINE glm::vec<4, T, Q> yywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.z); - } - - // yyww - template - GLM_INLINE glm::vec<4, T, Q> yyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.y, v.w, v.w); - } - - // yzxx - template - GLM_INLINE glm::vec<4, T, Q> yzxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.x); - } - - // yzxy - template - GLM_INLINE glm::vec<4, T, Q> yzxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); - } - - template - 
GLM_INLINE glm::vec<4, T, Q> yzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.y); - } - - // yzxz - template - GLM_INLINE glm::vec<4, T, Q> yzxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.z); - } - - // yzxw - template - GLM_INLINE glm::vec<4, T, Q> yzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.x, v.w); - } - - // yzyx - template - GLM_INLINE glm::vec<4, T, Q> yzyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.x); - } - - // yzyy - template - GLM_INLINE glm::vec<4, T, Q> yzyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.y); - } - - // yzyz - template - GLM_INLINE glm::vec<4, T, Q> yzyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.z); - } - - // yzyw - template - GLM_INLINE glm::vec<4, T, Q> yzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.y, v.w); - } - - // yzzx - template - GLM_INLINE glm::vec<4, T, Q> yzzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.x); - } - - // yzzy - template - GLM_INLINE glm::vec<4, T, Q> yzzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.y); - } - - // yzzz - template - GLM_INLINE glm::vec<4, T, Q> yzzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> yzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.z); - } - - // yzzw - template - GLM_INLINE glm::vec<4, T, Q> yzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.z, v.w); - } - - // yzwx - template - GLM_INLINE glm::vec<4, T, Q> yzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.x); - } - - // yzwy - template - GLM_INLINE glm::vec<4, T, Q> yzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.y); - } - - // yzwz - template - GLM_INLINE glm::vec<4, T, Q> yzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.z); - } - - // yzww - template - GLM_INLINE glm::vec<4, T, Q> yzww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.z, v.w, v.w); - } - - // ywxx - template - GLM_INLINE glm::vec<4, T, Q> ywxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.x, v.x); - } - - // ywxy - template - GLM_INLINE glm::vec<4, T, Q> ywxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.x, v.y); - } - - // ywxz - template - GLM_INLINE glm::vec<4, T, Q> ywxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.x, v.z); - } - - // ywxw - template - GLM_INLINE glm::vec<4, T, Q> ywxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, 
v.w, v.x, v.w); - } - - // ywyx - template - GLM_INLINE glm::vec<4, T, Q> ywyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.x); - } - - // ywyy - template - GLM_INLINE glm::vec<4, T, Q> ywyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.y); - } - - // ywyz - template - GLM_INLINE glm::vec<4, T, Q> ywyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.z); - } - - // ywyw - template - GLM_INLINE glm::vec<4, T, Q> ywyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.y, v.w); - } - - // ywzx - template - GLM_INLINE glm::vec<4, T, Q> ywzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.x); - } - - // ywzy - template - GLM_INLINE glm::vec<4, T, Q> ywzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.y); - } - - // ywzz - template - GLM_INLINE glm::vec<4, T, Q> ywzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.z); - } - - // ywzw - template - GLM_INLINE glm::vec<4, T, Q> ywzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.z, v.w); - } - - // ywwx - template - GLM_INLINE glm::vec<4, T, Q> ywwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.x); - } - - // ywwy - template - GLM_INLINE glm::vec<4, T, Q> ywwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.y); - } - - // ywwz - template - GLM_INLINE glm::vec<4, T, Q> ywwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.z); - } - - // ywww - template - GLM_INLINE glm::vec<4, T, Q> ywww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.y, v.w, v.w, v.w); - } - - // zxxx - template - GLM_INLINE glm::vec<4, T, Q> zxxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.x); - } - - // zxxy - template - GLM_INLINE glm::vec<4, T, Q> zxxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.y); - } - - // zxxz - template - GLM_INLINE glm::vec<4, T, Q> zxxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.z); - } - - // zxxw - template - GLM_INLINE glm::vec<4, T, Q> zxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.x, v.w); - } - - // zxyx - template - GLM_INLINE glm::vec<4, T, Q> zxyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.x); - } - - // zxyy - template - GLM_INLINE glm::vec<4, T, Q> zxyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.y); - } - - // zxyz - template - GLM_INLINE glm::vec<4, T, Q> zxyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.z); - } - - // zxyw - template - GLM_INLINE glm::vec<4, T, Q> zxyw(const 
glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.y, v.w); - } - - // zxzx - template - GLM_INLINE glm::vec<4, T, Q> zxzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.x); - } - - // zxzy - template - GLM_INLINE glm::vec<4, T, Q> zxzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.y); - } - - // zxzz - template - GLM_INLINE glm::vec<4, T, Q> zxzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.z); - } - - // zxzw - template - GLM_INLINE glm::vec<4, T, Q> zxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.z, v.w); - } - - // zxwx - template - GLM_INLINE glm::vec<4, T, Q> zxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.x); - } - - // zxwy - template - GLM_INLINE glm::vec<4, T, Q> zxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.y); - } - - // zxwz - template - GLM_INLINE glm::vec<4, T, Q> zxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.z); - } - - // zxww - template - GLM_INLINE glm::vec<4, T, Q> zxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.x, v.w, v.w); - } - - // zyxx - template - GLM_INLINE glm::vec<4, T, Q> zyxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.x); - } - - // zyxy - template - GLM_INLINE glm::vec<4, T, Q> zyxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.y); - } - - // zyxz - template - GLM_INLINE glm::vec<4, T, Q> zyxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.z); - } - - // zyxw - template - GLM_INLINE glm::vec<4, T, Q> zyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.x, v.w); - } - - // zyyx - template - GLM_INLINE glm::vec<4, T, Q> zyyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.x); - } - - // zyyy - template - GLM_INLINE glm::vec<4, T, Q> zyyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.y); - } - - // zyyz - template - GLM_INLINE glm::vec<4, T, Q> zyyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.z); - } - - // zyyw - template - GLM_INLINE glm::vec<4, T, Q> zyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.y, v.w); - } - - // zyzx - template - GLM_INLINE 
glm::vec<4, T, Q> zyzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.x); - } - - // zyzy - template - GLM_INLINE glm::vec<4, T, Q> zyzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.y); - } - - // zyzz - template - GLM_INLINE glm::vec<4, T, Q> zyzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.z); - } - - // zyzw - template - GLM_INLINE glm::vec<4, T, Q> zyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.z, v.w); - } - - // zywx - template - GLM_INLINE glm::vec<4, T, Q> zywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.x); - } - - // zywy - template - GLM_INLINE glm::vec<4, T, Q> zywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.y); - } - - // zywz - template - GLM_INLINE glm::vec<4, T, Q> zywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.z); - } - - // zyww - template - GLM_INLINE glm::vec<4, T, Q> zyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.y, v.w, v.w); - } - - // zzxx - template - GLM_INLINE glm::vec<4, T, Q> zzxx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.x); - } - - // zzxy - template - GLM_INLINE glm::vec<4, T, Q> zzxy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.y); - } - - // zzxz - template - GLM_INLINE glm::vec<4, T, Q> zzxz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.z); - } - - // zzxw - template - GLM_INLINE glm::vec<4, T, Q> zzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.x, v.w); - } - - // zzyx - template - GLM_INLINE glm::vec<4, T, Q> zzyx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.x); - } - - // zzyy - template - GLM_INLINE glm::vec<4, T, Q> zzyy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.y); - } - - // zzyz - template - GLM_INLINE glm::vec<4, T, Q> zzyz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.z); - } - - // zzyw - template - GLM_INLINE glm::vec<4, T, Q> zzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.y, v.w); - } - - // zzzx - template - GLM_INLINE glm::vec<4, T, Q> zzzx(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x); - } - - template - 
GLM_INLINE glm::vec<4, T, Q> zzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.x); - } - - // zzzy - template - GLM_INLINE glm::vec<4, T, Q> zzzy(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.y); - } - - // zzzz - template - GLM_INLINE glm::vec<4, T, Q> zzzz(const glm::vec<3, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); - } - - template - GLM_INLINE glm::vec<4, T, Q> zzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.z); - } - - // zzzw - template - GLM_INLINE glm::vec<4, T, Q> zzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.z, v.w); - } - - // zzwx - template - GLM_INLINE glm::vec<4, T, Q> zzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.x); - } - - // zzwy - template - GLM_INLINE glm::vec<4, T, Q> zzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.y); - } - - // zzwz - template - GLM_INLINE glm::vec<4, T, Q> zzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.z); - } - - // zzww - template - GLM_INLINE glm::vec<4, T, Q> zzww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.z, v.w, v.w); - } - - // zwxx - template - GLM_INLINE glm::vec<4, T, Q> zwxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.x); - } - - // zwxy - template - GLM_INLINE glm::vec<4, T, Q> zwxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.y); - } - - // zwxz - template - GLM_INLINE glm::vec<4, T, Q> zwxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.z); - } - - // zwxw - template - GLM_INLINE glm::vec<4, T, Q> zwxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.x, v.w); - } - - // zwyx - template - GLM_INLINE glm::vec<4, T, Q> zwyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.x); - } - - // zwyy - template - GLM_INLINE glm::vec<4, T, Q> zwyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.y); - } - - // zwyz - template - GLM_INLINE glm::vec<4, T, Q> zwyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.z); - } - - // zwyw - template - GLM_INLINE glm::vec<4, T, Q> zwyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.y, v.w); - } - - // zwzx - template - GLM_INLINE glm::vec<4, T, Q> zwzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.x); - } - - // zwzy - template - GLM_INLINE glm::vec<4, T, Q> zwzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.y); - } - - // zwzz - template - GLM_INLINE glm::vec<4, T, Q> zwzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.z); - } - - // zwzw - template - GLM_INLINE glm::vec<4, T, Q> zwzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.z, v.w); - } - - // zwwx - template - GLM_INLINE glm::vec<4, T, Q> zwwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.x); - } - - // zwwy - template - GLM_INLINE glm::vec<4, T, Q> zwwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.y); - } - - // zwwz - template - GLM_INLINE glm::vec<4, T, Q> zwwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.z); - } - - // zwww - template - GLM_INLINE glm::vec<4, T, Q> zwww(const 
glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.z, v.w, v.w, v.w); - } - - // wxxx - template - GLM_INLINE glm::vec<4, T, Q> wxxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.x); - } - - // wxxy - template - GLM_INLINE glm::vec<4, T, Q> wxxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.y); - } - - // wxxz - template - GLM_INLINE glm::vec<4, T, Q> wxxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.z); - } - - // wxxw - template - GLM_INLINE glm::vec<4, T, Q> wxxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.x, v.w); - } - - // wxyx - template - GLM_INLINE glm::vec<4, T, Q> wxyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.x); - } - - // wxyy - template - GLM_INLINE glm::vec<4, T, Q> wxyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.y); - } - - // wxyz - template - GLM_INLINE glm::vec<4, T, Q> wxyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.z); - } - - // wxyw - template - GLM_INLINE glm::vec<4, T, Q> wxyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.y, v.w); - } - - // wxzx - template - GLM_INLINE glm::vec<4, T, Q> wxzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.x); - } - - // wxzy - template - GLM_INLINE glm::vec<4, T, Q> wxzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.y); - } - - // wxzz - template - GLM_INLINE glm::vec<4, T, Q> wxzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.z); - } - - // wxzw - template - GLM_INLINE glm::vec<4, T, Q> wxzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.z, v.w); - } - - // wxwx - template - GLM_INLINE glm::vec<4, T, Q> wxwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.x); - } - - // wxwy - template - GLM_INLINE glm::vec<4, T, Q> wxwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.y); - } - - // wxwz - template - GLM_INLINE glm::vec<4, T, Q> wxwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.z); - } - - // wxww - template - GLM_INLINE glm::vec<4, T, Q> wxww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.x, v.w, v.w); - } - - // wyxx - template - GLM_INLINE glm::vec<4, T, Q> wyxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.x); - } - - // wyxy - template - GLM_INLINE glm::vec<4, T, Q> wyxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.y); - } - - // wyxz - template - GLM_INLINE glm::vec<4, T, Q> wyxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.z); - } - - // wyxw - template - GLM_INLINE glm::vec<4, T, Q> wyxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.x, v.w); - } - - // wyyx - template - GLM_INLINE glm::vec<4, T, Q> wyyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.x); - } - - // wyyy - template - GLM_INLINE glm::vec<4, T, Q> wyyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.y); - } - - // wyyz - template - GLM_INLINE glm::vec<4, T, Q> wyyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.z); - } - - // wyyw - template - GLM_INLINE glm::vec<4, T, Q> wyyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.y, v.w); - } - - // wyzx - template - GLM_INLINE glm::vec<4, T, Q> wyzx(const glm::vec<4, T, Q> &v) { - 
return glm::vec<4, T, Q>(v.w, v.y, v.z, v.x); - } - - // wyzy - template - GLM_INLINE glm::vec<4, T, Q> wyzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.z, v.y); - } - - // wyzz - template - GLM_INLINE glm::vec<4, T, Q> wyzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.z, v.z); - } - - // wyzw - template - GLM_INLINE glm::vec<4, T, Q> wyzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.z, v.w); - } - - // wywx - template - GLM_INLINE glm::vec<4, T, Q> wywx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.x); - } - - // wywy - template - GLM_INLINE glm::vec<4, T, Q> wywy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.y); - } - - // wywz - template - GLM_INLINE glm::vec<4, T, Q> wywz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.z); - } - - // wyww - template - GLM_INLINE glm::vec<4, T, Q> wyww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.y, v.w, v.w); - } - - // wzxx - template - GLM_INLINE glm::vec<4, T, Q> wzxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.x); - } - - // wzxy - template - GLM_INLINE glm::vec<4, T, Q> wzxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.y); - } - - // wzxz - template - GLM_INLINE glm::vec<4, T, Q> wzxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.z); - } - - // wzxw - template - GLM_INLINE glm::vec<4, T, Q> wzxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.x, v.w); - } - - // wzyx - template - GLM_INLINE glm::vec<4, T, Q> wzyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.x); - } - - // wzyy - template - GLM_INLINE glm::vec<4, T, Q> wzyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.y); - } - - // wzyz - template - GLM_INLINE glm::vec<4, T, Q> wzyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.z); - } - - // wzyw - template - GLM_INLINE glm::vec<4, T, Q> wzyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.y, v.w); - } - - // wzzx - template - GLM_INLINE glm::vec<4, T, Q> wzzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.x); - } - - // wzzy - template - GLM_INLINE glm::vec<4, T, Q> wzzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.y); - } - - // wzzz - template - GLM_INLINE glm::vec<4, T, Q> wzzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.z); - } - - // wzzw - template - GLM_INLINE glm::vec<4, T, Q> wzzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.z, v.w); - } - - // wzwx - template - GLM_INLINE glm::vec<4, T, Q> wzwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.x); - } - - // wzwy - template - GLM_INLINE glm::vec<4, T, Q> wzwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.y); - } - - // wzwz - template - GLM_INLINE glm::vec<4, T, Q> wzwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.z); - } - - // wzww - template - GLM_INLINE glm::vec<4, T, Q> wzww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.z, v.w, v.w); - } - - // wwxx - template - GLM_INLINE glm::vec<4, T, Q> wwxx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.x, v.x); - } - - // wwxy - template - GLM_INLINE glm::vec<4, T, Q> wwxy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, 
v.w, v.x, v.y); - } - - // wwxz - template - GLM_INLINE glm::vec<4, T, Q> wwxz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.x, v.z); - } - - // wwxw - template - GLM_INLINE glm::vec<4, T, Q> wwxw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.x, v.w); - } - - // wwyx - template - GLM_INLINE glm::vec<4, T, Q> wwyx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.x); - } - - // wwyy - template - GLM_INLINE glm::vec<4, T, Q> wwyy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.y); - } - - // wwyz - template - GLM_INLINE glm::vec<4, T, Q> wwyz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.z); - } - - // wwyw - template - GLM_INLINE glm::vec<4, T, Q> wwyw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.y, v.w); - } - - // wwzx - template - GLM_INLINE glm::vec<4, T, Q> wwzx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.x); - } - - // wwzy - template - GLM_INLINE glm::vec<4, T, Q> wwzy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.y); - } - - // wwzz - template - GLM_INLINE glm::vec<4, T, Q> wwzz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.z); - } - - // wwzw - template - GLM_INLINE glm::vec<4, T, Q> wwzw(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.z, v.w); - } - - // wwwx - template - GLM_INLINE glm::vec<4, T, Q> wwwx(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.x); - } - - // wwwy - template - GLM_INLINE glm::vec<4, T, Q> wwwy(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.y); - } - - // wwwz - template - GLM_INLINE glm::vec<4, T, Q> wwwz(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.z); - } - - // wwww - template - GLM_INLINE glm::vec<4, T, Q> wwww(const glm::vec<4, T, Q> &v) { - return glm::vec<4, T, Q>(v.w, v.w, v.w, v.w); - } - -} diff --git a/third_party/glm/gtx/vector_angle.hpp b/third_party/glm/gtx/vector_angle.hpp deleted file mode 100755 index 9ae4371..0000000 --- a/third_party/glm/gtx/vector_angle.hpp +++ /dev/null @@ -1,57 +0,0 @@ -/// @ref gtx_vector_angle -/// @file glm/gtx/vector_angle.hpp -/// -/// @see core (dependence) -/// @see gtx_quaternion (dependence) -/// @see gtx_epsilon (dependence) -/// -/// @defgroup gtx_vector_angle GLM_GTX_vector_angle -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Compute angle between vectors - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/epsilon.hpp" -#include "../gtx/quaternion.hpp" -#include "../gtx/rotate_vector.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_vector_angle is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_vector_angle extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_vector_angle - /// @{ - - //! Returns the absolute angle between two vectors. - //! Parameters need to be normalized. - /// @see gtx_vector_angle extension. - template - GLM_FUNC_DECL T angle(vec const& x, vec const& y); - - //! Returns the oriented angle between two 2d vectors. - //! Parameters need to be normalized. - /// @see gtx_vector_angle extension. 
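For reference, each of the swizzle helpers deleted above simply builds a new vector from the named components of its argument. A minimal hand-written equivalent, assuming only the stock GLM headers (the helper names themselves are gone once this hunk is applied):

    #include <glm/glm.hpp>

    int main()
    {
        glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);
        glm::vec3 a(v.y, v.z, v.w);      // what yzw(v) returned above: (2, 3, 4)
        glm::vec4 b(v.w, v.w, v.w, v.w); // what wwww(v) returned: (4, 4, 4, 4)
        return 0;
    }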
- template - GLM_FUNC_DECL T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y); - - //! Returns the oriented angle between two 3d vectors based from a reference axis. - //! Parameters need to be normalized. - /// @see gtx_vector_angle extension. - template - GLM_FUNC_DECL T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref); - - /// @} -}// namespace glm - -#include "vector_angle.inl" diff --git a/third_party/glm/gtx/vector_angle.inl b/third_party/glm/gtx/vector_angle.inl deleted file mode 100755 index a1f957a..0000000 --- a/third_party/glm/gtx/vector_angle.inl +++ /dev/null @@ -1,44 +0,0 @@ -/// @ref gtx_vector_angle - -namespace glm -{ - template - GLM_FUNC_QUALIFIER genType angle - ( - genType const& x, - genType const& y - ) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'angle' only accept floating-point inputs"); - return acos(clamp(dot(x, y), genType(-1), genType(1))); - } - - template - GLM_FUNC_QUALIFIER T angle(vec const& x, vec const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'angle' only accept floating-point inputs"); - return acos(clamp(dot(x, y), T(-1), T(1))); - } - - //! \todo epsilon is hard coded to 0.01 - template - GLM_FUNC_QUALIFIER T orientedAngle(vec<2, T, Q> const& x, vec<2, T, Q> const& y) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'orientedAngle' only accept floating-point inputs"); - T const Angle(acos(clamp(dot(x, y), T(-1), T(1)))); - - if(all(epsilonEqual(y, glm::rotate(x, Angle), T(0.0001)))) - return Angle; - else - return -Angle; - } - - template - GLM_FUNC_QUALIFIER T orientedAngle(vec<3, T, Q> const& x, vec<3, T, Q> const& y, vec<3, T, Q> const& ref) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'orientedAngle' only accept floating-point inputs"); - - T const Angle(acos(clamp(dot(x, y), T(-1), T(1)))); - return mix(Angle, -Angle, dot(ref, cross(x, y)) < T(0)); - } -}//namespace glm diff --git a/third_party/glm/gtx/vector_query.hpp b/third_party/glm/gtx/vector_query.hpp deleted file mode 100755 index 77c7b97..0000000 --- a/third_party/glm/gtx/vector_query.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/// @ref gtx_vector_query -/// @file glm/gtx/vector_query.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_vector_query GLM_GTX_vector_query -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Query informations of vector types - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include -#include - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_vector_query is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_vector_query extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_vector_query - /// @{ - - //! Check whether two vectors are collinears. - /// @see gtx_vector_query extensions. - template - GLM_FUNC_DECL bool areCollinear(vec const& v0, vec const& v1, T const& epsilon); - - //! Check whether two vectors are orthogonals. - /// @see gtx_vector_query extensions. - template - GLM_FUNC_DECL bool areOrthogonal(vec const& v0, vec const& v1, T const& epsilon); - - //! Check whether a vector is normalized. - /// @see gtx_vector_query extensions. - template - GLM_FUNC_DECL bool isNormalized(vec const& v, T const& epsilon); - - //! Check whether a vector is null. 
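The vector_angle removal above is easier to follow with the formulas spelled out: angle() is the arccosine of the clamped dot product, and orientedAngle() flips its sign according to dot(ref, cross(x, y)). A small sketch paraphrasing the deleted .inl (inputs assumed normalized, as the header comments require; these are placeholder names, not the library calls themselves):

    #include <cmath>
    #include <glm/glm.hpp>

    // Unsigned angle between two unit vectors, as in the deleted angle().
    float angleBetween(const glm::vec3& x, const glm::vec3& y)
    {
        return std::acos(glm::clamp(glm::dot(x, y), -1.0f, 1.0f));
    }

    // Signed angle around a reference axis, as in the deleted orientedAngle():
    // the sign records which side of the ref axis the rotation falls on.
    float orientedAngleAround(const glm::vec3& x, const glm::vec3& y, const glm::vec3& ref)
    {
        const float a = angleBetween(x, y);
        return glm::dot(ref, glm::cross(x, y)) < 0.0f ? -a : a;
    }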
- /// @see gtx_vector_query extensions. - template - GLM_FUNC_DECL bool isNull(vec const& v, T const& epsilon); - - //! Check whether a each component of a vector is null. - /// @see gtx_vector_query extensions. - template - GLM_FUNC_DECL vec isCompNull(vec const& v, T const& epsilon); - - //! Check whether two vectors are orthonormal. - /// @see gtx_vector_query extensions. - template - GLM_FUNC_DECL bool areOrthonormal(vec const& v0, vec const& v1, T const& epsilon); - - /// @} -}// namespace glm - -#include "vector_query.inl" diff --git a/third_party/glm/gtx/vector_query.inl b/third_party/glm/gtx/vector_query.inl deleted file mode 100755 index d1a5c9b..0000000 --- a/third_party/glm/gtx/vector_query.inl +++ /dev/null @@ -1,154 +0,0 @@ -/// @ref gtx_vector_query - -#include - -namespace glm{ -namespace detail -{ - template - struct compute_areCollinear{}; - - template - struct compute_areCollinear<2, T, Q> - { - GLM_FUNC_QUALIFIER static bool call(vec<2, T, Q> const& v0, vec<2, T, Q> const& v1, T const& epsilon) - { - return length(cross(vec<3, T, Q>(v0, static_cast(0)), vec<3, T, Q>(v1, static_cast(0)))) < epsilon; - } - }; - - template - struct compute_areCollinear<3, T, Q> - { - GLM_FUNC_QUALIFIER static bool call(vec<3, T, Q> const& v0, vec<3, T, Q> const& v1, T const& epsilon) - { - return length(cross(v0, v1)) < epsilon; - } - }; - - template - struct compute_areCollinear<4, T, Q> - { - GLM_FUNC_QUALIFIER static bool call(vec<4, T, Q> const& v0, vec<4, T, Q> const& v1, T const& epsilon) - { - return length(cross(vec<3, T, Q>(v0), vec<3, T, Q>(v1))) < epsilon; - } - }; - - template - struct compute_isCompNull{}; - - template - struct compute_isCompNull<2, T, Q> - { - GLM_FUNC_QUALIFIER static vec<2, bool, Q> call(vec<2, T, Q> const& v, T const& epsilon) - { - return vec<2, bool, Q>( - (abs(v.x) < epsilon), - (abs(v.y) < epsilon)); - } - }; - - template - struct compute_isCompNull<3, T, Q> - { - GLM_FUNC_QUALIFIER static vec<3, bool, Q> call(vec<3, T, Q> const& v, T const& epsilon) - { - return vec<3, bool, Q>( - (abs(v.x) < epsilon), - (abs(v.y) < epsilon), - (abs(v.z) < epsilon)); - } - }; - - template - struct compute_isCompNull<4, T, Q> - { - GLM_FUNC_QUALIFIER static vec<4, bool, Q> call(vec<4, T, Q> const& v, T const& epsilon) - { - return vec<4, bool, Q>( - (abs(v.x) < epsilon), - (abs(v.y) < epsilon), - (abs(v.z) < epsilon), - (abs(v.w) < epsilon)); - } - }; - -}//namespace detail - - template - GLM_FUNC_QUALIFIER bool areCollinear(vec const& v0, vec const& v1, T const& epsilon) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'areCollinear' only accept floating-point inputs"); - - return detail::compute_areCollinear::call(v0, v1, epsilon); - } - - template - GLM_FUNC_QUALIFIER bool areOrthogonal(vec const& v0, vec const& v1, T const& epsilon) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'areOrthogonal' only accept floating-point inputs"); - - return abs(dot(v0, v1)) <= max( - static_cast(1), - length(v0)) * max(static_cast(1), length(v1)) * epsilon; - } - - template - GLM_FUNC_QUALIFIER bool isNormalized(vec const& v, T const& epsilon) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isNormalized' only accept floating-point inputs"); - - return abs(length(v) - static_cast(1)) <= static_cast(2) * epsilon; - } - - template - GLM_FUNC_QUALIFIER bool isNull(vec const& v, T const& epsilon) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isNull' only accept floating-point inputs"); - - return length(v) <= epsilon; - } - - template - 
GLM_FUNC_QUALIFIER vec isCompNull(vec const& v, T const& epsilon) - { - GLM_STATIC_ASSERT(std::numeric_limits::is_iec559, "'isCompNull' only accept floating-point inputs"); - - return detail::compute_isCompNull::call(v, epsilon); - } - - template - GLM_FUNC_QUALIFIER vec<2, bool, Q> isCompNull(vec<2, T, Q> const& v, T const& epsilon) - { - return vec<2, bool, Q>( - abs(v.x) < epsilon, - abs(v.y) < epsilon); - } - - template - GLM_FUNC_QUALIFIER vec<3, bool, Q> isCompNull(vec<3, T, Q> const& v, T const& epsilon) - { - return vec<3, bool, Q>( - abs(v.x) < epsilon, - abs(v.y) < epsilon, - abs(v.z) < epsilon); - } - - template - GLM_FUNC_QUALIFIER vec<4, bool, Q> isCompNull(vec<4, T, Q> const& v, T const& epsilon) - { - return vec<4, bool, Q>( - abs(v.x) < epsilon, - abs(v.y) < epsilon, - abs(v.z) < epsilon, - abs(v.w) < epsilon); - } - - template - GLM_FUNC_QUALIFIER bool areOrthonormal(vec const& v0, vec const& v1, T const& epsilon) - { - return isNormalized(v0, epsilon) && isNormalized(v1, epsilon) && (abs(dot(v0, v1)) <= epsilon); - } - -}//namespace glm diff --git a/third_party/glm/gtx/wrap.hpp b/third_party/glm/gtx/wrap.hpp deleted file mode 100755 index 02c5196..0000000 --- a/third_party/glm/gtx/wrap.hpp +++ /dev/null @@ -1,55 +0,0 @@ -/// @ref gtx_wrap -/// @file glm/gtx/wrap.hpp -/// -/// @see core (dependence) -/// -/// @defgroup gtx_wrap GLM_GTX_wrap -/// @ingroup gtx -/// -/// Include to use the features of this extension. -/// -/// Wrapping mode of texture coordinates. - -#pragma once - -// Dependency: -#include "../glm.hpp" -#include "../gtc/vec1.hpp" - -#if GLM_MESSAGES == GLM_ENABLE && !defined(GLM_EXT_INCLUDED) -# ifndef GLM_ENABLE_EXPERIMENTAL -# pragma message("GLM: GLM_GTX_wrap is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it.") -# else -# pragma message("GLM: GLM_GTX_wrap extension included") -# endif -#endif - -namespace glm -{ - /// @addtogroup gtx_wrap - /// @{ - - /// Simulate GL_CLAMP OpenGL wrap mode - /// @see gtx_wrap extension. - template - GLM_FUNC_DECL genType clamp(genType const& Texcoord); - - /// Simulate GL_REPEAT OpenGL wrap mode - /// @see gtx_wrap extension. - template - GLM_FUNC_DECL genType repeat(genType const& Texcoord); - - /// Simulate GL_MIRRORED_REPEAT OpenGL wrap mode - /// @see gtx_wrap extension. - template - GLM_FUNC_DECL genType mirrorClamp(genType const& Texcoord); - - /// Simulate GL_MIRROR_REPEAT OpenGL wrap mode - /// @see gtx_wrap extension. 
- template - GLM_FUNC_DECL genType mirrorRepeat(genType const& Texcoord); - - /// @} -}// namespace glm - -#include "wrap.inl" diff --git a/third_party/glm/gtx/wrap.inl b/third_party/glm/gtx/wrap.inl deleted file mode 100755 index 409a316..0000000 --- a/third_party/glm/gtx/wrap.inl +++ /dev/null @@ -1,57 +0,0 @@ -/// @ref gtx_wrap - -namespace glm -{ - template - GLM_FUNC_QUALIFIER vec clamp(vec const& Texcoord) - { - return glm::clamp(Texcoord, vec(0), vec(1)); - } - - template - GLM_FUNC_QUALIFIER genType clamp(genType const& Texcoord) - { - return clamp(vec<1, genType, defaultp>(Texcoord)).x; - } - - template - GLM_FUNC_QUALIFIER vec repeat(vec const& Texcoord) - { - return glm::fract(Texcoord); - } - - template - GLM_FUNC_QUALIFIER genType repeat(genType const& Texcoord) - { - return repeat(vec<1, genType, defaultp>(Texcoord)).x; - } - - template - GLM_FUNC_QUALIFIER vec mirrorClamp(vec const& Texcoord) - { - return glm::fract(glm::abs(Texcoord)); - } - - template - GLM_FUNC_QUALIFIER genType mirrorClamp(genType const& Texcoord) - { - return mirrorClamp(vec<1, genType, defaultp>(Texcoord)).x; - } - - template - GLM_FUNC_QUALIFIER vec mirrorRepeat(vec const& Texcoord) - { - vec const Abs = glm::abs(Texcoord); - vec const Clamp = glm::mod(glm::floor(Abs), vec(2)); - vec const Floor = glm::floor(Abs); - vec const Rest = Abs - Floor; - vec const Mirror = Clamp + Rest; - return mix(Rest, vec(1) - Rest, glm::greaterThanEqual(Mirror, vec(1))); - } - - template - GLM_FUNC_QUALIFIER genType mirrorRepeat(genType const& Texcoord) - { - return mirrorRepeat(vec<1, genType, defaultp>(Texcoord)).x; - } -}//namespace glm diff --git a/third_party/glm/integer.hpp b/third_party/glm/integer.hpp deleted file mode 100755 index 8817db3..0000000 --- a/third_party/glm/integer.hpp +++ /dev/null @@ -1,212 +0,0 @@ -/// @ref core -/// @file glm/integer.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.8 Integer Functions -/// -/// @defgroup core_func_integer Integer functions -/// @ingroup core -/// -/// Provides GLSL functions on integer types -/// -/// These all operate component-wise. The description is per component. -/// The notation [a, b] means the set of bits from bit-number a through bit-number -/// b, inclusive. The lowest-order bit is bit 0. -/// -/// Include to use these core features. - -#pragma once - -#include "detail/qualifier.hpp" -#include "common.hpp" -#include "vector_relational.hpp" - -namespace glm -{ - /// @addtogroup core_func_integer - /// @{ - - /// Adds 32-bit unsigned integer x and y, returning the sum - /// modulo pow(2, 32). The value carry is set to 0 if the sum was - /// less than pow(2, 32), or to 1 otherwise. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL uaddCarry man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec uaddCarry( - vec const& x, - vec const& y, - vec & carry); - - /// Subtracts the 32-bit unsigned integer y from x, returning - /// the difference if non-negative, or pow(2, 32) plus the difference - /// otherwise. The value borrow is set to 0 if x >= y, or to 1 otherwise. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. 
- /// - /// @see GLSL usubBorrow man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec usubBorrow( - vec const& x, - vec const& y, - vec & borrow); - - /// Multiplies 32-bit integers x and y, producing a 64-bit - /// result. The 32 least-significant bits are returned in lsb. - /// The 32 most-significant bits are returned in msb. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL umulExtended man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL void umulExtended( - vec const& x, - vec const& y, - vec & msb, - vec & lsb); - - /// Multiplies 32-bit integers x and y, producing a 64-bit - /// result. The 32 least-significant bits are returned in lsb. - /// The 32 most-significant bits are returned in msb. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL imulExtended man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL void imulExtended( - vec const& x, - vec const& y, - vec & msb, - vec & lsb); - - /// Extracts bits [offset, offset + bits - 1] from value, - /// returning them in the least significant bits of the result. - /// For unsigned data types, the most significant bits of the - /// result will be set to zero. For signed data types, the - /// most significant bits will be set to the value of bit offset + base - 1. - /// - /// If bits is zero, the result will be zero. The result will be - /// undefined if offset or bits is negative, or if the sum of - /// offset and bits is greater than the number of bits used - /// to store the operand. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar types. - /// - /// @see GLSL bitfieldExtract man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec bitfieldExtract( - vec const& Value, - int Offset, - int Bits); - - /// Returns the insertion the bits least-significant bits of insert into base. - /// - /// The result will have bits [offset, offset + bits - 1] taken - /// from bits [0, bits - 1] of insert, and all other bits taken - /// directly from the corresponding bits of base. If bits is - /// zero, the result will simply be base. The result will be - /// undefined if offset or bits is negative, or if the sum of - /// offset and bits is greater than the number of bits used to - /// store the operand. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar or vector types. - /// - /// @see GLSL bitfieldInsert man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec bitfieldInsert( - vec const& Base, - vec const& Insert, - int Offset, - int Bits); - - /// Returns the reversal of the bits of value. - /// The bit numbered n of the result will be taken from bit (bits - 1) - n of value, - /// where bits is the total number of bits used to represent value. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar or vector types. 
- /// - /// @see GLSL bitfieldReverse man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec bitfieldReverse(vec const& v); - - /// Returns the number of bits set to 1 in the binary representation of value. - /// - /// @tparam genType Signed or unsigned integer scalar or vector types. - /// - /// @see GLSL bitCount man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL int bitCount(genType v); - - /// Returns the number of bits set to 1 in the binary representation of value. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar or vector types. - /// - /// @see GLSL bitCount man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec bitCount(vec const& v); - - /// Returns the bit number of the least significant bit set to - /// 1 in the binary representation of value. - /// If value is zero, -1 will be returned. - /// - /// @tparam genIUType Signed or unsigned integer scalar types. - /// - /// @see GLSL findLSB man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL int findLSB(genIUType x); - - /// Returns the bit number of the least significant bit set to - /// 1 in the binary representation of value. - /// If value is zero, -1 will be returned. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar types. - /// - /// @see GLSL findLSB man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec findLSB(vec const& v); - - /// Returns the bit number of the most significant bit in the binary representation of value. - /// For positive integers, the result will be the bit number of the most significant bit set to 1. - /// For negative integers, the result will be the bit number of the most significant - /// bit set to 0. For a value of zero or negative one, -1 will be returned. - /// - /// @tparam genIUType Signed or unsigned integer scalar types. - /// - /// @see GLSL findMSB man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL int findMSB(genIUType x); - - /// Returns the bit number of the most significant bit in the binary representation of value. - /// For positive integers, the result will be the bit number of the most significant bit set to 1. - /// For negative integers, the result will be the bit number of the most significant - /// bit set to 0. For a value of zero or negative one, -1 will be returned. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T Signed or unsigned integer scalar types. 
- /// - /// @see GLSL findMSB man page - /// @see GLSL 4.20.8 specification, section 8.8 Integer Functions - template - GLM_FUNC_DECL vec findMSB(vec const& v); - - /// @} -}//namespace glm - -#include "detail/func_integer.inl" diff --git a/third_party/glm/mat2x2.hpp b/third_party/glm/mat2x2.hpp deleted file mode 100755 index 96bec96..0000000 --- a/third_party/glm/mat2x2.hpp +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref core -/// @file glm/mat2x2.hpp - -#pragma once -#include "./ext/matrix_double2x2.hpp" -#include "./ext/matrix_double2x2_precision.hpp" -#include "./ext/matrix_float2x2.hpp" -#include "./ext/matrix_float2x2_precision.hpp" - diff --git a/third_party/glm/mat2x3.hpp b/third_party/glm/mat2x3.hpp deleted file mode 100755 index d68dc25..0000000 --- a/third_party/glm/mat2x3.hpp +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref core -/// @file glm/mat2x3.hpp - -#pragma once -#include "./ext/matrix_double2x3.hpp" -#include "./ext/matrix_double2x3_precision.hpp" -#include "./ext/matrix_float2x3.hpp" -#include "./ext/matrix_float2x3_precision.hpp" - diff --git a/third_party/glm/mat2x4.hpp b/third_party/glm/mat2x4.hpp deleted file mode 100755 index b04b738..0000000 --- a/third_party/glm/mat2x4.hpp +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref core -/// @file glm/mat2x4.hpp - -#pragma once -#include "./ext/matrix_double2x4.hpp" -#include "./ext/matrix_double2x4_precision.hpp" -#include "./ext/matrix_float2x4.hpp" -#include "./ext/matrix_float2x4_precision.hpp" - diff --git a/third_party/glm/mat3x2.hpp b/third_party/glm/mat3x2.hpp deleted file mode 100755 index c853153..0000000 --- a/third_party/glm/mat3x2.hpp +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref core -/// @file glm/mat3x2.hpp - -#pragma once -#include "./ext/matrix_double3x2.hpp" -#include "./ext/matrix_double3x2_precision.hpp" -#include "./ext/matrix_float3x2.hpp" -#include "./ext/matrix_float3x2_precision.hpp" - diff --git a/third_party/glm/mat3x3.hpp b/third_party/glm/mat3x3.hpp deleted file mode 100755 index fd4fa31..0000000 --- a/third_party/glm/mat3x3.hpp +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref core -/// @file glm/mat3x3.hpp - -#pragma once -#include "./ext/matrix_double3x3.hpp" -#include "./ext/matrix_double3x3_precision.hpp" -#include "./ext/matrix_float3x3.hpp" -#include "./ext/matrix_float3x3_precision.hpp" diff --git a/third_party/glm/mat3x4.hpp b/third_party/glm/mat3x4.hpp deleted file mode 100755 index 6342bf5..0000000 --- a/third_party/glm/mat3x4.hpp +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref core -/// @file glm/mat3x4.hpp - -#pragma once -#include "./ext/matrix_double3x4.hpp" -#include "./ext/matrix_double3x4_precision.hpp" -#include "./ext/matrix_float3x4.hpp" -#include "./ext/matrix_float3x4_precision.hpp" diff --git a/third_party/glm/mat4x2.hpp b/third_party/glm/mat4x2.hpp deleted file mode 100755 index e013e46..0000000 --- a/third_party/glm/mat4x2.hpp +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref core -/// @file glm/mat4x2.hpp - -#pragma once -#include "./ext/matrix_double4x2.hpp" -#include "./ext/matrix_double4x2_precision.hpp" -#include "./ext/matrix_float4x2.hpp" -#include "./ext/matrix_float4x2_precision.hpp" - diff --git a/third_party/glm/mat4x3.hpp b/third_party/glm/mat4x3.hpp deleted file mode 100755 index 205725a..0000000 --- a/third_party/glm/mat4x3.hpp +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref core -/// @file glm/mat4x3.hpp - -#pragma once -#include "./ext/matrix_double4x3.hpp" -#include "./ext/matrix_double4x3_precision.hpp" -#include "./ext/matrix_float4x3.hpp" -#include "./ext/matrix_float4x3_precision.hpp" diff --git 
a/third_party/glm/mat4x4.hpp b/third_party/glm/mat4x4.hpp deleted file mode 100755 index 3515f7f..0000000 --- a/third_party/glm/mat4x4.hpp +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref core -/// @file glm/mat4x4.hpp - -#pragma once -#include "./ext/matrix_double4x4.hpp" -#include "./ext/matrix_double4x4_precision.hpp" -#include "./ext/matrix_float4x4.hpp" -#include "./ext/matrix_float4x4_precision.hpp" - diff --git a/third_party/glm/matrix.hpp b/third_party/glm/matrix.hpp deleted file mode 100755 index 6badf53..0000000 --- a/third_party/glm/matrix.hpp +++ /dev/null @@ -1,161 +0,0 @@ -/// @ref core -/// @file glm/matrix.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions -/// -/// @defgroup core_func_matrix Matrix functions -/// @ingroup core -/// -/// Provides GLSL matrix functions. -/// -/// Include to use these core features. - -#pragma once - -// Dependencies -#include "detail/qualifier.hpp" -#include "detail/setup.hpp" -#include "vec2.hpp" -#include "vec3.hpp" -#include "vec4.hpp" -#include "mat2x2.hpp" -#include "mat2x3.hpp" -#include "mat2x4.hpp" -#include "mat3x2.hpp" -#include "mat3x3.hpp" -#include "mat3x4.hpp" -#include "mat4x2.hpp" -#include "mat4x3.hpp" -#include "mat4x4.hpp" - -namespace glm { -namespace detail -{ - template - struct outerProduct_trait{}; - - template - struct outerProduct_trait<2, 2, T, Q> - { - typedef mat<2, 2, T, Q> type; - }; - - template - struct outerProduct_trait<2, 3, T, Q> - { - typedef mat<3, 2, T, Q> type; - }; - - template - struct outerProduct_trait<2, 4, T, Q> - { - typedef mat<4, 2, T, Q> type; - }; - - template - struct outerProduct_trait<3, 2, T, Q> - { - typedef mat<2, 3, T, Q> type; - }; - - template - struct outerProduct_trait<3, 3, T, Q> - { - typedef mat<3, 3, T, Q> type; - }; - - template - struct outerProduct_trait<3, 4, T, Q> - { - typedef mat<4, 3, T, Q> type; - }; - - template - struct outerProduct_trait<4, 2, T, Q> - { - typedef mat<2, 4, T, Q> type; - }; - - template - struct outerProduct_trait<4, 3, T, Q> - { - typedef mat<3, 4, T, Q> type; - }; - - template - struct outerProduct_trait<4, 4, T, Q> - { - typedef mat<4, 4, T, Q> type; - }; -}//namespace detail - - /// @addtogroup core_func_matrix - /// @{ - - /// Multiply matrix x by matrix y component-wise, i.e., - /// result[i][j] is the scalar product of x[i][j] and y[i][j]. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number a column - /// @tparam R Integer between 1 and 4 included that qualify the number a row - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL matrixCompMult man page - /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions - template - GLM_FUNC_DECL mat matrixCompMult(mat const& x, mat const& y); - - /// Treats the first parameter c as a column vector - /// and the second parameter r as a row vector - /// and does a linear algebraic matrix multiply c * r. 
- /// - /// @tparam C Integer between 1 and 4 included that qualify the number a column - /// @tparam R Integer between 1 and 4 included that qualify the number a row - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL outerProduct man page - /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions - template - GLM_FUNC_DECL typename detail::outerProduct_trait::type outerProduct(vec const& c, vec const& r); - - /// Returns the transposed matrix of x - /// - /// @tparam C Integer between 1 and 4 included that qualify the number a column - /// @tparam R Integer between 1 and 4 included that qualify the number a row - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL transpose man page - /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions - template - GLM_FUNC_DECL typename mat::transpose_type transpose(mat const& x); - - /// Return the determinant of a squared matrix. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number a column - /// @tparam R Integer between 1 and 4 included that qualify the number a row - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL determinant man page - /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions - template - GLM_FUNC_DECL T determinant(mat const& m); - - /// Return the inverse of a squared matrix. - /// - /// @tparam C Integer between 1 and 4 included that qualify the number a column - /// @tparam R Integer between 1 and 4 included that qualify the number a row - /// @tparam T Floating-point or signed integer scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL inverse man page - /// @see GLSL 4.20.8 specification, section 8.6 Matrix Functions - template - GLM_FUNC_DECL mat inverse(mat const& m); - - /// @} -}//namespace glm - -#include "detail/func_matrix.inl" diff --git a/third_party/glm/packing.hpp b/third_party/glm/packing.hpp deleted file mode 100755 index ca83ac1..0000000 --- a/third_party/glm/packing.hpp +++ /dev/null @@ -1,173 +0,0 @@ -/// @ref core -/// @file glm/packing.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions -/// @see gtc_packing -/// -/// @defgroup core_func_packing Floating-Point Pack and Unpack Functions -/// @ingroup core -/// -/// Provides GLSL functions to pack and unpack half, single and double-precision floating point values into more compact integer types. -/// -/// These functions do not operate component-wise, rather as described in each case. -/// -/// Include to use these core features. - -#pragma once - -#include "./ext/vector_uint2.hpp" -#include "./ext/vector_float2.hpp" -#include "./ext/vector_float4.hpp" - -namespace glm -{ - /// @addtogroup core_func_packing - /// @{ - - /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm2x16: round(clamp(c, 0, +1) * 65535.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. 
- /// - /// @see GLSL packUnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint packUnorm2x16(vec2 const& v); - - /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm2x16: round(clamp(v, -1, +1) * 32767.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see GLSL packSnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint packSnorm2x16(vec2 const& v); - - /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packUnorm4x8: round(clamp(c, 0, +1) * 255.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see GLSL packUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint packUnorm4x8(vec4 const& v); - - /// First, converts each component of the normalized floating-point value v into 8- or 16-bit integer values. - /// Then, the results are packed into the returned 32-bit unsigned integer. - /// - /// The conversion for component c of v to fixed point is done as follows: - /// packSnorm4x8: round(clamp(c, -1, +1) * 127.0) - /// - /// The first component of the vector will be written to the least significant bits of the output; - /// the last component will be written to the most significant bits. - /// - /// @see GLSL packSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint packSnorm4x8(vec4 const& v); - - /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm2x16: f / 65535.0 - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see GLSL unpackUnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec2 unpackUnorm2x16(uint p); - - /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. 
- /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm2x16: clamp(f / 32767.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see GLSL unpackSnorm2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec2 unpackSnorm2x16(uint p); - - /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackUnorm4x8: f / 255.0 - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see GLSL unpackUnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackUnorm4x8(uint p); - - /// First, unpacks a single 32-bit unsigned integer p into a pair of 16-bit unsigned integers, four 8-bit unsigned integers, or four 8-bit signed integers. - /// Then, each component is converted to a normalized floating-point value to generate the returned two- or four-component vector. - /// - /// The conversion for unpacked fixed-point value f to floating point is done as follows: - /// unpackSnorm4x8: clamp(f / 127.0, -1, +1) - /// - /// The first component of the returned vector will be extracted from the least significant bits of the input; - /// the last component will be extracted from the most significant bits. - /// - /// @see GLSL unpackSnorm4x8 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec4 unpackSnorm4x8(uint p); - - /// Returns a double-qualifier value obtained by packing the components of v into a 64-bit value. - /// If an IEEE 754 Inf or NaN is created, it will not signal, and the resulting floating point value is unspecified. - /// Otherwise, the bit- level representation of v is preserved. - /// The first vector component specifies the 32 least significant bits; - /// the second component specifies the 32 most significant bits. - /// - /// @see GLSL packDouble2x32 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL double packDouble2x32(uvec2 const& v); - - /// Returns a two-component unsigned integer vector representation of v. - /// The bit-level representation of v is preserved. - /// The first component of the vector contains the 32 least significant bits of the double; - /// the second component consists the 32 most significant bits. - /// - /// @see GLSL unpackDouble2x32 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uvec2 unpackDouble2x32(double v); - - /// Returns an unsigned integer obtained by converting the components of a two-component floating-point vector - /// to the 16-bit floating-point representation found in the OpenGL Specification, - /// and then packing these two 16- bit integers into a 32-bit unsigned integer. 
- /// The first vector component specifies the 16 least-significant bits of the result; - /// the second component specifies the 16 most-significant bits. - /// - /// @see GLSL packHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL uint packHalf2x16(vec2 const& v); - - /// Returns a two-component floating-point vector with components obtained by unpacking a 32-bit unsigned integer into a pair of 16-bit values, - /// interpreting those values as 16-bit floating-point numbers according to the OpenGL Specification, - /// and converting them to 32-bit floating-point values. - /// The first component of the vector is obtained from the 16 least-significant bits of v; - /// the second component is obtained from the 16 most-significant bits of v. - /// - /// @see GLSL unpackHalf2x16 man page - /// @see GLSL 4.20.8 specification, section 8.4 Floating-Point Pack and Unpack Functions - GLM_FUNC_DECL vec2 unpackHalf2x16(uint v); - - /// @} -}//namespace glm - -#include "detail/func_packing.inl" diff --git a/third_party/glm/simd/common.h b/third_party/glm/simd/common.h deleted file mode 100755 index 9b017cb..0000000 --- a/third_party/glm/simd/common.h +++ /dev/null @@ -1,240 +0,0 @@ -/// @ref simd -/// @file glm/simd/common.h - -#pragma once - -#include "platform.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_add(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_add_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_add(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_add_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sub(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_sub_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sub(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_sub_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_mul(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_mul_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_mul(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_mul_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_div_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_div(glm_f32vec4 a, glm_f32vec4 b) -{ - return _mm_div_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div_lowp(glm_f32vec4 a, glm_f32vec4 b) -{ - return glm_vec4_mul(a, _mm_rcp_ps(b)); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_swizzle_xyzw(glm_f32vec4 a) -{ -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - return _mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0)); -# else - return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0)); -# endif -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c) -{ -# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG) - return _mm_fmadd_ss(a, b, c); -# else - return _mm_add_ss(_mm_mul_ss(a, b), c); -# endif -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c) -{ -# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG) - return _mm_fmadd_ps(a, b, c); -# else - return glm_vec4_add(glm_vec4_mul(a, b), c); -# endif -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_abs(glm_f32vec4 x) -{ - return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF))); -} - -GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSSE3_BIT - return _mm_sign_epi32(x, x); -# else - glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31); - glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0); - glm_ivec4 
const sub0 = _mm_sub_epi32(inv0, sgn0); - return sub0; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x) -{ - glm_vec4 const zro0 = _mm_setzero_ps(); - glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0); - glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0); - glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f)); - glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f)); - glm_vec4 const or0 = _mm_or_ps(and0, and1); - return or0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT); -# else - glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000))); - glm_vec4 const and0 = _mm_and_ps(sgn0, x); - glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); - glm_vec4 const add0 = glm_vec4_add(x, or0); - glm_vec4 const sub0 = glm_vec4_sub(add0, or0); - return sub0; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - return _mm_floor_ps(x); -# else - glm_vec4 const rnd0 = glm_vec4_round(x); - glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0); - glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); - glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0); - return sub0; -# endif -} - -/* trunc TODO -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x) -{ - return glm_vec4(); -} -*/ - -//roundEven -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x) -{ - glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000))); - glm_vec4 const and0 = _mm_and_ps(sgn0, x); - glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); - glm_vec4 const add0 = glm_vec4_add(x, or0); - glm_vec4 const sub0 = glm_vec4_sub(add0, or0); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - return _mm_ceil_ps(x); -# else - glm_vec4 const rnd0 = glm_vec4_round(x); - glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0); - glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); - glm_vec4 const add0 = glm_vec4_add(rnd0, and0); - return add0; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x) -{ - glm_vec4 const flr0 = glm_vec4_floor(x); - glm_vec4 const sub0 = glm_vec4_sub(x, flr0); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y) -{ - glm_vec4 const div0 = glm_vec4_div(x, y); - glm_vec4 const flr0 = glm_vec4_floor(div0); - glm_vec4 const mul0 = glm_vec4_mul(y, flr0); - glm_vec4 const sub0 = glm_vec4_sub(x, mul0); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal) -{ - glm_vec4 const min0 = _mm_min_ps(v, maxVal); - glm_vec4 const max0 = _mm_max_ps(min0, minVal); - return max0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a) -{ - glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a); - glm_vec4 const mul0 = glm_vec4_mul(v1, sub0); - glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0); - return mad0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x) -{ - glm_vec4 const cmp = _mm_cmple_ps(x, edge); - return _mm_movemask_ps(cmp) == 0 ? 
_mm_set1_ps(1.0f) : _mm_setzero_ps(); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x) -{ - glm_vec4 const sub0 = glm_vec4_sub(x, edge0); - glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0); - glm_vec4 const div0 = glm_vec4_sub(sub0, sub1); - glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f)); - glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0); - glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0); - glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0); - glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2); - return mul2; -} - -// Agner Fog method -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x) -{ - glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer - glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit - glm_ivec4 const t3 = _mm_set1_epi32(int(0xFF000000)); // exponent mask - glm_ivec4 const t4 = _mm_and_si128(t2, t3); // exponent - glm_ivec4 const t5 = _mm_andnot_si128(t3, t2); // fraction - glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4); - glm_ivec4 const Nequal = _mm_cmpeq_epi32(t5, _mm_setzero_si128()); - glm_ivec4 const And = _mm_and_si128(Equal, Nequal); - return _mm_castsi128_ps(And); // exponent = all 1s and fraction != 0 -} - -// Agner Fog method -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x) -{ - glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer - glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit - return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(int(0xFF000000)))); // exponent is all 1s, fraction is 0 -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/exponential.h b/third_party/glm/simd/exponential.h deleted file mode 100755 index bc351d0..0000000 --- a/third_party/glm/simd/exponential.h +++ /dev/null @@ -1,20 +0,0 @@ -/// @ref simd -/// @file glm/simd/experimental.h - -#pragma once - -#include "platform.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sqrt_lowp(glm_f32vec4 x) -{ - return _mm_mul_ss(_mm_rsqrt_ss(x), x); -} - -GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sqrt_lowp(glm_f32vec4 x) -{ - return _mm_mul_ps(_mm_rsqrt_ps(x), x); -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/geometric.h b/third_party/glm/simd/geometric.h deleted file mode 100755 index 07d7cbc..0000000 --- a/third_party/glm/simd/geometric.h +++ /dev/null @@ -1,124 +0,0 @@ -/// @ref simd -/// @file glm/simd/geometric.h - -#pragma once - -#include "common.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_DECL glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2); -GLM_FUNC_DECL glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2); - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_length(glm_vec4 x) -{ - glm_vec4 const dot0 = glm_vec4_dot(x, x); - glm_vec4 const sqt0 = _mm_sqrt_ps(dot0); - return sqt0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_distance(glm_vec4 p0, glm_vec4 p1) -{ - glm_vec4 const sub0 = _mm_sub_ps(p0, p1); - glm_vec4 const len0 = glm_vec4_length(sub0); - return len0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2) -{ -# if GLM_ARCH & GLM_ARCH_AVX_BIT - return _mm_dp_ps(v1, v2, 0xff); -# elif GLM_ARCH & GLM_ARCH_SSE3_BIT - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const hadd0 = _mm_hadd_ps(mul0, mul0); - glm_vec4 const hadd1 = _mm_hadd_ps(hadd0, hadd0); - return hadd1; -# else - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const swp0 = _mm_shuffle_ps(mul0, mul0, 
_MM_SHUFFLE(2, 3, 0, 1)); - glm_vec4 const add0 = _mm_add_ps(mul0, swp0); - glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); - glm_vec4 const add1 = _mm_add_ps(add0, swp1); - return add1; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2) -{ -# if GLM_ARCH & GLM_ARCH_AVX_BIT - return _mm_dp_ps(v1, v2, 0xff); -# elif GLM_ARCH & GLM_ARCH_SSE3_BIT - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const had0 = _mm_hadd_ps(mul0, mul0); - glm_vec4 const had1 = _mm_hadd_ps(had0, had0); - return had1; -# else - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const mov0 = _mm_movehl_ps(mul0, mul0); - glm_vec4 const add0 = _mm_add_ps(mov0, mul0); - glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1); - glm_vec4 const add1 = _mm_add_ss(add0, swp1); - return add1; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_cross(glm_vec4 v1, glm_vec4 v2) -{ - glm_vec4 const swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1)); - glm_vec4 const swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2)); - glm_vec4 const swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1)); - glm_vec4 const swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2)); - glm_vec4 const mul0 = _mm_mul_ps(swp0, swp3); - glm_vec4 const mul1 = _mm_mul_ps(swp1, swp2); - glm_vec4 const sub0 = _mm_sub_ps(mul0, mul1); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_normalize(glm_vec4 v) -{ - glm_vec4 const dot0 = glm_vec4_dot(v, v); - glm_vec4 const isr0 = _mm_rsqrt_ps(dot0); - glm_vec4 const mul0 = _mm_mul_ps(v, isr0); - return mul0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_faceforward(glm_vec4 N, glm_vec4 I, glm_vec4 Nref) -{ - glm_vec4 const dot0 = glm_vec4_dot(Nref, I); - glm_vec4 const sgn0 = glm_vec4_sign(dot0); - glm_vec4 const mul0 = _mm_mul_ps(sgn0, _mm_set1_ps(-1.0f)); - glm_vec4 const mul1 = _mm_mul_ps(N, mul0); - return mul1; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_reflect(glm_vec4 I, glm_vec4 N) -{ - glm_vec4 const dot0 = glm_vec4_dot(N, I); - glm_vec4 const mul0 = _mm_mul_ps(N, dot0); - glm_vec4 const mul1 = _mm_mul_ps(mul0, _mm_set1_ps(2.0f)); - glm_vec4 const sub0 = _mm_sub_ps(I, mul1); - return sub0; -} - -GLM_FUNC_QUALIFIER __m128 glm_vec4_refract(glm_vec4 I, glm_vec4 N, glm_vec4 eta) -{ - glm_vec4 const dot0 = glm_vec4_dot(N, I); - glm_vec4 const mul0 = _mm_mul_ps(eta, eta); - glm_vec4 const mul1 = _mm_mul_ps(dot0, dot0); - glm_vec4 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), mul0); - glm_vec4 const sub1 = _mm_sub_ps(_mm_set1_ps(1.0f), mul1); - glm_vec4 const mul2 = _mm_mul_ps(sub0, sub1); - - if(_mm_movemask_ps(_mm_cmplt_ss(mul2, _mm_set1_ps(0.0f))) == 0) - return _mm_set1_ps(0.0f); - - glm_vec4 const sqt0 = _mm_sqrt_ps(mul2); - glm_vec4 const mad0 = glm_vec4_fma(eta, dot0, sqt0); - glm_vec4 const mul4 = _mm_mul_ps(mad0, N); - glm_vec4 const mul5 = _mm_mul_ps(eta, I); - glm_vec4 const sub2 = _mm_sub_ps(mul5, mul4); - - return sub2; -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/integer.h b/third_party/glm/simd/integer.h deleted file mode 100755 index 9381418..0000000 --- a/third_party/glm/simd/integer.h +++ /dev/null @@ -1,115 +0,0 @@ -/// @ref simd -/// @file glm/simd/integer.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave(glm_uvec4 x) -{ - glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); - glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); - glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); - glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); - 
glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); - - glm_uvec4 Reg1; - glm_uvec4 Reg2; - - // REG1 = x; - // REG2 = y; - //Reg1 = _mm_unpacklo_epi64(x, y); - Reg1 = x; - - //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); - //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); - Reg2 = _mm_slli_si128(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask4); - - //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); - //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); - Reg2 = _mm_slli_si128(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask3); - - //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); - //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); - Reg2 = _mm_slli_epi32(Reg1, 4); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask2); - - //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); - //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); - Reg2 = _mm_slli_epi32(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask1); - - //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); - //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask0); - - //return REG1 | (REG2 << 1); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg2 = _mm_srli_si128(Reg2, 8); - Reg1 = _mm_or_si128(Reg1, Reg2); - - return Reg1; -} - -GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave2(glm_uvec4 x, glm_uvec4 y) -{ - glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); - glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); - glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); - glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); - glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); - - glm_uvec4 Reg1; - glm_uvec4 Reg2; - - // REG1 = x; - // REG2 = y; - Reg1 = _mm_unpacklo_epi64(x, y); - - //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); - //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); - Reg2 = _mm_slli_si128(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask4); - - //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); - //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); - Reg2 = _mm_slli_si128(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask3); - - //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); - //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); - Reg2 = _mm_slli_epi32(Reg1, 4); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask2); - - //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); - //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); - Reg2 = _mm_slli_epi32(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask1); - - //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); - //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask0); - - //return REG1 | (REG2 << 1); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg2 = _mm_srli_si128(Reg2, 8); - Reg1 = _mm_or_si128(Reg1, Reg2); - - return Reg1; -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/matrix.h b/third_party/glm/simd/matrix.h deleted file mode 100755 index b6c42ea..0000000 --- 
a/third_party/glm/simd/matrix.h +++ /dev/null @@ -1,1028 +0,0 @@ -/// @ref simd -/// @file glm/simd/matrix.h - -#pragma once - -#include "geometric.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER void glm_mat4_matrixCompMult(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - out[0] = _mm_mul_ps(in1[0], in2[0]); - out[1] = _mm_mul_ps(in1[1], in2[1]); - out[2] = _mm_mul_ps(in1[2], in2[2]); - out[3] = _mm_mul_ps(in1[3], in2[3]); -} - -GLM_FUNC_QUALIFIER void glm_mat4_add(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - out[0] = _mm_add_ps(in1[0], in2[0]); - out[1] = _mm_add_ps(in1[1], in2[1]); - out[2] = _mm_add_ps(in1[2], in2[2]); - out[3] = _mm_add_ps(in1[3], in2[3]); -} - -GLM_FUNC_QUALIFIER void glm_mat4_sub(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - out[0] = _mm_sub_ps(in1[0], in2[0]); - out[1] = _mm_sub_ps(in1[1], in2[1]); - out[2] = _mm_sub_ps(in1[2], in2[2]); - out[3] = _mm_sub_ps(in1[3], in2[3]); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_mul_vec4(glm_vec4 const m[4], glm_vec4 v) -{ - __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); - __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); - __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(m[0], v0); - __m128 m1 = _mm_mul_ps(m[1], v1); - __m128 m2 = _mm_mul_ps(m[2], v2); - __m128 m3 = _mm_mul_ps(m[3], v3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - return a2; -} - -GLM_FUNC_QUALIFIER __m128 glm_vec4_mul_mat4(glm_vec4 v, glm_vec4 const m[4]) -{ - __m128 i0 = m[0]; - __m128 i1 = m[1]; - __m128 i2 = m[2]; - __m128 i3 = m[3]; - - __m128 m0 = _mm_mul_ps(v, i0); - __m128 m1 = _mm_mul_ps(v, i1); - __m128 m2 = _mm_mul_ps(v, i2); - __m128 m3 = _mm_mul_ps(v, i3); - - __m128 u0 = _mm_unpacklo_ps(m0, m1); - __m128 u1 = _mm_unpackhi_ps(m0, m1); - __m128 a0 = _mm_add_ps(u0, u1); - - __m128 u2 = _mm_unpacklo_ps(m2, m3); - __m128 u3 = _mm_unpackhi_ps(m2, m3); - __m128 a1 = _mm_add_ps(u2, u3); - - __m128 f0 = _mm_movelh_ps(a0, a1); - __m128 f1 = _mm_movehl_ps(a1, a0); - __m128 f2 = _mm_add_ps(f0, f1); - - return f2; -} - -GLM_FUNC_QUALIFIER void glm_mat4_mul(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - { - __m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[0] = a2; - } - - { - __m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[1] = a2; - } - - { - __m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 
0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[2] = a2; - } - - { - //(__m128&)_mm_shuffle_epi32(__m128i&)in2[0], _MM_SHUFFLE(3, 3, 3, 3)) - __m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[3] = a2; - } -} - -GLM_FUNC_QUALIFIER void glm_mat4_transpose(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44); - __m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE); - __m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44); - __m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE); - - out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88); - out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD); - out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88); - out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_highp(glm_vec4 const in[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * 
m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - // m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); 
- - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - return Det0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_lowp(glm_vec4 const m[4]) -{ - // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128( - - //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - //T SubFactor04 = m[2][0] 
* m[3][2] - m[3][0] * m[2][2]; - //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - // First 2 columns - __m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2))); - __m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3))); - __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); - - // Second 2 columns - __m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3))); - __m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2))); - __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); - - // Columns subtraction - __m128 SubE = _mm_sub_ps(MulA, MulB); - - // Last 2 rows - __m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2))); - __m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0))); - __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); - __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); - - //vec<4, T, Q> DetCof( - // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - __m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0))); - __m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1))); - __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); - - __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); - __m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1]; - __m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2))); - __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); - - __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); - - __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); - __m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0))); - __m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3))); - __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); - - __m128 AddRes = _mm_add_ps(SubRes, MulFacC); - __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); - - //return m[0][0] * DetCof[0] - // + m[0][1] * DetCof[1] - // + m[0][2] * DetCof[2] - // + m[0][3] * DetCof[3]; - - return glm_vec4_dot(m[0], DetCof); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant(glm_vec4 const m[4]) -{ - // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add) - - //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - // First 2 columns - __m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2)); - __m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3)); - __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); - - // Second 2 columns - __m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3)); - __m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2)); - __m128 MulB 
= _mm_mul_ps(Swp2B, Swp3B); - - // Columns subtraction - __m128 SubE = _mm_sub_ps(MulA, MulB); - - // Last 2 rows - __m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2)); - __m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0)); - __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); - __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); - - //vec<4, T, Q> DetCof( - // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - __m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0)); - __m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1)); - __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); - - __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); - __m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1]; - __m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2)); - __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); - - __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); - - __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); - __m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0)); - __m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3)); - __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); - - __m128 AddRes = _mm_add_ps(SubRes, MulFacC); - __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); - - //return m[0][0] * DetCof[0] - // + m[0][1] * DetCof[1] - // + m[0][2] * DetCof[2] - // + m[0][3] * DetCof[3]; - - return glm_vec4_dot(m[0], DetCof); -} - -GLM_FUNC_QUALIFIER void glm_mat4_inverse(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, 
Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - 
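For reference, the scalar algorithm that the SSE intrinsics in this removed glm_mat4_inverse (and in the _lowp variant further down) vectorize is the classical adjugate inverse; this is a sketch of the underlying math only, written in the usual row/column notation rather than glm's column-major m[col][row] indexing, and it is not glm API documentation. Writing M_{ij} for the 3x3 minor obtained by deleting row i and column j of A:

    \det A = \sum_{j=0}^{3} (-1)^{j}\, a_{0j}\, M_{0j}
    \operatorname{adj}(A)_{ij} = (-1)^{i+j}\, M_{ji}
    A^{-1} = \frac{1}{\det A}\,\operatorname{adj}(A)

Each 3x3 minor is itself expanded into 2x2 determinants (the SubFactor values in the comments). Because every 2x2 sub-factor is shared by several cofactors, the code computes Fac0..Fac5 once, broadcasts them to build the cofactor columns Inv0..Inv3, and finally multiplies by the reciprocal of the determinant (Rcp0), which is the "Inverse /= Determinant" step noted in the comments.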
// m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - __m128 Rcp0 = _mm_div_ps(_mm_set1_ps(1.0f), Det0); - //__m128 Rcp0 = _mm_rcp_ps(Det0); - - // Inverse /= Determinant; - out[0] = _mm_mul_ps(Inv0, Rcp0); - out[1] = _mm_mul_ps(Inv1, Rcp0); - out[2] = _mm_mul_ps(Inv2, Rcp0); - 
out[3] = _mm_mul_ps(Inv3, Rcp0); -} - -GLM_FUNC_QUALIFIER void glm_mat4_inverse_lowp(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType 
SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - // m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 
Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - __m128 Rcp0 = _mm_rcp_ps(Det0); - //__m128 Rcp0 = _mm_div_ps(one, Det0); - // Inverse /= Determinant; - out[0] = _mm_mul_ps(Inv0, Rcp0); - out[1] = _mm_mul_ps(Inv1, Rcp0); - out[2] = _mm_mul_ps(Inv2, Rcp0); - out[3] = _mm_mul_ps(Inv3, Rcp0); -} -/* -GLM_FUNC_QUALIFIER void glm_mat4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4]) -{ - float a = glm::radians(Angle); - float c = cos(a); - float s = sin(a); - - glm::vec4 AxisA(v[0], v[1], v[2], float(0)); - __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x); - __m128 AxisC = detail::sse_nrm_ps(AxisB); - - __m128 Cos0 = _mm_set_ss(c); - __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Sin0 = _mm_set_ss(s); - __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0)); - - // vec<3, T, Q> temp = (valType(1) - c) * axis; - __m128 Temp0 = _mm_sub_ps(one, CosA); - __m128 Temp1 = _mm_mul_ps(Temp0, AxisC); - - //Rotate[0][0] = c + temp[0] * axis[0]; - //Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; - //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; - __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC); - __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0)); - __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0); - __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f); - __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3)); - __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2); - __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3); - - //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; - //Rotate[1][1] = c + temp[1] * axis[1]; - //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; - __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1)); - __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC); - __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1)); - __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0); - __m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f); - __m128 TmpB2 = _mm_shuffle_ps(AxisC, 
AxisC, _MM_SHUFFLE(3, 0, 3, 2)); - __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2); - __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3); - - //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; - //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; - //Rotate[2][2] = c + temp[2] * axis[2]; - __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2)); - __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC); - __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1)); - __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0); - __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f); - __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1)); - __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2); - __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3); - - __m128 Result[4]; - Result[0] = TmpA4; - Result[1] = TmpB4; - Result[2] = TmpC4; - Result[3] = _mm_set_ps(1, 0, 0, 0); - - //mat<4, 4, valType> Result; - //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; - //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; - //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; - //Result[3] = m[3]; - //return Result; - sse_mul_ps(in, Result, out); -} -*/ -GLM_FUNC_QUALIFIER void glm_mat4_outerProduct(__m128 const& c, __m128 const& r, __m128 out[4]) -{ - out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0))); - out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1))); - out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2))); - out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3))); -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/neon.h b/third_party/glm/simd/neon.h deleted file mode 100755 index 6c38b06..0000000 --- a/third_party/glm/simd/neon.h +++ /dev/null @@ -1,155 +0,0 @@ -/// @ref simd_neon -/// @file glm/simd/neon.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_NEON_BIT -#include - -namespace glm { - namespace neon { - static float32x4_t dupq_lane(float32x4_t vsrc, int lane) { - switch(lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - case 0: return vdupq_laneq_f32(vsrc, 0); - case 1: return vdupq_laneq_f32(vsrc, 1); - case 2: return vdupq_laneq_f32(vsrc, 2); - case 3: return vdupq_laneq_f32(vsrc, 3); -#else - case 0: return vdupq_n_f32(vgetq_lane_f32(vsrc, 0)); - case 1: return vdupq_n_f32(vgetq_lane_f32(vsrc, 1)); - case 2: return vdupq_n_f32(vgetq_lane_f32(vsrc, 2)); - case 3: return vdupq_n_f32(vgetq_lane_f32(vsrc, 3)); -#endif - } - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); - } - - static float32x2_t dup_lane(float32x4_t vsrc, int lane) { - switch(lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - case 0: return vdup_laneq_f32(vsrc, 0); - case 1: return vdup_laneq_f32(vsrc, 1); - case 2: return vdup_laneq_f32(vsrc, 2); - case 3: return vdup_laneq_f32(vsrc, 3); -#else - case 0: return vdup_n_f32(vgetq_lane_f32(vsrc, 0)); - case 1: return vdup_n_f32(vgetq_lane_f32(vsrc, 1)); - case 2: return vdup_n_f32(vgetq_lane_f32(vsrc, 2)); - case 3: return vdup_n_f32(vgetq_lane_f32(vsrc, 3)); -#endif - } - assert(!"Unreachable code executed!"); - return vdup_n_f32(0.0f); - } - - static float32x4_t copy_lane(float32x4_t vdst, int dlane, float32x4_t vsrc, int slane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - switch(dlane) { - case 0: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 0, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 0, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 0, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 0, vsrc, 3); - } - 
assert(!"Unreachable code executed!"); - case 1: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 1, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 1, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 1, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 1, vsrc, 3); - } - assert(!"Unreachable code executed!"); - case 2: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 2, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 2, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 2, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 2, vsrc, 3); - } - assert(!"Unreachable code executed!"); - case 3: - switch(slane) { - case 0: return vcopyq_laneq_f32(vdst, 3, vsrc, 0); - case 1: return vcopyq_laneq_f32(vdst, 3, vsrc, 1); - case 2: return vcopyq_laneq_f32(vdst, 3, vsrc, 2); - case 3: return vcopyq_laneq_f32(vdst, 3, vsrc, 3); - } - assert(!"Unreachable code executed!"); - } -#else - - float l; - switch(slane) { - case 0: l = vgetq_lane_f32(vsrc, 0); break; - case 1: l = vgetq_lane_f32(vsrc, 1); break; - case 2: l = vgetq_lane_f32(vsrc, 2); break; - case 3: l = vgetq_lane_f32(vsrc, 3); break; - default: - assert(!"Unreachable code executed!"); - } - switch(dlane) { - case 0: return vsetq_lane_f32(l, vdst, 0); - case 1: return vsetq_lane_f32(l, vdst, 1); - case 2: return vsetq_lane_f32(l, vdst, 2); - case 3: return vsetq_lane_f32(l, vdst, 3); - } -#endif - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); - } - - static float32x4_t mul_lane(float32x4_t v, float32x4_t vlane, int lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT - switch(lane) { - case 0: return vmulq_laneq_f32(v, vlane, 0); break; - case 1: return vmulq_laneq_f32(v, vlane, 1); break; - case 2: return vmulq_laneq_f32(v, vlane, 2); break; - case 3: return vmulq_laneq_f32(v, vlane, 3); break; - default: - assert(!"Unreachable code executed!"); - } - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); -#else - return vmulq_f32(v, dupq_lane(vlane, lane)); -#endif - } - - static float32x4_t madd_lane(float32x4_t acc, float32x4_t v, float32x4_t vlane, int lane) { -#if GLM_ARCH & GLM_ARCH_ARMV8_BIT -#ifdef GLM_CONFIG_FORCE_FMA -# define FMADD_LANE(acc, x, y, L) do { asm volatile ("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(x), "w"(dup_lane(y, L))); } while(0) -#else -# define FMADD_LANE(acc, x, y, L) do { acc = vmlaq_laneq_f32(acc, x, y, L); } while(0) -#endif - - switch(lane) { - case 0: - FMADD_LANE(acc, v, vlane, 0); - return acc; - case 1: - FMADD_LANE(acc, v, vlane, 1); - return acc; - case 2: - FMADD_LANE(acc, v, vlane, 2); - return acc; - case 3: - FMADD_LANE(acc, v, vlane, 3); - return acc; - default: - assert(!"Unreachable code executed!"); - } - assert(!"Unreachable code executed!"); - return vdupq_n_f32(0.0f); -# undef FMADD_LANE -#else - return vaddq_f32(acc, vmulq_f32(v, dupq_lane(vlane, lane))); -#endif - } - } //namespace neon -} // namespace glm -#endif // GLM_ARCH & GLM_ARCH_NEON_BIT diff --git a/third_party/glm/simd/packing.h b/third_party/glm/simd/packing.h deleted file mode 100755 index 609163e..0000000 --- a/third_party/glm/simd/packing.h +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref simd -/// @file glm/simd/packing.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/simd/platform.h b/third_party/glm/simd/platform.h deleted file mode 100755 index ad25cc1..0000000 --- a/third_party/glm/simd/platform.h +++ /dev/null @@ -1,398 +0,0 @@ -#pragma once - 
-/////////////////////////////////////////////////////////////////////////////////// -// Platform - -#define GLM_PLATFORM_UNKNOWN 0x00000000 -#define GLM_PLATFORM_WINDOWS 0x00010000 -#define GLM_PLATFORM_LINUX 0x00020000 -#define GLM_PLATFORM_APPLE 0x00040000 -//#define GLM_PLATFORM_IOS 0x00080000 -#define GLM_PLATFORM_ANDROID 0x00100000 -#define GLM_PLATFORM_CHROME_NACL 0x00200000 -#define GLM_PLATFORM_UNIX 0x00400000 -#define GLM_PLATFORM_QNXNTO 0x00800000 -#define GLM_PLATFORM_WINCE 0x01000000 -#define GLM_PLATFORM_CYGWIN 0x02000000 - -#ifdef GLM_FORCE_PLATFORM_UNKNOWN -# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN -#elif defined(__CYGWIN__) -# define GLM_PLATFORM GLM_PLATFORM_CYGWIN -#elif defined(__QNXNTO__) -# define GLM_PLATFORM GLM_PLATFORM_QNXNTO -#elif defined(__APPLE__) -# define GLM_PLATFORM GLM_PLATFORM_APPLE -#elif defined(WINCE) -# define GLM_PLATFORM GLM_PLATFORM_WINCE -#elif defined(_WIN32) -# define GLM_PLATFORM GLM_PLATFORM_WINDOWS -#elif defined(__native_client__) -# define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL -#elif defined(__ANDROID__) -# define GLM_PLATFORM GLM_PLATFORM_ANDROID -#elif defined(__linux) -# define GLM_PLATFORM GLM_PLATFORM_LINUX -#elif defined(__unix) -# define GLM_PLATFORM GLM_PLATFORM_UNIX -#else -# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN -#endif// - -/////////////////////////////////////////////////////////////////////////////////// -// Compiler - -#define GLM_COMPILER_UNKNOWN 0x00000000 - -// Intel -#define GLM_COMPILER_INTEL 0x00100000 -#define GLM_COMPILER_INTEL14 0x00100040 -#define GLM_COMPILER_INTEL15 0x00100050 -#define GLM_COMPILER_INTEL16 0x00100060 -#define GLM_COMPILER_INTEL17 0x00100070 - -// Visual C++ defines -#define GLM_COMPILER_VC 0x01000000 -#define GLM_COMPILER_VC12 0x01000001 -#define GLM_COMPILER_VC14 0x01000002 -#define GLM_COMPILER_VC15 0x01000003 -#define GLM_COMPILER_VC15_3 0x01000004 -#define GLM_COMPILER_VC15_5 0x01000005 -#define GLM_COMPILER_VC15_6 0x01000006 -#define GLM_COMPILER_VC15_7 0x01000007 -#define GLM_COMPILER_VC15_8 0x01000008 -#define GLM_COMPILER_VC15_9 0x01000009 -#define GLM_COMPILER_VC16 0x0100000A - -// GCC defines -#define GLM_COMPILER_GCC 0x02000000 -#define GLM_COMPILER_GCC46 0x020000D0 -#define GLM_COMPILER_GCC47 0x020000E0 -#define GLM_COMPILER_GCC48 0x020000F0 -#define GLM_COMPILER_GCC49 0x02000100 -#define GLM_COMPILER_GCC5 0x02000200 -#define GLM_COMPILER_GCC6 0x02000300 -#define GLM_COMPILER_GCC7 0x02000400 -#define GLM_COMPILER_GCC8 0x02000500 - -// CUDA -#define GLM_COMPILER_CUDA 0x10000000 -#define GLM_COMPILER_CUDA75 0x10000001 -#define GLM_COMPILER_CUDA80 0x10000002 -#define GLM_COMPILER_CUDA90 0x10000004 - -// SYCL -#define GLM_COMPILER_SYCL 0x00300000 - -// Clang -#define GLM_COMPILER_CLANG 0x20000000 -#define GLM_COMPILER_CLANG34 0x20000050 -#define GLM_COMPILER_CLANG35 0x20000060 -#define GLM_COMPILER_CLANG36 0x20000070 -#define GLM_COMPILER_CLANG37 0x20000080 -#define GLM_COMPILER_CLANG38 0x20000090 -#define GLM_COMPILER_CLANG39 0x200000A0 -#define GLM_COMPILER_CLANG40 0x200000B0 -#define GLM_COMPILER_CLANG41 0x200000C0 -#define GLM_COMPILER_CLANG42 0x200000D0 - -// Build model -#define GLM_MODEL_32 0x00000010 -#define GLM_MODEL_64 0x00000020 - -// Force generic C++ compiler -#ifdef GLM_FORCE_COMPILER_UNKNOWN -# define GLM_COMPILER GLM_COMPILER_UNKNOWN - -#elif defined(__INTEL_COMPILER) -# if __INTEL_COMPILER >= 1700 -# define GLM_COMPILER GLM_COMPILER_INTEL17 -# elif __INTEL_COMPILER >= 1600 -# define GLM_COMPILER GLM_COMPILER_INTEL16 -# elif __INTEL_COMPILER >= 1500 -# define 
GLM_COMPILER GLM_COMPILER_INTEL15 -# elif __INTEL_COMPILER >= 1400 -# define GLM_COMPILER GLM_COMPILER_INTEL14 -# elif __INTEL_COMPILER < 1400 -# error "GLM requires ICC 2013 SP1 or newer" -# endif - -// CUDA -#elif defined(__CUDACC__) -# if !defined(CUDA_VERSION) && !defined(GLM_FORCE_CUDA) -# include // make sure version is defined since nvcc does not define it itself! -# endif -# if CUDA_VERSION >= 8000 -# define GLM_COMPILER GLM_COMPILER_CUDA80 -# elif CUDA_VERSION >= 7500 -# define GLM_COMPILER GLM_COMPILER_CUDA75 -# elif CUDA_VERSION >= 7000 -# define GLM_COMPILER GLM_COMPILER_CUDA70 -# elif CUDA_VERSION < 7000 -# error "GLM requires CUDA 7.0 or higher" -# endif - -// SYCL -#elif defined(__SYCL_DEVICE_ONLY__) -# define GLM_COMPILER GLM_COMPILER_SYCL - -// Clang -#elif defined(__clang__) -# if defined(__apple_build_version__) -# if (__clang_major__ < 6) -# error "GLM requires Clang 3.4 / Apple Clang 6.0 or higher" -# elif __clang_major__ == 6 && __clang_minor__ == 0 -# define GLM_COMPILER GLM_COMPILER_CLANG35 -# elif __clang_major__ == 6 && __clang_minor__ >= 1 -# define GLM_COMPILER GLM_COMPILER_CLANG36 -# elif __clang_major__ >= 7 -# define GLM_COMPILER GLM_COMPILER_CLANG37 -# endif -# else -# if ((__clang_major__ == 3) && (__clang_minor__ < 4)) || (__clang_major__ < 3) -# error "GLM requires Clang 3.4 or higher" -# elif __clang_major__ == 3 && __clang_minor__ == 4 -# define GLM_COMPILER GLM_COMPILER_CLANG34 -# elif __clang_major__ == 3 && __clang_minor__ == 5 -# define GLM_COMPILER GLM_COMPILER_CLANG35 -# elif __clang_major__ == 3 && __clang_minor__ == 6 -# define GLM_COMPILER GLM_COMPILER_CLANG36 -# elif __clang_major__ == 3 && __clang_minor__ == 7 -# define GLM_COMPILER GLM_COMPILER_CLANG37 -# elif __clang_major__ == 3 && __clang_minor__ == 8 -# define GLM_COMPILER GLM_COMPILER_CLANG38 -# elif __clang_major__ == 3 && __clang_minor__ >= 9 -# define GLM_COMPILER GLM_COMPILER_CLANG39 -# elif __clang_major__ == 4 && __clang_minor__ == 0 -# define GLM_COMPILER GLM_COMPILER_CLANG40 -# elif __clang_major__ == 4 && __clang_minor__ == 1 -# define GLM_COMPILER GLM_COMPILER_CLANG41 -# elif __clang_major__ == 4 && __clang_minor__ >= 2 -# define GLM_COMPILER GLM_COMPILER_CLANG42 -# elif __clang_major__ >= 4 -# define GLM_COMPILER GLM_COMPILER_CLANG42 -# endif -# endif - -// Visual C++ -#elif defined(_MSC_VER) -# if _MSC_VER >= 1920 -# define GLM_COMPILER GLM_COMPILER_VC16 -# elif _MSC_VER >= 1916 -# define GLM_COMPILER GLM_COMPILER_VC15_9 -# elif _MSC_VER >= 1915 -# define GLM_COMPILER GLM_COMPILER_VC15_8 -# elif _MSC_VER >= 1914 -# define GLM_COMPILER GLM_COMPILER_VC15_7 -# elif _MSC_VER >= 1913 -# define GLM_COMPILER GLM_COMPILER_VC15_6 -# elif _MSC_VER >= 1912 -# define GLM_COMPILER GLM_COMPILER_VC15_5 -# elif _MSC_VER >= 1911 -# define GLM_COMPILER GLM_COMPILER_VC15_3 -# elif _MSC_VER >= 1910 -# define GLM_COMPILER GLM_COMPILER_VC15 -# elif _MSC_VER >= 1900 -# define GLM_COMPILER GLM_COMPILER_VC14 -# elif _MSC_VER >= 1800 -# define GLM_COMPILER GLM_COMPILER_VC12 -# elif _MSC_VER < 1800 -# error "GLM requires Visual C++ 12 - 2013 or higher" -# endif//_MSC_VER - -// G++ -#elif defined(__GNUC__) || defined(__MINGW32__) -# if __GNUC__ >= 8 -# define GLM_COMPILER GLM_COMPILER_GCC8 -# elif __GNUC__ >= 7 -# define GLM_COMPILER GLM_COMPILER_GCC7 -# elif __GNUC__ >= 6 -# define GLM_COMPILER GLM_COMPILER_GCC6 -# elif __GNUC__ >= 5 -# define GLM_COMPILER GLM_COMPILER_GCC5 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 -# define GLM_COMPILER GLM_COMPILER_GCC49 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 
8 -# define GLM_COMPILER GLM_COMPILER_GCC48 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 7 -# define GLM_COMPILER GLM_COMPILER_GCC47 -# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 6 -# define GLM_COMPILER GLM_COMPILER_GCC46 -# elif ((__GNUC__ == 4) && (__GNUC_MINOR__ < 6)) || (__GNUC__ < 4) -# error "GLM requires GCC 4.6 or higher" -# endif - -#else -# define GLM_COMPILER GLM_COMPILER_UNKNOWN -#endif - -#ifndef GLM_COMPILER -# error "GLM_COMPILER undefined, your compiler may not be supported by GLM. Add #define GLM_COMPILER 0 to ignore this message." -#endif//GLM_COMPILER - -/////////////////////////////////////////////////////////////////////////////////// -// Instruction sets - -// User defines: GLM_FORCE_PURE GLM_FORCE_INTRINSICS GLM_FORCE_SSE2 GLM_FORCE_SSE3 GLM_FORCE_AVX GLM_FORCE_AVX2 GLM_FORCE_AVX2 - -#define GLM_ARCH_MIPS_BIT (0x10000000) -#define GLM_ARCH_PPC_BIT (0x20000000) -#define GLM_ARCH_ARM_BIT (0x40000000) -#define GLM_ARCH_ARMV8_BIT (0x01000000) -#define GLM_ARCH_X86_BIT (0x80000000) - -#define GLM_ARCH_SIMD_BIT (0x00001000) - -#define GLM_ARCH_NEON_BIT (0x00000001) -#define GLM_ARCH_SSE_BIT (0x00000002) -#define GLM_ARCH_SSE2_BIT (0x00000004) -#define GLM_ARCH_SSE3_BIT (0x00000008) -#define GLM_ARCH_SSSE3_BIT (0x00000010) -#define GLM_ARCH_SSE41_BIT (0x00000020) -#define GLM_ARCH_SSE42_BIT (0x00000040) -#define GLM_ARCH_AVX_BIT (0x00000080) -#define GLM_ARCH_AVX2_BIT (0x00000100) - -#define GLM_ARCH_UNKNOWN (0) -#define GLM_ARCH_X86 (GLM_ARCH_X86_BIT) -#define GLM_ARCH_SSE (GLM_ARCH_SSE_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_X86) -#define GLM_ARCH_SSE2 (GLM_ARCH_SSE2_BIT | GLM_ARCH_SSE) -#define GLM_ARCH_SSE3 (GLM_ARCH_SSE3_BIT | GLM_ARCH_SSE2) -#define GLM_ARCH_SSSE3 (GLM_ARCH_SSSE3_BIT | GLM_ARCH_SSE3) -#define GLM_ARCH_SSE41 (GLM_ARCH_SSE41_BIT | GLM_ARCH_SSSE3) -#define GLM_ARCH_SSE42 (GLM_ARCH_SSE42_BIT | GLM_ARCH_SSE41) -#define GLM_ARCH_AVX (GLM_ARCH_AVX_BIT | GLM_ARCH_SSE42) -#define GLM_ARCH_AVX2 (GLM_ARCH_AVX2_BIT | GLM_ARCH_AVX) -#define GLM_ARCH_ARM (GLM_ARCH_ARM_BIT) -#define GLM_ARCH_ARMV8 (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM | GLM_ARCH_ARMV8_BIT) -#define GLM_ARCH_NEON (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM) -#define GLM_ARCH_MIPS (GLM_ARCH_MIPS_BIT) -#define GLM_ARCH_PPC (GLM_ARCH_PPC_BIT) - -#if defined(GLM_FORCE_ARCH_UNKNOWN) || defined(GLM_FORCE_PURE) -# define GLM_ARCH GLM_ARCH_UNKNOWN -#elif defined(GLM_FORCE_NEON) -# if __ARM_ARCH >= 8 -# define GLM_ARCH (GLM_ARCH_ARMV8) -# else -# define GLM_ARCH (GLM_ARCH_NEON) -# endif -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_AVX2) -# define GLM_ARCH (GLM_ARCH_AVX2) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_AVX) -# define GLM_ARCH (GLM_ARCH_AVX) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE42) -# define GLM_ARCH (GLM_ARCH_SSE42) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE41) -# define GLM_ARCH (GLM_ARCH_SSE41) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSSE3) -# define GLM_ARCH (GLM_ARCH_SSSE3) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE3) -# define GLM_ARCH (GLM_ARCH_SSE3) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE2) -# define GLM_ARCH (GLM_ARCH_SSE2) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_SSE) -# define GLM_ARCH (GLM_ARCH_SSE) -# define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY) -# if defined(__AVX2__) -# define GLM_ARCH (GLM_ARCH_AVX2) -# elif defined(__AVX__) -# define GLM_ARCH (GLM_ARCH_AVX) -# elif defined(__SSE4_2__) 
-# define GLM_ARCH (GLM_ARCH_SSE42) -# elif defined(__SSE4_1__) -# define GLM_ARCH (GLM_ARCH_SSE41) -# elif defined(__SSSE3__) -# define GLM_ARCH (GLM_ARCH_SSSE3) -# elif defined(__SSE3__) -# define GLM_ARCH (GLM_ARCH_SSE3) -# elif defined(__SSE2__) || defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86_FP) -# define GLM_ARCH (GLM_ARCH_SSE2) -# elif defined(__i386__) -# define GLM_ARCH (GLM_ARCH_X86) -# elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8) -# define GLM_ARCH (GLM_ARCH_ARMV8) -# elif defined(__ARM_NEON) -# define GLM_ARCH (GLM_ARCH_ARM | GLM_ARCH_NEON) -# elif defined(__arm__ ) || defined(_M_ARM) -# define GLM_ARCH (GLM_ARCH_ARM) -# elif defined(__mips__ ) -# define GLM_ARCH (GLM_ARCH_MIPS) -# elif defined(__powerpc__ ) || defined(_M_PPC) -# define GLM_ARCH (GLM_ARCH_PPC) -# else -# define GLM_ARCH (GLM_ARCH_UNKNOWN) -# endif -#else -# if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(__i386__) -# define GLM_ARCH (GLM_ARCH_X86) -# elif defined(__arm__) || defined(_M_ARM) -# define GLM_ARCH (GLM_ARCH_ARM) -# elif defined(__powerpc__) || defined(_M_PPC) -# define GLM_ARCH (GLM_ARCH_PPC) -# elif defined(__mips__) -# define GLM_ARCH (GLM_ARCH_MIPS) -# else -# define GLM_ARCH (GLM_ARCH_UNKNOWN) -# endif -#endif - -#if GLM_ARCH & GLM_ARCH_AVX2_BIT -# include -#elif GLM_ARCH & GLM_ARCH_AVX_BIT -# include -#elif GLM_ARCH & GLM_ARCH_SSE42_BIT -# if GLM_COMPILER & GLM_COMPILER_CLANG -# include -# endif -# include -#elif GLM_ARCH & GLM_ARCH_SSE41_BIT -# include -#elif GLM_ARCH & GLM_ARCH_SSSE3_BIT -# include -#elif GLM_ARCH & GLM_ARCH_SSE3_BIT -# include -#elif GLM_ARCH & GLM_ARCH_SSE2_BIT -# include -#elif GLM_ARCH & GLM_ARCH_NEON_BIT -# include "neon.h" -#endif//GLM_ARCH - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - typedef __m128 glm_f32vec4; - typedef __m128i glm_i32vec4; - typedef __m128i glm_u32vec4; - typedef __m128d glm_f64vec2; - typedef __m128i glm_i64vec2; - typedef __m128i glm_u64vec2; - - typedef glm_f32vec4 glm_vec4; - typedef glm_i32vec4 glm_ivec4; - typedef glm_u32vec4 glm_uvec4; - typedef glm_f64vec2 glm_dvec2; -#endif - -#if GLM_ARCH & GLM_ARCH_AVX_BIT - typedef __m256d glm_f64vec4; - typedef glm_f64vec4 glm_dvec4; -#endif - -#if GLM_ARCH & GLM_ARCH_AVX2_BIT - typedef __m256i glm_i64vec4; - typedef __m256i glm_u64vec4; -#endif - -#if GLM_ARCH & GLM_ARCH_NEON_BIT - typedef float32x4_t glm_f32vec4; - typedef int32x4_t glm_i32vec4; - typedef uint32x4_t glm_u32vec4; -#endif diff --git a/third_party/glm/simd/trigonometric.h b/third_party/glm/simd/trigonometric.h deleted file mode 100755 index 739b796..0000000 --- a/third_party/glm/simd/trigonometric.h +++ /dev/null @@ -1,9 +0,0 @@ -/// @ref simd -/// @file glm/simd/trigonometric.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT - diff --git a/third_party/glm/simd/vector_relational.h b/third_party/glm/simd/vector_relational.h deleted file mode 100755 index f7385e9..0000000 --- a/third_party/glm/simd/vector_relational.h +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref simd -/// @file glm/simd/vector_relational.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/third_party/glm/trigonometric.hpp b/third_party/glm/trigonometric.hpp deleted file mode 100755 index fcf07f8..0000000 --- a/third_party/glm/trigonometric.hpp +++ /dev/null @@ -1,210 +0,0 @@ -/// @ref core -/// @file glm/trigonometric.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions -/// -/// @defgroup core_func_trigonometric 
Angle and Trigonometry Functions -/// @ingroup core -/// -/// Function parameters specified as angle are assumed to be in units of radians. -/// In no case will any of these functions result in a divide by zero error. If -/// the divisor of a ratio is 0, then results will be undefined. -/// -/// These all operate component-wise. The description is per component. -/// -/// Include to use these core features. -/// -/// @see ext_vector_trigonometric - -#pragma once - -#include "detail/setup.hpp" -#include "detail/qualifier.hpp" - -namespace glm -{ - /// @addtogroup core_func_trigonometric - /// @{ - - /// Converts degrees to radians and returns the result. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL radians man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec radians(vec const& degrees); - - /// Converts radians to degrees and returns the result. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL degrees man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec degrees(vec const& radians); - - /// The standard trigonometric sine function. - /// The values returned by this function will range from [-1, 1]. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL sin man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec sin(vec const& angle); - - /// The standard trigonometric cosine function. - /// The values returned by this function will range from [-1, 1]. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL cos man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec cos(vec const& angle); - - /// The standard trigonometric tangent function. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL tan man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec tan(vec const& angle); - - /// Arc sine. Returns an angle whose sine is x. - /// The range of values returned by this function is [-PI/2, PI/2]. - /// Results are undefined if |x| > 1. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL asin man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec asin(vec const& x); - - /// Arc cosine. Returns an angle whose sine is x. - /// The range of values returned by this function is [0, PI]. - /// Results are undefined if |x| > 1. 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL acos man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec acos(vec const& x); - - /// Arc tangent. Returns an angle whose tangent is y/x. - /// The signs of x and y are used to determine what - /// quadrant the angle is in. The range of values returned - /// by this function is [-PI, PI]. Results are undefined - /// if x and y are both 0. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL atan man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec atan(vec const& y, vec const& x); - - /// Arc tangent. Returns an angle whose tangent is y_over_x. - /// The range of values returned by this function is [-PI/2, PI/2]. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL atan man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec atan(vec const& y_over_x); - - /// Returns the hyperbolic sine function, (exp(x) - exp(-x)) / 2 - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL sinh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec sinh(vec const& angle); - - /// Returns the hyperbolic cosine function, (exp(x) + exp(-x)) / 2 - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL cosh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec cosh(vec const& angle); - - /// Returns the hyperbolic tangent function, sinh(angle) / cosh(angle) - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL tanh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec tanh(vec const& angle); - - /// Arc hyperbolic sine; returns the inverse of sinh. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL asinh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec asinh(vec const& x); - - /// Arc hyperbolic cosine; returns the non-negative inverse - /// of cosh. Results are undefined if x < 1. 
- /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL acosh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec acosh(vec const& x); - - /// Arc hyperbolic tangent; returns the inverse of tanh. - /// Results are undefined if abs(x) >= 1. - /// - /// @tparam L Integer between 1 and 4 included that qualify the dimension of the vector - /// @tparam T Floating-point scalar types - /// @tparam Q Value from qualifier enum - /// - /// @see GLSL atanh man page - /// @see GLSL 4.20.8 specification, section 8.1 Angle and Trigonometry Functions - template - GLM_FUNC_DECL vec atanh(vec const& x); - - /// @} -}//namespace glm - -#include "detail/func_trigonometric.inl" diff --git a/third_party/glm/vec2.hpp b/third_party/glm/vec2.hpp deleted file mode 100755 index be768bf..0000000 --- a/third_party/glm/vec2.hpp +++ /dev/null @@ -1,14 +0,0 @@ -/// @ref core -/// @file glm/vec2.hpp - -#pragma once -#include "./ext/vector_bool2.hpp" -#include "./ext/vector_bool2_precision.hpp" -#include "./ext/vector_float2.hpp" -#include "./ext/vector_float2_precision.hpp" -#include "./ext/vector_double2.hpp" -#include "./ext/vector_double2_precision.hpp" -#include "./ext/vector_int2.hpp" -#include "./ext/vector_int2_precision.hpp" -#include "./ext/vector_uint2.hpp" -#include "./ext/vector_uint2_precision.hpp" diff --git a/third_party/glm/vec3.hpp b/third_party/glm/vec3.hpp deleted file mode 100755 index f570722..0000000 --- a/third_party/glm/vec3.hpp +++ /dev/null @@ -1,14 +0,0 @@ -/// @ref core -/// @file glm/vec3.hpp - -#pragma once -#include "./ext/vector_bool3.hpp" -#include "./ext/vector_bool3_precision.hpp" -#include "./ext/vector_float3.hpp" -#include "./ext/vector_float3_precision.hpp" -#include "./ext/vector_double3.hpp" -#include "./ext/vector_double3_precision.hpp" -#include "./ext/vector_int3.hpp" -#include "./ext/vector_int3_precision.hpp" -#include "./ext/vector_uint3.hpp" -#include "./ext/vector_uint3_precision.hpp" diff --git a/third_party/glm/vec4.hpp b/third_party/glm/vec4.hpp deleted file mode 100755 index 9117020..0000000 --- a/third_party/glm/vec4.hpp +++ /dev/null @@ -1,15 +0,0 @@ -/// @ref core -/// @file glm/vec4.hpp - -#pragma once -#include "./ext/vector_bool4.hpp" -#include "./ext/vector_bool4_precision.hpp" -#include "./ext/vector_float4.hpp" -#include "./ext/vector_float4_precision.hpp" -#include "./ext/vector_double4.hpp" -#include "./ext/vector_double4_precision.hpp" -#include "./ext/vector_int4.hpp" -#include "./ext/vector_int4_precision.hpp" -#include "./ext/vector_uint4.hpp" -#include "./ext/vector_uint4_precision.hpp" - diff --git a/third_party/glm/vector_relational.hpp b/third_party/glm/vector_relational.hpp deleted file mode 100755 index a0fe17e..0000000 --- a/third_party/glm/vector_relational.hpp +++ /dev/null @@ -1,121 +0,0 @@ -/// @ref core -/// @file glm/vector_relational.hpp -/// -/// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions -/// -/// @defgroup core_func_vector_relational Vector Relational Functions -/// @ingroup core -/// -/// Relational and equality operators (<, <=, >, >=, ==, !=) are defined to -/// operate on scalars and produce scalar Boolean results. For vector results, -/// use the following built-in functions. -/// -/// In all cases, the sizes of all the input and return vectors for any particular -/// call must match. 
-/// -/// Include to use these core features. -/// -/// @see ext_vector_relational - -#pragma once - -#include "detail/qualifier.hpp" -#include "detail/setup.hpp" - -namespace glm -{ - /// @addtogroup core_func_vector_relational - /// @{ - - /// Returns the component-wise comparison result of x < y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL lessThan man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec lessThan(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x <= y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL lessThanEqual man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec lessThanEqual(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x > y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL greaterThan man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec greaterThan(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x >= y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point or integer scalar type. - /// - /// @see GLSL greaterThanEqual man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec greaterThanEqual(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x == y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point, integer or bool scalar type. - /// - /// @see GLSL equal man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec equal(vec const& x, vec const& y); - - /// Returns the component-wise comparison of result x != y. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// @tparam T A floating-point, integer or bool scalar type. - /// - /// @see GLSL notEqual man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec notEqual(vec const& x, vec const& y); - - /// Returns true if any component of x is true. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL any man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR bool any(vec const& v); - - /// Returns true if all components of x are true. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL all man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR bool all(vec const& v); - - /// Returns the component-wise logical complement of x. 
- /// /!\ Because of language incompatibilities between C++ and GLSL, GLM defines the function not but not_ instead. - /// - /// @tparam L An integer between 1 and 4 included that qualify the dimension of the vector. - /// - /// @see GLSL not man page - /// @see GLSL 4.20.8 specification, section 8.7 Vector Relational Functions - template - GLM_FUNC_DECL GLM_CONSTEXPR vec not_(vec const& v); - - /// @} -}//namespace glm - -#include "detail/func_vector_relational.inl" diff --git a/third_party/kvf.h b/third_party/kvf.h new file mode 100755 index 0000000..be29a0b --- /dev/null +++ b/third_party/kvf.h @@ -0,0 +1,3048 @@ +/*** + * MIT License + * + * Copyright (c) 2023-2024 kbz_8 + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * + * Do this: + * #define KVF_IMPLEMENTATION + * before you include this file in *one* C or C++ file to create the implementation. + * + * // i.e. it should look like this: + * #include ... + * #include ... + * #include ... + * #define KVF_IMPLEMENTATION + * #include "kvf.h" + * + * You can #define KVF_ASSERT(x) before the #include to avoid using assert.h. + * And #define KVF_MALLOC, KVF_REALLOC, and KVF_FREE to avoid using malloc, realloc, free. + * + * By default KVF exits the program if a call to the Vulkan API fails. You can avoid that + * by using #define KVF_NO_EXIT_ON_FAILURE + * + * If you are using Volk or any other meta loader you must define KVF_IMPL_VK_NO_PROTOTYPES + * before including this file to avoid conflicts with Vulkan prototypes. + * You will also need to pass the function pointers to kvf using dedicated functions. + * + * You can also #define KVF_ENABLE_VALIDATION_LAYERS to enable validation layers. + * + * Use #define KVF_NO_KHR to remove all functions that use KHR calls. 
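+ *
+ * As a minimal usage sketch (assuming `extensions`, `extensions_count` and `surface` come from
+ * your windowing layer, e.g. SDL or GLFW), a typical setup with the functions declared below
+ * could look like this:
+ *
+ *   #define KVF_IMPLEMENTATION
+ *   #include "kvf.h"
+ *
+ *   VkInstance instance = kvfCreateInstance(extensions, extensions_count);
+ *   VkPhysicalDevice physical = kvfPickGoodDefaultPhysicalDevice(instance, surface);
+ *   VkDevice device = kvfCreateDefaultDevice(physical);
+ *   VkCommandBuffer cmd = kvfCreateCommandBuffer(device);
+ *   // ... record commands, submit with kvfSubmitCommandBuffer, present with kvfQueuePresentKHR ...
+ *   kvfDestroyDevice(device);
+ *   kvfDestroyInstance(instance);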
+ */
+
+#ifndef KBZ_8_VULKAN_FRAMEWORK_H
+#define KBZ_8_VULKAN_FRAMEWORK_H
+
+#ifdef KVF_IMPL_VK_NO_PROTOTYPES
+	#define VK_NO_PROTOTYPES
+#endif
+
+#include <vulkan/vulkan.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/* ============================================= Prototypes ============================================= */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum
+{
+	KVF_GRAPHICS_QUEUE = 0,
+	KVF_PRESENT_QUEUE = 1,
+	KVF_COMPUTE_QUEUE = 2
+} KvfQueueType;
+
+typedef enum
+{
+	KVF_IMAGE_COLOR = 0,
+	KVF_IMAGE_DEPTH = 1,
+	KVF_IMAGE_DEPTH_ARRAY = 2,
+	KVF_IMAGE_CUBE = 3,
+	KVF_IMAGE_OTHER = 4,
+} KvfImageType;
+
+typedef void (*KvfErrorCallback)(const char* message);
+
+#ifdef KVF_IMPL_VK_NO_PROTOTYPES
+	typedef struct KvfGlobalVulkanFunctions KvfGlobalVulkanFunctions;
+	typedef struct KvfDeviceVulkanFunctions KvfDeviceVulkanFunctions;
+	typedef struct KvfInstanceVulkanFunctions KvfInstanceVulkanFunctions;
+#endif
+typedef struct KvfGraphicsPipelineBuilder KvfGraphicsPipelineBuilder;
+
+void kvfSetErrorCallback(KvfErrorCallback callback);
+void kvfSetWarningCallback(KvfErrorCallback callback);
+void kvfSetValidationErrorCallback(KvfErrorCallback callback);
+void kvfSetValidationWarningCallback(KvfErrorCallback callback);
+
+#ifdef KVF_IMPL_VK_NO_PROTOTYPES
+	void kvfPassGlobalVulkanFunctionPointers(const KvfGlobalVulkanFunctions* fns);
+	void kvfPassInstanceVulkanFunctionPointers(const KvfInstanceVulkanFunctions* fns);
+#endif
+
+void kvfAddLayer(const char* layer);
+
+VkInstance kvfCreateInstance(const char** extensions_enabled, uint32_t extensions_count);
+void kvfDestroyInstance(VkInstance instance);
+
+// If the surfaces given to these functions are VK_NULL_HANDLE, no present queue will be searched and thus kvfQueuePresentKHR will not work
+VkPhysicalDevice kvfPickFirstPhysicalDevice(VkInstance instance, VkSurfaceKHR surface);
+VkPhysicalDevice kvfPickGoodDefaultPhysicalDevice(VkInstance instance, VkSurfaceKHR surface);
+VkPhysicalDevice kvfPickGoodPhysicalDevice(VkInstance instance, VkSurfaceKHR surface, const char** device_extensions, uint32_t device_extensions_count);
+
+VkQueue kvfGetDeviceQueue(VkDevice device, KvfQueueType queue);
+uint32_t kvfGetDeviceQueueFamily(VkDevice device, KvfQueueType queue);
+#ifndef KVF_NO_KHR
+	bool kvfQueuePresentKHR(VkDevice device, VkSemaphore wait, VkSwapchainKHR swapchain, uint32_t image_index); // returns false when the swapchain must be recreated
+#endif
+
+// Meant to be used when creating a VkDevice with a custom VkPhysicalDevice
+int32_t kvfFindDeviceQueueFamily(VkPhysicalDevice physical, KvfQueueType type); // This function cannot find a present queue
+#ifndef KVF_NO_KHR
+	int32_t kvfFindDeviceQueueFamilyKHR(VkPhysicalDevice physical, VkSurfaceKHR surface, KvfQueueType type); // This one can find a present queue
+#endif
+
+VkDevice kvfCreateDefaultDevice(VkPhysicalDevice physical);
+VkDevice kvfCreateDevice(VkPhysicalDevice physical, const char** extensions, uint32_t extensions_count, VkPhysicalDeviceFeatures* features);
+VkDevice kvfCreateDefaultDevicePhysicalDeviceAndCustomQueues(VkPhysicalDevice physical, int32_t graphics_queue, int32_t present_queue, int32_t compute_queue);
+VkDevice kvfCreateDeviceCustomPhysicalDeviceAndQueues(VkPhysicalDevice physical, const char** extensions, uint32_t extensions_count, VkPhysicalDeviceFeatures* features, int32_t graphics_queue, int32_t present_queue, int32_t compute_queue);
+#ifdef KVF_IMPL_VK_NO_PROTOTYPES
+	void kvfPassDeviceVulkanFunctionPointers(VkPhysicalDevice physical, VkDevice device, const
KvfDeviceVulkanFunctions* fns); +#endif +void kvfDestroyDevice(VkDevice device); + +VkFence kvfCreateFence(VkDevice device); +void kvfWaitForFence(VkDevice device, VkFence fence); +void kvfDestroyFence(VkDevice device, VkFence fence); + +VkSemaphore kvfCreateSemaphore(VkDevice device); +void kvfDestroySemaphore(VkDevice device, VkSemaphore semaphore); + +#ifndef KVF_NO_KHR + VkSwapchainKHR kvfCreateSwapchainKHR(VkDevice device, VkPhysicalDevice physical, VkSurfaceKHR surface, VkExtent2D extent, VkSwapchainKHR old_swapchain, bool try_vsync); + VkFormat kvfGetSwapchainImagesFormat(VkSwapchainKHR swapchain); + uint32_t kvfGetSwapchainImagesCount(VkSwapchainKHR swapchain); + uint32_t kvfGetSwapchainMinImagesCount(VkSwapchainKHR swapchain); + VkExtent2D kvfGetSwapchainImagesSize(VkSwapchainKHR swapchain); + void kvfDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain); +#endif + +VkImage kvfCreateImage(VkDevice device, uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, KvfImageType type); +void kvfCopyImageToBuffer(VkCommandBuffer cmd, VkBuffer dst, VkImage src, size_t buffer_offset, VkImageAspectFlagBits aspect, VkExtent3D extent); +void kvfDestroyImage(VkDevice device, VkImage image); +VkImageView kvfCreateImageView(VkDevice device, VkImage image, VkFormat format, VkImageViewType type, VkImageAspectFlags aspect, int layer_count); +void kvfDestroyImageView(VkDevice device, VkImageView image_view); +void kvfTransitionImageLayout(VkDevice device, VkImage image, KvfImageType type, VkCommandBuffer cmd, VkFormat format, VkImageLayout old_layout, VkImageLayout new_layout, bool is_single_time_cmd_buffer); +VkSampler kvfCreateSampler(VkDevice device, VkFilter filters, VkSamplerAddressMode address_modes, VkSamplerMipmapMode mipmap_mode); +void kvfDestroySampler(VkDevice device, VkSampler sampler); + +VkBuffer kvfCreateBuffer(VkDevice device, VkBufferUsageFlags usage, VkDeviceSize size); +void kvfCopyBufferToBuffer(VkCommandBuffer cmd, VkBuffer dst, VkBuffer src, size_t size); +void kvfCopyBufferToImage(VkCommandBuffer cmd, VkImage dst, VkBuffer src, size_t buffer_offset, VkImageAspectFlagBits aspect, VkExtent3D extent); +void kvfDestroyBuffer(VkDevice device, VkBuffer buffer); + +VkFramebuffer kvfCreateFramebuffer(VkDevice device, VkRenderPass renderpass, VkImageView* image_views, size_t image_views_count, VkExtent2D extent); +VkExtent2D kvfGetFramebufferSize(VkFramebuffer buffer); +void kvfDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer); + +VkCommandBuffer kvfCreateCommandBuffer(VkDevice device); +VkCommandBuffer kvfCreateCommandBufferLeveled(VkDevice device, VkCommandBufferLevel level); +void kvfBeginCommandBuffer(VkCommandBuffer buffer, VkCommandBufferUsageFlags flags); +void kvfEndCommandBuffer(VkCommandBuffer buffer); +void kvfSubmitCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkSemaphore signal, VkSemaphore wait, VkFence fence, VkPipelineStageFlags* stages); +void kvfSubmitSingleTimeCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkFence fence); + +VkAttachmentDescription kvfBuildAttachmentDescription(KvfImageType type, VkFormat format, VkImageLayout initial, VkImageLayout final, bool clear, VkSampleCountFlagBits samples); +#ifndef KVF_NO_KHR + VkAttachmentDescription kvfBuildSwapchainAttachmentDescription(VkSwapchainKHR swapchain, bool clear); +#endif + +VkRenderPass kvfCreateRenderPass(VkDevice device, VkAttachmentDescription* attachments, size_t attachments_count, 
VkPipelineBindPoint bind_point); +VkRenderPass kvfCreateRenderPassWithSubpassDependencies(VkDevice device, VkAttachmentDescription* attachments, size_t attachments_count, VkPipelineBindPoint bind_point, VkSubpassDependency* dependencies, size_t dependencies_count); +void kvfDestroyRenderPass(VkDevice device, VkRenderPass renderpass); +void kvfBeginRenderPass(VkRenderPass pass, VkCommandBuffer cmd, VkFramebuffer framebuffer, VkExtent2D framebuffer_extent, VkClearValue* clears, size_t clears_count); + +VkShaderModule kvfCreateShaderModule(VkDevice device, uint32_t* code, size_t size); +void kvfDestroyShaderModule(VkDevice device, VkShaderModule shader); + +const char* kvfVerbaliseVkResult(VkResult result); + +bool kvfIsStencilFormat(VkFormat format); +bool kvfIsDepthFormat(VkFormat format); +uint32_t kvfFormatSize(VkFormat format); +VkPipelineStageFlags kvfLayoutToAccessMask(VkImageLayout layout, bool is_destination); +VkPipelineStageFlags kvfAccessFlagsToPipelineStage(VkAccessFlags access_flags, VkPipelineStageFlags stage_flags); +VkFormat kvfFindSupportFormatInCandidates(VkDevice device, VkFormat* candidates, size_t candidates_count, VkImageTiling tiling, VkFormatFeatureFlags flags); + +VkDescriptorSetLayout kvfCreateDescriptorSetLayout(VkDevice device, VkDescriptorSetLayoutBinding* bindings, size_t bindings_count); +void kvfDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout layout); + +VkDescriptorSet kvfAllocateDescriptorSet(VkDevice device, VkDescriptorSetLayout layout); +void kvfUpdateStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding); +void kvfUpdateUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding); +void kvfUpdateImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorImageInfo* info, uint32_t binding); +VkWriteDescriptorSet kvfWriteStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding); +VkWriteDescriptorSet kvfWriteUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding); +VkWriteDescriptorSet kvfWriteImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorImageInfo* info, uint32_t binding); + +void kvfResetDeviceDescriptorPools(VkDevice device); + +VkPipelineLayout kvfCreatePipelineLayout(VkDevice device, VkDescriptorSetLayout* set_layouts, size_t set_layouts_count, VkPushConstantRange* pc, size_t pc_count); +void kvfDestroyPipelineLayout(VkDevice device, VkPipelineLayout layout); + +KvfGraphicsPipelineBuilder* kvfCreateGPipelineBuilder(); +void kvfDestroyGPipelineBuilder(KvfGraphicsPipelineBuilder* builder); + +void kvfGPipelineBuilderReset(KvfGraphicsPipelineBuilder* builder); +void kvfGPipelineBuilderSetInputTopology(KvfGraphicsPipelineBuilder* builder, VkPrimitiveTopology topology); +void kvfGPipelineBuilderSetPolygonMode(KvfGraphicsPipelineBuilder* builder, VkPolygonMode polygon, float line_width); +void kvfGPipelineBuilderSetCullMode(KvfGraphicsPipelineBuilder* builder, VkCullModeFlags cull, VkFrontFace face); +void kvfGPipelineBuilderSetMultisampling(KvfGraphicsPipelineBuilder* builder, VkSampleCountFlagBits count); +void kvfGPipelineBuilderSetMultisamplingShading(KvfGraphicsPipelineBuilder* builder, VkSampleCountFlagBits count, float min_sampling_shading); +void kvfGPipelineBuilderDisableBlending(KvfGraphicsPipelineBuilder* builder); +void 
kvfGPipelineBuilderEnableAdditiveBlending(KvfGraphicsPipelineBuilder* builder); +void kvfGPipelineBuilderEnableAlphaBlending(KvfGraphicsPipelineBuilder* builder); +void kvfGPipelineBuilderEnableDepthTest(KvfGraphicsPipelineBuilder* builder, VkCompareOp op, bool write_enabled); +void kvfGPipelineBuilderDisableDepthTest(KvfGraphicsPipelineBuilder* builder); +void kvfGPipelineBuilderSetVertexInputs(KvfGraphicsPipelineBuilder* builder, VkVertexInputBindingDescription binds, VkVertexInputAttributeDescription* attributes, size_t attributes_count); +void kvfGPipelineBuilderAddShaderStage(KvfGraphicsPipelineBuilder* builder, VkShaderStageFlagBits stage, VkShaderModule module, const char* entry); +void kvfGPipelineBuilderResetShaderStages(KvfGraphicsPipelineBuilder* builder); + +VkPipeline kvfCreateGraphicsPipeline(VkDevice device, VkPipelineCache cache, VkPipelineLayout layout, KvfGraphicsPipelineBuilder* builder, VkRenderPass pass); +void kvfDestroyPipeline(VkDevice device, VkPipeline pipeline); + +void kvfCheckVk(VkResult result); + +#ifdef KVF_IMPL_VK_NO_PROTOTYPES + #ifdef KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE + #undef KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE + #endif + #define KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(fn) PFN_##fn fn + + struct KvfGlobalVulkanFunctions + { + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateInstance); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkEnumerateInstanceExtensionProperties); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkEnumerateInstanceLayerProperties); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetInstanceProcAddr); + }; + + struct KvfInstanceVulkanFunctions + { + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateDevice); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyInstance); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkEnumerateDeviceExtensionProperties); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkEnumeratePhysicalDevices); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceFeatures); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceFormatProperties); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceImageFormatProperties); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceMemoryProperties); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceProperties); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceQueueFamilyProperties); + #ifndef KVF_NO_KHR + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroySurfaceKHR); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceSurfaceCapabilitiesKHR); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceSurfaceFormatsKHR); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceSurfacePresentModesKHR); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetPhysicalDeviceSurfaceSupportKHR); + #endif + }; + + struct KvfDeviceVulkanFunctions + { + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkAllocateCommandBuffers); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkAllocateDescriptorSets); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkBeginCommandBuffer); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCmdBeginRenderPass); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCmdCopyBuffer); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCmdCopyBufferToImage); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCmdCopyImage); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCmdCopyImageToBuffer); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCmdEndRenderPass); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCmdPipelineBarrier); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateBuffer); + KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateCommandPool); + 
		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateDescriptorPool);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateDescriptorSetLayout);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateFence);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateFramebuffer);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateGraphicsPipelines);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateImage);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateImageView);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreatePipelineLayout);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateRenderPass);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateSampler);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateSemaphore);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateShaderModule);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyBuffer);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyCommandPool);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyDescriptorPool);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyDescriptorSetLayout);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyDevice);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyFence);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyFramebuffer);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyImage);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyImageView);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyPipeline);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyPipelineLayout);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyRenderPass);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroySampler);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroySemaphore);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroyShaderModule);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDeviceWaitIdle);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkEndCommandBuffer);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetDeviceQueue);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetImageSubresourceLayout);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkQueueSubmit);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkResetCommandBuffer);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkResetDescriptorPool);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkResetEvent);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkResetFences);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkUpdateDescriptorSets);
+		KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkWaitForFences);
+		#ifndef KVF_NO_KHR
+			KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkCreateSwapchainKHR);
+			KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkDestroySwapchainKHR);
+			KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkGetSwapchainImagesKHR);
+			KVF_DEFINE_VULKAN_FUNCTION_PROTOTYPE(vkQueuePresentKHR);
+		#endif
+	};
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // KBZ_8_VULKAN_FRAMEWORK_H
+
+/* ========================================== Implementation =========================================== */
+
+#ifdef KVF_IMPLEMENTATION
+
+#ifndef KVF_MALLOC
+	#define KVF_MALLOC(x) malloc(x)
+#endif
+#ifndef KVF_REALLOC
+	#define KVF_REALLOC(x, s) realloc(x, s)
+#endif
+#ifndef KVF_FREE
+	#define KVF_FREE(x) free(x)
+#endif
+#ifndef KVF_ASSERT
+	#include <assert.h>
+	#define KVF_ASSERT(x) assert(x)
+#endif
+
+#ifdef KVF_IMPL_VK_NO_PROTOTYPES
+	#define KVF_GET_GLOBAL_FUNCTION(fn) __kvf_g_fns.fn
+	#define KVF_GET_INSTANCE_FUNCTION(fn) __kvf_i_fns.fn
+	#define KVF_GET_DEVICE_FUNCTION(fn) kvf_device->fns.fn
+#else
+	#define KVF_GET_GLOBAL_FUNCTION(fn) fn
+	#define KVF_GET_INSTANCE_FUNCTION(fn) fn
+	#define KVF_GET_DEVICE_FUNCTION(fn) fn
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef KVF_DESCRIPTOR_POOL_CAPACITY
+	#undef KVF_DESCRIPTOR_POOL_CAPACITY
+#endif
+#define
KVF_DESCRIPTOR_POOL_CAPACITY 512 + +#ifdef KVF_COMMAND_POOL_CAPACITY + #undef KVF_COMMAND_POOL_CAPACITY +#endif +#define KVF_COMMAND_POOL_CAPACITY 512 + +typedef struct +{ + int32_t graphics; + int32_t present; + int32_t compute; +} __KvfQueueFamilies; + +typedef struct __KvfDescriptorPool +{ + VkDescriptorPool pool; + size_t capacity; + size_t size; +} __KvfDescriptorPool; + +typedef struct __KvfDevice +{ + __KvfQueueFamilies queues; + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + KvfDeviceVulkanFunctions fns; + #endif + VkDevice device; + VkPhysicalDevice physical; + VkCommandPool cmd_pool; + VkCommandBuffer* cmd_buffers; + __KvfDescriptorPool* sets_pools; + size_t cmd_buffers_size; + size_t cmd_buffers_capacity; + size_t sets_pools_size; +} __KvfDevice; + +#ifndef KVF_NO_KHR + typedef struct __KvfSwapchainSupportInternal + { + VkSurfaceCapabilitiesKHR capabilities; + VkSurfaceFormatKHR* formats; + VkPresentModeKHR* present_modes; + uint32_t formats_count; + uint32_t present_modes_count; + } __KvfSwapchainSupportInternal; + + typedef struct __KvfSwapchain + { + __KvfSwapchainSupportInternal support; + VkSwapchainKHR swapchain; + VkExtent2D images_extent; + VkFormat images_format; + uint32_t images_count; + } __KvfSwapchain; +#endif + +typedef struct __KvfFramebuffer +{ + VkFramebuffer framebuffer; + VkExtent2D extent; +} __KvfFramebuffer; + +struct KvfGraphicsPipelineBuilder +{ + VkPipelineShaderStageCreateInfo* shader_stages; + VkPipelineVertexInputStateCreateInfo vertex_input_state; + VkPipelineInputAssemblyStateCreateInfo input_assembly_state; + VkPipelineTessellationStateCreateInfo tessellation_state; + VkPipelineRasterizationStateCreateInfo rasterization_state; + VkPipelineDepthStencilStateCreateInfo depth_stencil_state; + VkPipelineColorBlendAttachmentState color_blend_attachment_state; + VkPipelineMultisampleStateCreateInfo multisampling; + size_t shader_stages_count; +}; + +// Dynamic arrays +static __KvfDevice* __kvf_internal_devices = NULL; +static size_t __kvf_internal_devices_size = 0; +static size_t __kvf_internal_devices_capacity = 0; + +#ifndef KVF_NO_KHR + static __KvfSwapchain* __kvf_internal_swapchains = NULL; + static size_t __kvf_internal_swapchains_size = 0; + static size_t __kvf_internal_swapchains_capacity = 0; +#endif + +static __KvfFramebuffer* __kvf_internal_framebuffers = NULL; +static size_t __kvf_internal_framebuffers_size = 0; +static size_t __kvf_internal_framebuffers_capacity = 0; + +#ifdef KVF_ENABLE_VALIDATION_LAYERS + static VkDebugUtilsMessengerEXT __kvf_debug_messenger = VK_NULL_HANDLE; + static char** __kvf_extra_layers = NULL; + static size_t __kvf_extra_layers_count = 0; +#endif + +static KvfErrorCallback __kvf_error_callback = NULL; +static KvfErrorCallback __kvf_warning_callback = NULL; +static KvfErrorCallback __kvf_validation_error_callback = NULL; +static KvfErrorCallback __kvf_validation_warning_callback = NULL; + +#ifdef KVF_IMPL_VK_NO_PROTOTYPES + static KvfGlobalVulkanFunctions __kvf_g_fns; + static KvfInstanceVulkanFunctions __kvf_i_fns; +#endif + +void __kvfCheckVk(VkResult result, const char* function) +{ + if(result < VK_SUCCESS) + { + if(__kvf_error_callback != NULL) + { + char buffer[1024]; + snprintf(buffer, 1024, "KVF Vulkan error in '%s': %s", function, kvfVerbaliseVkResult(result)); + __kvf_error_callback(buffer); + return; + } + fprintf(stderr, "KVF Vulkan error in '%s': %s\n", function, kvfVerbaliseVkResult(result)); + #ifndef KVF_NO_EXIT_ON_FAILURE + exit(EXIT_FAILURE); + #endif + } + else if(result > VK_SUCCESS) + { + 
if(__kvf_warning_callback != NULL) + { + char buffer[1024]; + snprintf(buffer, 1024, "KVF Vulkan warning in '%s': %s", function, kvfVerbaliseVkResult(result)); + __kvf_warning_callback(buffer); + return; + } + printf("KVF Vulkan warning in '%s': %s\n", function, kvfVerbaliseVkResult(result)); + } +} + +#undef __kvfCheckVk +#define __kvfCheckVk(res) __kvfCheckVk(res, __FUNCTION__) + +void kvfCheckVk(VkResult result) +{ + __kvfCheckVk(result); +} + +void __kvfAddDeviceToArray(VkPhysicalDevice device, int32_t graphics_queue, int32_t present_queue, int32_t compute_queue) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + if(__kvf_internal_devices_size == __kvf_internal_devices_capacity) + { + // Resize the dynamic array if necessary + __kvf_internal_devices_capacity += 2; + __kvf_internal_devices = (__KvfDevice*)KVF_REALLOC(__kvf_internal_devices, __kvf_internal_devices_capacity * sizeof(__KvfDevice)); + } + + __kvf_internal_devices[__kvf_internal_devices_size].physical = device; + __kvf_internal_devices[__kvf_internal_devices_size].queues.graphics = graphics_queue; + __kvf_internal_devices[__kvf_internal_devices_size].queues.compute = compute_queue; + __kvf_internal_devices[__kvf_internal_devices_size].queues.present = present_queue; + __kvf_internal_devices_size++; +} + +void __kvfCompleteDevice(VkPhysicalDevice physical, VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(physical != VK_NULL_HANDLE); + + __KvfDevice* kvf_device = NULL; + + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].physical == physical) + kvf_device = &__kvf_internal_devices[i]; + } + + KVF_ASSERT(kvf_device != NULL); + + VkCommandPool pool; + VkCommandPoolCreateInfo pool_info = {}; + pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; + pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; + pool_info.queueFamilyIndex = kvf_device->queues.graphics; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateCommandPool)(device, &pool_info, NULL, &pool)); + + kvf_device->device = device; + kvf_device->cmd_pool = pool; + kvf_device->sets_pools = NULL; + kvf_device->sets_pools_size = 0; + kvf_device->cmd_buffers_size = 0; + kvf_device->cmd_buffers_capacity = KVF_COMMAND_POOL_CAPACITY; + kvf_device->cmd_buffers = (VkCommandBuffer*)KVF_MALLOC(KVF_COMMAND_POOL_CAPACITY * sizeof(VkCommandBuffer)); + KVF_ASSERT(kvf_device->cmd_buffers != NULL && "allocation failed :("); +} + +void __kvfCompleteDeviceCustomPhysicalDeviceAndQueues(VkPhysicalDevice physical, VkDevice device, int32_t graphics_queue, int32_t present_queue, int32_t compute_queue) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(physical != VK_NULL_HANDLE); + + __kvfAddDeviceToArray(physical, graphics_queue, present_queue, compute_queue); + + __KvfDevice* kvf_device = NULL; + + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].physical == physical) + kvf_device = &__kvf_internal_devices[i]; + } + + KVF_ASSERT(kvf_device != NULL); + + VkCommandPool pool; + VkCommandPoolCreateInfo pool_info = {}; + pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; + pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; + pool_info.queueFamilyIndex = kvf_device->queues.graphics; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateCommandPool)(device, &pool_info, NULL, &pool)); + + kvf_device->device = device; + kvf_device->cmd_pool = pool; + kvf_device->sets_pools = NULL; + kvf_device->sets_pools_size = 0; + kvf_device->cmd_buffers_size = 0; + 
kvf_device->cmd_buffers_capacity = KVF_COMMAND_POOL_CAPACITY; + kvf_device->cmd_buffers = (VkCommandBuffer*)KVF_MALLOC(KVF_COMMAND_POOL_CAPACITY * sizeof(VkCommandBuffer)); + KVF_ASSERT(kvf_device->cmd_buffers != NULL && "allocation failed :("); +} + +void __kvfDestroyDescriptorPools(VkDevice device); + +void __kvfDestroyDevice(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].device == device) + { + __KvfDevice* kvf_device = &__kvf_internal_devices[i]; + KVF_FREE(kvf_device->cmd_buffers); + KVF_GET_DEVICE_FUNCTION(vkDestroyCommandPool)(device, kvf_device->cmd_pool, NULL); + __kvfDestroyDescriptorPools(device); + KVF_GET_DEVICE_FUNCTION(vkDestroyDevice)(device, NULL); + // Shift the elements to fill the gap + for(size_t j = i; j < __kvf_internal_devices_size - 1; j++) + __kvf_internal_devices[j] = __kvf_internal_devices[j + 1]; + __kvf_internal_devices_size--; + if(__kvf_internal_devices_size == 0) + { + KVF_FREE(__kvf_internal_devices); + __kvf_internal_devices = NULL; + __kvf_internal_devices_capacity = 0; + } + return; + } + } +} + +__KvfDevice* __kvfGetKvfDeviceFromVkPhysicalDevice(VkPhysicalDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].physical == device) + return &__kvf_internal_devices[i]; + } + return NULL; +} + +__KvfDevice* __kvfGetKvfDeviceFromVkDevice(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + if(__kvf_internal_devices[i].device == device) + return &__kvf_internal_devices[i]; + } + return NULL; +} + +__KvfDevice* __kvfGetKvfDeviceFromVkCommandBuffer(VkCommandBuffer cmd) +{ + KVF_ASSERT(cmd != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_devices_size; i++) + { + for(size_t j = 0; j < __kvf_internal_devices[i].cmd_buffers_size; j++) + { + if(__kvf_internal_devices[i].cmd_buffers[j] == cmd) + return &__kvf_internal_devices[i]; + } + } + return NULL; +} + +#ifndef KVF_NO_KHR + void __kvfAddSwapchainToArray(VkSwapchainKHR swapchain, __KvfSwapchainSupportInternal support, VkFormat format, uint32_t images_count, VkExtent2D extent) + { + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + if(__kvf_internal_swapchains_size == __kvf_internal_swapchains_capacity) + { + // Resize the dynamic array if necessary + __kvf_internal_swapchains_capacity += 5; + __kvf_internal_swapchains = (__KvfSwapchain*)KVF_REALLOC(__kvf_internal_swapchains, __kvf_internal_swapchains_capacity * sizeof(__KvfSwapchain)); + } + + __kvf_internal_swapchains[__kvf_internal_swapchains_size].swapchain = swapchain; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].support = support; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].images_format = format; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].images_count = images_count; + __kvf_internal_swapchains[__kvf_internal_swapchains_size].images_extent = extent; + __kvf_internal_swapchains_size++; + } + + void __kvfDestroySwapchain(VkDevice device, VkSwapchainKHR swapchain) + { + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + KVF_ASSERT(device != VK_NULL_HANDLE); + + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + + for(size_t i = 0; i < __kvf_internal_swapchains_size; i++) + { + if(__kvf_internal_swapchains[i].swapchain == swapchain) + { + 
KVF_GET_DEVICE_FUNCTION(vkDestroySwapchainKHR)(device, swapchain, NULL); + // Shift the elements to fill the gap + for(size_t j = i; j < __kvf_internal_swapchains_size - 1; j++) + __kvf_internal_swapchains[j] = __kvf_internal_swapchains[j + 1]; + __kvf_internal_swapchains_size--; + if(__kvf_internal_swapchains_size == 0) + { + KVF_FREE(__kvf_internal_swapchains); + __kvf_internal_swapchains = NULL; + __kvf_internal_swapchains_capacity = 0; + } + return; + } + } + } + + __KvfSwapchain* __kvfGetKvfSwapchainFromVkSwapchainKHR(VkSwapchainKHR swapchain) + { + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_swapchains_size; i++) + { + if(__kvf_internal_swapchains[i].swapchain == swapchain) + return &__kvf_internal_swapchains[i]; + } + return NULL; + } +#endif + +void __kvfAddFramebufferToArray(VkFramebuffer framebuffer, VkExtent2D extent) +{ + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + if(__kvf_internal_framebuffers_size == __kvf_internal_framebuffers_capacity) + { + // Resize the dynamic array if necessary + __kvf_internal_framebuffers_capacity += 5; + __kvf_internal_framebuffers = (__KvfFramebuffer*)KVF_REALLOC(__kvf_internal_framebuffers, __kvf_internal_framebuffers_capacity * sizeof(__KvfFramebuffer)); + } + + __kvf_internal_framebuffers[__kvf_internal_framebuffers_size].framebuffer = framebuffer; + __kvf_internal_framebuffers[__kvf_internal_framebuffers_size].extent = extent; + __kvf_internal_framebuffers_size++; +} + +void __kvfDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer) +{ + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + KVF_ASSERT(device != VK_NULL_HANDLE); + + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + + for(size_t i = 0; i < __kvf_internal_framebuffers_size; i++) + { + if(__kvf_internal_framebuffers[i].framebuffer == framebuffer) + { + KVF_GET_DEVICE_FUNCTION(vkDestroyFramebuffer)(device, framebuffer, NULL); + // Shift the elements to fill the gap + for(size_t j = i; j < __kvf_internal_framebuffers_size - 1; j++) + __kvf_internal_framebuffers[j] = __kvf_internal_framebuffers[j + 1]; + __kvf_internal_framebuffers_size--; + if(__kvf_internal_framebuffers_size == 0) + { + KVF_FREE(__kvf_internal_framebuffers); + __kvf_internal_framebuffers = NULL; + __kvf_internal_framebuffers_capacity = 0; + } + return; + } + } + KVF_ASSERT(false && "could not find framebuffer"); +} + +__KvfFramebuffer* __kvfGetKvfFramebufferFromVkFramebuffer(VkFramebuffer framebuffer) +{ + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + for(size_t i = 0; i < __kvf_internal_framebuffers_size; i++) + { + if(__kvf_internal_framebuffers[i].framebuffer == framebuffer) + return &__kvf_internal_framebuffers[i]; + } + return NULL; +} + +VkDescriptorPool __kvfDeviceCreateDescriptorPool(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + kvf_device->sets_pools_size++; + kvf_device->sets_pools = (__KvfDescriptorPool*)KVF_REALLOC(kvf_device->sets_pools, kvf_device->sets_pools_size * sizeof(__KvfDescriptorPool)); + memset(&kvf_device->sets_pools[kvf_device->sets_pools_size - 1], 0, sizeof(__KvfDescriptorPool)); + + VkDescriptorPoolSize pool_sizes[] = { + { VK_DESCRIPTOR_TYPE_SAMPLER, 1024 }, + { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1024 }, + { VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1024 }, + { 
VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1024 }, + { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1024 }, + { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1024 }, + { VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1024 } + }; + + VkDescriptorPoolCreateInfo pool_info = {}; + pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; + pool_info.poolSizeCount = sizeof(pool_sizes) / sizeof(VkDescriptorPoolSize); + pool_info.pPoolSizes = pool_sizes; + pool_info.maxSets = KVF_DESCRIPTOR_POOL_CAPACITY; + pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; + + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateDescriptorPool)(device, &pool_info, NULL, &kvf_device->sets_pools[kvf_device->sets_pools_size - 1].pool)); + kvf_device->sets_pools[kvf_device->sets_pools_size - 1].capacity = KVF_DESCRIPTOR_POOL_CAPACITY; + return kvf_device->sets_pools[kvf_device->sets_pools_size - 1].pool; +} + +void __kvfDestroyDescriptorPools(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + + for(size_t i = 0; i < kvf_device->sets_pools_size; i++) + KVF_GET_DEVICE_FUNCTION(vkDestroyDescriptorPool)(device, kvf_device->sets_pools[i].pool, NULL); + KVF_FREE(kvf_device->sets_pools); + kvf_device->sets_pools_size = 0; +} + +void kvfSetErrorCallback(KvfErrorCallback callback) +{ + __kvf_error_callback = callback; +} + +void kvfSetWarningCallback(KvfErrorCallback callback) +{ + __kvf_warning_callback = callback; +} + +void kvfSetValidationErrorCallback(KvfErrorCallback callback) +{ + __kvf_validation_error_callback = callback; +} + +void kvfSetValidationWarningCallback(KvfErrorCallback callback) +{ + __kvf_validation_warning_callback = callback; +} + +#ifdef KVF_IMPL_VK_NO_PROTOTYPES + void kvfPassGlobalVulkanFunctionPointers(const KvfGlobalVulkanFunctions* fns) + { + KVF_ASSERT(fns != NULL); + __kvf_g_fns = *fns; + } + + void kvfPassInstanceVulkanFunctionPointers(const KvfInstanceVulkanFunctions* fns) + { + KVF_ASSERT(fns != NULL); + __kvf_i_fns = *fns; + } +#endif + +bool kvfIsStencilFormat(VkFormat format) +{ + switch(format) + { + case VK_FORMAT_D32_SFLOAT_S8_UINT: + case VK_FORMAT_D24_UNORM_S8_UINT: + return true; + + default: return false; + } +} + +bool kvfIsDepthFormat(VkFormat format) +{ + switch(format) + { + case VK_FORMAT_D16_UNORM: + case VK_FORMAT_D32_SFLOAT: + case VK_FORMAT_D32_SFLOAT_S8_UINT: + case VK_FORMAT_D24_UNORM_S8_UINT: + case VK_FORMAT_D16_UNORM_S8_UINT: + return true; + + default: return false; + } +} + +VkPipelineStageFlags kvfLayoutToAccessMask(VkImageLayout layout, bool is_destination) +{ + VkPipelineStageFlags access_mask = 0; + + switch(layout) + { + case VK_IMAGE_LAYOUT_UNDEFINED: + if(is_destination) + KVF_ASSERT(false && "Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_UNDEFINED"); + break; + case VK_IMAGE_LAYOUT_GENERAL: access_mask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: + access_mask = VK_ACCESS_SHADER_READ_BIT; // 
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; + break; + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: access_mask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; break; + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: access_mask = VK_ACCESS_TRANSFER_READ_BIT; break; + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: access_mask = VK_ACCESS_TRANSFER_WRITE_BIT; break; + case VK_IMAGE_LAYOUT_PREINITIALIZED: + if(!is_destination) + access_mask = VK_ACCESS_HOST_WRITE_BIT; + else + KVF_ASSERT(false && "Vulkan : the new layout used in a transition must not be VK_IMAGE_LAYOUT_PREINITIALIZED"); + break; + case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break; + case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; break; + case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: access_mask = VK_ACCESS_MEMORY_READ_BIT; break; + + default: KVF_ASSERT(false && "Vulkan : unexpected image layout"); break; + } + + return access_mask; +} + +VkPipelineStageFlags kvfAccessFlagsToPipelineStage(VkAccessFlags access_flags, VkPipelineStageFlags stage_flags) +{ + VkPipelineStageFlags stages = 0; + + while(access_flags != 0) + { + VkAccessFlagBits _access_flag = (VkAccessFlagBits)(access_flags & (~(access_flags - 1))); + if(_access_flag == 0 || (_access_flag & (_access_flag - 1)) != 0) + KVF_ASSERT(false && "Vulkan : an error has been caught during access flag to pipeline stage operation"); + access_flags &= ~_access_flag; + + switch(_access_flag) + { + case VK_ACCESS_INDIRECT_COMMAND_READ_BIT: stages |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT; break; + case VK_ACCESS_INDEX_READ_BIT: stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break; + case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT; break; + case VK_ACCESS_UNIFORM_READ_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break; + case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; break; + case VK_ACCESS_SHADER_READ_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break; + case VK_ACCESS_SHADER_WRITE_BIT: stages |= stage_flags | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; break; + case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break; + case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; break; + case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break; + case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; break; + case VK_ACCESS_TRANSFER_READ_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break; + case VK_ACCESS_TRANSFER_WRITE_BIT: stages |= VK_PIPELINE_STAGE_TRANSFER_BIT; break; + case VK_ACCESS_HOST_READ_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break; + case VK_ACCESS_HOST_WRITE_BIT: stages |= VK_PIPELINE_STAGE_HOST_BIT; break; + case VK_ACCESS_MEMORY_READ_BIT: break; + case VK_ACCESS_MEMORY_WRITE_BIT: break; + + default: KVF_ASSERT(false && "Vulkan : unknown access flag"); break; + } + } + return stages; +} + +VkFormat kvfFindSupportFormatInCandidates(VkDevice device, VkFormat* candidates, size_t candidates_count, VkImageTiling tiling, VkFormatFeatureFlags flags) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = 
__kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + for(size_t i = 0; i < candidates_count; i++) + { + VkFormatProperties props; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceFormatProperties)(kvf_device->physical, candidates[i], &props); + if(tiling == VK_IMAGE_TILING_LINEAR && (props.linearTilingFeatures & flags) == flags) + return candidates[i]; + else if(tiling == VK_IMAGE_TILING_OPTIMAL && (props.optimalTilingFeatures & flags) == flags) + return candidates[i]; + } + + KVF_ASSERT(false && "Vulkan : failed to find image format"); + return VK_FORMAT_R8G8B8A8_SRGB; // just to avoir warning +} + +uint32_t kvfFormatSize(VkFormat format) +{ + switch(format) + { + case VK_FORMAT_UNDEFINED: return 0; + case VK_FORMAT_R4G4_UNORM_PACK8: return 1; + case VK_FORMAT_R4G4B4A4_UNORM_PACK16: return 2; + case VK_FORMAT_B4G4R4A4_UNORM_PACK16: return 2; + case VK_FORMAT_R5G6B5_UNORM_PACK16: return 2; + case VK_FORMAT_B5G6R5_UNORM_PACK16: return 2; + case VK_FORMAT_R5G5B5A1_UNORM_PACK16: return 2; + case VK_FORMAT_B5G5R5A1_UNORM_PACK16: return 2; + case VK_FORMAT_A1R5G5B5_UNORM_PACK16: return 2; + case VK_FORMAT_R8_UNORM: return 1; + case VK_FORMAT_R8_SNORM: return 1; + case VK_FORMAT_R8_USCALED: return 1; + case VK_FORMAT_R8_SSCALED: return 1; + case VK_FORMAT_R8_UINT: return 1; + case VK_FORMAT_R8_SINT: return 1; + case VK_FORMAT_R8_SRGB: return 1; + case VK_FORMAT_R8G8_UNORM: return 2; + case VK_FORMAT_R8G8_SNORM: return 2; + case VK_FORMAT_R8G8_USCALED: return 2; + case VK_FORMAT_R8G8_SSCALED: return 2; + case VK_FORMAT_R8G8_UINT: return 2; + case VK_FORMAT_R8G8_SINT: return 2; + case VK_FORMAT_R8G8_SRGB: return 2; + case VK_FORMAT_R8G8B8_UNORM: return 3; + case VK_FORMAT_R8G8B8_SNORM: return 3; + case VK_FORMAT_R8G8B8_USCALED: return 3; + case VK_FORMAT_R8G8B8_SSCALED: return 3; + case VK_FORMAT_R8G8B8_UINT: return 3; + case VK_FORMAT_R8G8B8_SINT: return 3; + case VK_FORMAT_R8G8B8_SRGB: return 3; + case VK_FORMAT_B8G8R8_UNORM: return 3; + case VK_FORMAT_B8G8R8_SNORM: return 3; + case VK_FORMAT_B8G8R8_USCALED: return 3; + case VK_FORMAT_B8G8R8_SSCALED: return 3; + case VK_FORMAT_B8G8R8_UINT: return 3; + case VK_FORMAT_B8G8R8_SINT: return 3; + case VK_FORMAT_B8G8R8_SRGB: return 3; + case VK_FORMAT_R8G8B8A8_UNORM: return 4; + case VK_FORMAT_R8G8B8A8_SNORM: return 4; + case VK_FORMAT_R8G8B8A8_USCALED: return 4; + case VK_FORMAT_R8G8B8A8_SSCALED: return 4; + case VK_FORMAT_R8G8B8A8_UINT: return 4; + case VK_FORMAT_R8G8B8A8_SINT: return 4; + case VK_FORMAT_R8G8B8A8_SRGB: return 4; + case VK_FORMAT_B8G8R8A8_UNORM: return 4; + case VK_FORMAT_B8G8R8A8_SNORM: return 4; + case VK_FORMAT_B8G8R8A8_USCALED: return 4; + case VK_FORMAT_B8G8R8A8_SSCALED: return 4; + case VK_FORMAT_B8G8R8A8_UINT: return 4; + case VK_FORMAT_B8G8R8A8_SINT: return 4; + case VK_FORMAT_B8G8R8A8_SRGB: return 4; + case VK_FORMAT_A8B8G8R8_UNORM_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SNORM_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_USCALED_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SSCALED_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_UINT_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SINT_PACK32: return 4; + case VK_FORMAT_A8B8G8R8_SRGB_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_SNORM_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_USCALED_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_SSCALED_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_UINT_PACK32: return 4; + case VK_FORMAT_A2R10G10B10_SINT_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_UNORM_PACK32: 
return 4; + case VK_FORMAT_A2B10G10R10_SNORM_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_USCALED_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_SSCALED_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_UINT_PACK32: return 4; + case VK_FORMAT_A2B10G10R10_SINT_PACK32: return 4; + case VK_FORMAT_R16_UNORM: return 2; + case VK_FORMAT_R16_SNORM: return 2; + case VK_FORMAT_R16_USCALED: return 2; + case VK_FORMAT_R16_SSCALED: return 2; + case VK_FORMAT_R16_UINT: return 2; + case VK_FORMAT_R16_SINT: return 2; + case VK_FORMAT_R16_SFLOAT: return 2; + case VK_FORMAT_R16G16_UNORM: return 4; + case VK_FORMAT_R16G16_SNORM: return 4; + case VK_FORMAT_R16G16_USCALED: return 4; + case VK_FORMAT_R16G16_SSCALED: return 4; + case VK_FORMAT_R16G16_UINT: return 4; + case VK_FORMAT_R16G16_SINT: return 4; + case VK_FORMAT_R16G16_SFLOAT: return 4; + case VK_FORMAT_R16G16B16_UNORM: return 6; + case VK_FORMAT_R16G16B16_SNORM: return 6; + case VK_FORMAT_R16G16B16_USCALED: return 6; + case VK_FORMAT_R16G16B16_SSCALED: return 6; + case VK_FORMAT_R16G16B16_UINT: return 6; + case VK_FORMAT_R16G16B16_SINT: return 6; + case VK_FORMAT_R16G16B16_SFLOAT: return 6; + case VK_FORMAT_R16G16B16A16_UNORM: return 8; + case VK_FORMAT_R16G16B16A16_SNORM: return 8; + case VK_FORMAT_R16G16B16A16_USCALED: return 8; + case VK_FORMAT_R16G16B16A16_SSCALED: return 8; + case VK_FORMAT_R16G16B16A16_UINT: return 8; + case VK_FORMAT_R16G16B16A16_SINT: return 8; + case VK_FORMAT_R16G16B16A16_SFLOAT: return 8; + case VK_FORMAT_R32_UINT: return 4; + case VK_FORMAT_R32_SINT: return 4; + case VK_FORMAT_R32_SFLOAT: return 4; + case VK_FORMAT_R32G32_UINT: return 8; + case VK_FORMAT_R32G32_SINT: return 8; + case VK_FORMAT_R32G32_SFLOAT: return 8; + case VK_FORMAT_R32G32B32_UINT: return 12; + case VK_FORMAT_R32G32B32_SINT: return 12; + case VK_FORMAT_R32G32B32_SFLOAT: return 12; + case VK_FORMAT_R32G32B32A32_UINT: return 16; + case VK_FORMAT_R32G32B32A32_SINT: return 16; + case VK_FORMAT_R32G32B32A32_SFLOAT: return 16; + case VK_FORMAT_R64_UINT: return 8; + case VK_FORMAT_R64_SINT: return 8; + case VK_FORMAT_R64_SFLOAT: return 8; + case VK_FORMAT_R64G64_UINT: return 16; + case VK_FORMAT_R64G64_SINT: return 16; + case VK_FORMAT_R64G64_SFLOAT: return 16; + case VK_FORMAT_R64G64B64_UINT: return 24; + case VK_FORMAT_R64G64B64_SINT: return 24; + case VK_FORMAT_R64G64B64_SFLOAT: return 24; + case VK_FORMAT_R64G64B64A64_UINT: return 32; + case VK_FORMAT_R64G64B64A64_SINT: return 32; + case VK_FORMAT_R64G64B64A64_SFLOAT: return 32; + case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return 4; + case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return 4; + + default: return 0; + } +} + +const char* kvfVerbaliseVkResult(VkResult result) +{ + switch(result) + { + case VK_SUCCESS: return "Success"; + case VK_NOT_READY: return "A fence or query has not yet completed"; + case VK_TIMEOUT: return "A wait operation has not completed in the specified time"; + case VK_EVENT_SET: return "An event is signaled"; + case VK_EVENT_RESET: return "An event is unsignaled"; + case VK_INCOMPLETE: return "A return array was too small for the result"; + case VK_ERROR_OUT_OF_HOST_MEMORY: return "A host memory allocation has failed"; + case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "A device memory allocation has failed"; + case VK_ERROR_INITIALIZATION_FAILED: return "Initialization of an object could not be completed for implementation-specific reasons"; + case VK_ERROR_DEVICE_LOST: return "The logical or physical device has been lost"; + case VK_ERROR_MEMORY_MAP_FAILED: return "Mapping of a memory object has 
failed"; + case VK_ERROR_LAYER_NOT_PRESENT: return "A requested layer is not present or could not be loaded"; + case VK_ERROR_EXTENSION_NOT_PRESENT: return "A requested extension is not supported"; + case VK_ERROR_FEATURE_NOT_PRESENT: return "A requested feature is not supported"; + case VK_ERROR_INCOMPATIBLE_DRIVER: return "The requested version of Vulkan is not supported by the driver or is otherwise incompatible"; + case VK_ERROR_TOO_MANY_OBJECTS: return "Too many objects of the type have already been created"; + case VK_ERROR_FORMAT_NOT_SUPPORTED: return "A requested format is not supported on this device"; + case VK_ERROR_SURFACE_LOST_KHR: return "A surface is no longer available"; + case VK_SUBOPTIMAL_KHR: return "A swapchain no longer matches the surface properties exactly, but can still be used"; + case VK_ERROR_OUT_OF_DATE_KHR: return "A surface has changed in such a way that it is no longer compatible with the swapchain"; + case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: return "The display used by a swapchain does not use the same presentable image layout"; + case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: return "The requested window is already connected to a VkSurfaceKHR, or to some other non-Vulkan API"; + case VK_ERROR_VALIDATION_FAILED_EXT: return "A validation layer found an error"; + + default: return "Unknown Vulkan error"; + } + return NULL; // just to avoid warnings +} + +#ifdef KVF_ENABLE_VALIDATION_LAYERS + bool __kvfCheckValidationLayerSupport() + { + uint32_t layer_count; + KVF_GET_GLOBAL_FUNCTION(vkEnumerateInstanceLayerProperties)(&layer_count, NULL); + VkLayerProperties* available_layers = (VkLayerProperties*)KVF_MALLOC(sizeof(VkLayerProperties) * layer_count); + KVF_ASSERT(available_layers != NULL && "allocation failed :("); + KVF_GET_GLOBAL_FUNCTION(vkEnumerateInstanceLayerProperties)(&layer_count, available_layers); + for(size_t i = 0; i < __kvf_extra_layers_count; i++) + { + bool found = false; + for(size_t j = 0; j < layer_count; j++) + { + if(strcmp(available_layers[j].layerName, __kvf_extra_layers[i]) == 0) + { + found = true; + break; + } + } + if(!found) + { + KVF_FREE(available_layers); + return false; + } + } + KVF_FREE(available_layers); + return true; + } + + VKAPI_ATTR VkBool32 VKAPI_CALL __kvfDebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData) + { + (void)messageType; + (void)pUserData; + if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) + { + if(__kvf_validation_error_callback != NULL) + { + char buffer[4096]; + snprintf(buffer, 4096, "KVF Vulkan validation error : %s", pCallbackData->pMessage); + __kvf_validation_error_callback(buffer); + return VK_FALSE; + } + fprintf(stderr, "\nKVF Vulkan validation error : %s\n", pCallbackData->pMessage); + } + else if(messageSeverity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) + { + if(__kvf_validation_warning_callback != NULL) + { + char buffer[4096]; + snprintf(buffer, 4096, "KVF Vulkan validation warning : %s", pCallbackData->pMessage); + __kvf_validation_warning_callback(buffer); + return VK_FALSE; + } + fprintf(stderr, "\nKVF Vulkan validation warning : %s\n", pCallbackData->pMessage); + } + return VK_FALSE; + } + + void __kvfPopulateDebugMessengerCreateInfo(VkDebugUtilsMessengerCreateInfoEXT* create_info) + { + create_info->sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + create_info->messageSeverity = 
VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + create_info->messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + create_info->pfnUserCallback = __kvfDebugCallback; + } + + VkResult __kvfCreateDebugUtilsMessengerEXT(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* create_info, VkDebugUtilsMessengerEXT* messenger) + { + PFN_vkCreateDebugUtilsMessengerEXT func = (PFN_vkCreateDebugUtilsMessengerEXT)KVF_GET_GLOBAL_FUNCTION(vkGetInstanceProcAddr)(instance, "vkCreateDebugUtilsMessengerEXT"); + return func ? func(instance, create_info, NULL, messenger) : VK_ERROR_EXTENSION_NOT_PRESENT; + } + + void __kvfInitValidationLayers(VkInstance instance) + { + uint32_t extension_count; + KVF_GET_GLOBAL_FUNCTION(vkEnumerateInstanceExtensionProperties)(NULL, &extension_count, NULL); + VkExtensionProperties* extensions = (VkExtensionProperties*)KVF_MALLOC(extension_count * sizeof(VkExtensionProperties)); + KVF_ASSERT(extensions != NULL && "allocation failed :("); + KVF_GET_GLOBAL_FUNCTION(vkEnumerateInstanceExtensionProperties)(NULL, &extension_count, extensions); + bool extension_found = false; + for(uint32_t i = 0; i < extension_count; i++) + { + if(strcmp(extensions[i].extensionName, VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0) + { + extension_found = true; + break; + } + } + if(!extension_found) + { + if(__kvf_validation_warning_callback != NULL) + { + char buffer[1024]; + snprintf(buffer, 1024, "KVF Vulkan warning: %s is not present; cannot enable validation layers", VK_EXT_DEBUG_UTILS_EXTENSION_NAME); + __kvf_validation_warning_callback(buffer); + return; + } + printf("KVF Vulkan warning: %s is not present; cannot enable validation layers", VK_EXT_DEBUG_UTILS_EXTENSION_NAME); + KVF_FREE(extensions); + return; + } + VkDebugUtilsMessengerCreateInfoEXT create_info = {}; + __kvfPopulateDebugMessengerCreateInfo(&create_info); + __kvfCheckVk(__kvfCreateDebugUtilsMessengerEXT(instance, &create_info, &__kvf_debug_messenger)); + } + + void __kvfDestroyDebugUtilsMessengerEXT(VkInstance instance) + { + PFN_vkDestroyDebugUtilsMessengerEXT func = (PFN_vkDestroyDebugUtilsMessengerEXT)KVF_GET_GLOBAL_FUNCTION(vkGetInstanceProcAddr)(instance, "vkDestroyDebugUtilsMessengerEXT"); + if(func) + func(instance, __kvf_debug_messenger, NULL); + } +#endif // KVF_ENABLE_VALIDATION_LAYERS + +void kvfAddLayer(const char* layer) +{ + #ifdef KVF_ENABLE_VALIDATION_LAYERS + __kvf_extra_layers = (char**)KVF_REALLOC(__kvf_extra_layers, sizeof(char*) * (__kvf_extra_layers_count + 1)); + KVF_ASSERT(__kvf_extra_layers != NULL); + __kvf_extra_layers[__kvf_extra_layers_count] = (char*)KVF_MALLOC(strlen(layer) + 1); + KVF_ASSERT(__kvf_extra_layers[__kvf_extra_layers_count] != NULL && "allocation failed :("); + strcpy(__kvf_extra_layers[__kvf_extra_layers_count], layer); + __kvf_extra_layers_count++; + #else + (void)layer; + if(__kvf_validation_error_callback != NULL) + { + char buffer[4096]; + snprintf(buffer, 4096, "KVF Vulkan validation error : cannot add extra layers, validation layers are not enabled. Try adding #define KVF_ENABLE_VALIDATION_LAYERS"); + __kvf_validation_error_callback(buffer); + return; + } + fprintf(stderr, "KVF Vulkan validation error : cannot add extra layers, validation layers are not enabled. 
Try adding #define KVF_ENABLE_VALIDATION_LAYERS"); + #endif +} + +VkInstance kvfCreateInstance(const char** extensions_enabled, uint32_t extensions_count) +{ + VkInstance instance = VK_NULL_HANDLE; + + VkInstanceCreateInfo create_info = {}; + create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; + create_info.pApplicationInfo = NULL; + create_info.enabledExtensionCount = extensions_count; + create_info.ppEnabledExtensionNames = extensions_enabled; + create_info.enabledLayerCount = 0; + create_info.ppEnabledLayerNames = NULL; + create_info.pNext = NULL; + #if defined(VK_USE_PLATFORM_MACOS_MVK) || defined(VK_USE_PLATFORM_METAL_EXT) + create_info.flags = VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR; + #else + create_info.flags = 0; + #endif + +#ifdef KVF_ENABLE_VALIDATION_LAYERS + kvfAddLayer("VK_LAYER_KHRONOS_validation"); + const char** new_extension_set = NULL; + VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {}; + if(__kvfCheckValidationLayerSupport()) + { + __kvfPopulateDebugMessengerCreateInfo(&debug_create_info); + new_extension_set = (const char**)KVF_MALLOC(sizeof(char*) * (extensions_count + 1)); + KVF_ASSERT(new_extension_set != NULL && "allocation failed :("); + memcpy(new_extension_set, extensions_enabled, sizeof(char*) * extensions_count); + new_extension_set[extensions_count] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME; + + create_info.enabledExtensionCount = extensions_count + 1; + create_info.ppEnabledExtensionNames = new_extension_set; + create_info.enabledLayerCount = __kvf_extra_layers_count; + create_info.ppEnabledLayerNames = (const char* const*)__kvf_extra_layers; + create_info.pNext = (VkDebugUtilsMessengerCreateInfoEXT*)&debug_create_info; + } +#endif + + __kvfCheckVk(KVF_GET_GLOBAL_FUNCTION(vkCreateInstance)(&create_info, NULL, &instance)); +#ifdef KVF_ENABLE_VALIDATION_LAYERS + KVF_FREE(new_extension_set); + __kvfInitValidationLayers(instance); +#endif + return instance; +} + +void kvfDestroyInstance(VkInstance instance) +{ + if(instance == VK_NULL_HANDLE) + return; +#ifdef KVF_ENABLE_VALIDATION_LAYERS + __kvfDestroyDebugUtilsMessengerEXT(instance); + for(size_t i = 0; i < __kvf_extra_layers_count; i++) + KVF_FREE(__kvf_extra_layers[i]); + KVF_FREE(__kvf_extra_layers); + __kvf_extra_layers_count = 0; +#endif + KVF_GET_INSTANCE_FUNCTION(vkDestroyInstance)(instance, NULL); +} + +__KvfQueueFamilies __kvfFindQueueFamilies(VkPhysicalDevice physical, VkSurfaceKHR surface) +{ + __KvfQueueFamilies queues = { -1, -1, -1 }; + uint32_t queue_family_count; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)(physical, &queue_family_count, NULL); + VkQueueFamilyProperties* queue_families = (VkQueueFamilyProperties*)KVF_MALLOC(sizeof(VkQueueFamilyProperties) * queue_family_count); + KVF_ASSERT(queue_families != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)(physical, &queue_family_count, queue_families); + + for(uint32_t i = 0; i < queue_family_count; i++) + { + // try to find a queue family index that supports compute but not graphics + if(queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT && (queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) == 0) + queues.compute = i; + else if(queues.compute != -1 && queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT) // else just find a compute queue + queues.compute = i; + if(queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) + queues.graphics = i; + #ifndef KVF_NO_KHR + VkBool32 present_support = false; + if(surface != VK_NULL_HANDLE) + { + 
KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceSupportKHR)(physical, i, surface, &present_support); + if(present_support) + queues.present = i; + if(queues.graphics != -1 && queues.present != -1 && queues.compute != -1) + break; + } + else if(queues.graphics != -1 && queues.compute != -1) + break; + #else + if(queues.graphics != -1 && queues.compute != -1) + break; + #endif + } + KVF_FREE(queue_families); + return queues; +} + +VkPhysicalDevice kvfPickFirstPhysicalDevice(VkInstance instance, VkSurfaceKHR surface) +{ + uint32_t device_count; + VkPhysicalDevice* devices = NULL; + VkPhysicalDevice chosen_one = VK_NULL_HANDLE; + + KVF_ASSERT(instance != VK_NULL_HANDLE); + + KVF_GET_INSTANCE_FUNCTION(vkEnumeratePhysicalDevices)(instance, &device_count, NULL); + devices = (VkPhysicalDevice*)KVF_MALLOC(sizeof(VkPhysicalDevice) * device_count + 1); + KVF_ASSERT(devices != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkEnumeratePhysicalDevices)(instance, &device_count, devices); + chosen_one = devices[0]; + KVF_FREE(devices); + __KvfQueueFamilies queues = __kvfFindQueueFamilies(chosen_one, surface); + __kvfAddDeviceToArray(chosen_one, queues.graphics, queues.present, queues.compute); + return chosen_one; +} + +VkPhysicalDevice kvfPickGoodDefaultPhysicalDevice(VkInstance instance, VkSurfaceKHR surface) +{ + const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + return kvfPickGoodPhysicalDevice(instance, surface, extensions, sizeof(extensions) / sizeof(extensions[0])); +} + +int32_t __kvfScorePhysicalDevice(VkPhysicalDevice device, VkSurfaceKHR surface, const char** device_extensions, uint32_t device_extensions_count) +{ + /* Check extensions support */ + uint32_t extension_count; + KVF_GET_INSTANCE_FUNCTION(vkEnumerateDeviceExtensionProperties)(device, NULL, &extension_count, NULL); + VkExtensionProperties* props = (VkExtensionProperties*)KVF_MALLOC(sizeof(VkExtensionProperties) * extension_count + 1); + KVF_ASSERT(props != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkEnumerateDeviceExtensionProperties)(device, NULL, &extension_count, props); + + bool are_there_required_device_extensions = true; + for(uint32_t j = 0; j < device_extensions_count; j++) + { + bool is_there_extension = false; + for(uint32_t k = 0; k < extension_count; k++) + { + if(strcmp(device_extensions[j], props[k].extensionName) == 0) + { + is_there_extension = true; + break; + } + } + if(is_there_extension == false) + { + are_there_required_device_extensions = false; + break; + } + } + KVF_FREE(props); + if(are_there_required_device_extensions == false) + return -1; + + /* Check Queue Families Support */ + __KvfQueueFamilies queues = __kvfFindQueueFamilies(device, surface); + if(queues.graphics == -1 || (surface != VK_NULL_HANDLE && queues.present == -1)) + return -1; + + #ifndef KVF_NO_KHR + if(surface != VK_NULL_HANDLE) + { + /* Check surface formats counts */ + uint32_t format_count; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR)(device, surface, &format_count, NULL); + if(format_count == 0) + return -1; + } + #endif + + VkPhysicalDeviceProperties device_props; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceProperties)(device, &device_props); + + VkPhysicalDeviceFeatures device_features; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceFeatures)(device, &device_features); + + int32_t score = -1; + if(device_props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) + score += 1000; + + if(!device_features.geometryShader) + return -1; + + score += 
device_props.limits.maxImageDimension2D; + score += device_props.limits.maxBoundDescriptorSets; + + return score; +} + +VkPhysicalDevice kvfPickGoodPhysicalDevice(VkInstance instance, VkSurfaceKHR surface, const char** device_extensions, uint32_t device_extensions_count) +{ + VkPhysicalDevice* devices = NULL; + VkPhysicalDevice chosen_one = VK_NULL_HANDLE; + uint32_t device_count; + int32_t best_device_score = -1; + + KVF_ASSERT(instance != VK_NULL_HANDLE); + + KVF_GET_INSTANCE_FUNCTION(vkEnumeratePhysicalDevices)(instance, &device_count, NULL); + devices = (VkPhysicalDevice*)KVF_MALLOC(sizeof(VkPhysicalDevice) * device_count + 1); + KVF_ASSERT(devices != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkEnumeratePhysicalDevices)(instance, &device_count, devices); + + for(uint32_t i = 0; i < device_count; i++) + { + int32_t current_device_score = __kvfScorePhysicalDevice(devices[i], surface, device_extensions, device_extensions_count); + if(current_device_score > best_device_score) + { + best_device_score = current_device_score; + chosen_one = devices[i]; + } + } + KVF_FREE(devices); + if(chosen_one != VK_NULL_HANDLE) + { + __KvfQueueFamilies queues = __kvfFindQueueFamilies(chosen_one, surface); + __kvfAddDeviceToArray(chosen_one, queues.graphics, queues.present, queues.compute); + return chosen_one; + } + return VK_NULL_HANDLE; +} + +VkDevice kvfCreateDefaultDevice(VkPhysicalDevice physical) +{ + const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + VkPhysicalDeviceFeatures device_features = { VK_FALSE }; + return kvfCreateDevice(physical, extensions, sizeof(extensions) / sizeof(extensions[0]), &device_features); +} + +VkDevice kvfCreateDevice(VkPhysicalDevice physical, const char** extensions, uint32_t extensions_count, VkPhysicalDeviceFeatures* features) +{ + const float queue_priority = 1.0f; + + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkPhysicalDevice(physical); + + KVF_ASSERT(kvf_device != NULL); + + uint32_t queue_count = 0; + queue_count += (kvf_device->queues.graphics != -1); + queue_count += (kvf_device->queues.present != -1); + queue_count += (kvf_device->queues.compute != -1); + + VkDeviceQueueCreateInfo* queue_create_infos = (VkDeviceQueueCreateInfo*)KVF_MALLOC(queue_count * sizeof(VkDeviceQueueCreateInfo)); + KVF_ASSERT(queue_create_infos != NULL && "allocation failed :("); + size_t i = 0; + if(kvf_device->queues.graphics != -1) + { + queue_create_infos[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_infos[i].queueFamilyIndex = kvf_device->queues.graphics; + queue_create_infos[i].queueCount = 1; + queue_create_infos[i].pQueuePriorities = &queue_priority; + queue_create_infos[i].flags = 0; + queue_create_infos[i].pNext = NULL; + i++; + } + if(kvf_device->queues.present != -1 && kvf_device->queues.present != kvf_device->queues.graphics) + { + queue_create_infos[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_infos[i].queueFamilyIndex = kvf_device->queues.present; + queue_create_infos[i].queueCount = 1; + queue_create_infos[i].pQueuePriorities = &queue_priority; + queue_create_infos[i].flags = 0; + queue_create_infos[i].pNext = NULL; + i++; + } + if(kvf_device->queues.compute != -1 && kvf_device->queues.present != kvf_device->queues.compute && kvf_device->queues.graphics != kvf_device->queues.compute) + { + queue_create_infos[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_infos[i].queueFamilyIndex = kvf_device->queues.compute; + queue_create_infos[i].queueCount = 1; + 
queue_create_infos[i].pQueuePriorities = &queue_priority; + queue_create_infos[i].flags = 0; + queue_create_infos[i].pNext = NULL; + i++; + } + + VkDeviceCreateInfo createInfo; + createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; + createInfo.queueCreateInfoCount = i; + createInfo.pQueueCreateInfos = queue_create_infos; + createInfo.pEnabledFeatures = features; + createInfo.enabledExtensionCount = extensions_count; + createInfo.ppEnabledExtensionNames = extensions; + createInfo.enabledLayerCount = 0; + createInfo.ppEnabledLayerNames = NULL; + createInfo.flags = 0; + createInfo.pNext = NULL; + + VkDevice device; + __kvfCheckVk(KVF_GET_INSTANCE_FUNCTION(vkCreateDevice)(physical, &createInfo, NULL, &device)); + #ifndef KVF_IMPL_VK_NO_PROTOTYPES + __kvfCompleteDevice(physical, device); + #endif + + return device; +} + +VkDevice kvfCreateDefaultDevicePhysicalDeviceAndCustomQueues(VkPhysicalDevice physical, int32_t graphics_queue, int32_t present_queue, int32_t compute_queue) +{ + const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME }; + VkPhysicalDeviceFeatures device_features = { VK_FALSE }; + return kvfCreateDeviceCustomPhysicalDeviceAndQueues(physical, extensions, sizeof(extensions) / sizeof(extensions[0]), &device_features, graphics_queue, present_queue, compute_queue); +} + +VkDevice kvfCreateDeviceCustomPhysicalDeviceAndQueues(VkPhysicalDevice physical, const char** extensions, uint32_t extensions_count, VkPhysicalDeviceFeatures* features, int32_t graphics_queue, int32_t present_queue, int32_t compute_queue) +{ + const float queue_priority = 1.0f; + + uint32_t queue_count = 0; + queue_count += (graphics_queue != -1); + queue_count += (present_queue != -1); + queue_count += (compute_queue != -1); + + VkDeviceQueueCreateInfo* queue_create_infos = (VkDeviceQueueCreateInfo*)KVF_MALLOC(queue_count * sizeof(VkDeviceQueueCreateInfo)); + KVF_ASSERT(queue_create_infos != NULL && "allocation failed :("); + size_t i = 0; + if(graphics_queue != -1) + { + queue_create_infos[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_infos[i].queueFamilyIndex = graphics_queue; + queue_create_infos[i].queueCount = 1; + queue_create_infos[i].pQueuePriorities = &queue_priority; + queue_create_infos[i].flags = 0; + queue_create_infos[i].pNext = NULL; + i++; + } + if(present_queue != -1 && present_queue != graphics_queue) + { + queue_create_infos[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_infos[i].queueFamilyIndex = present_queue; + queue_create_infos[i].queueCount = 1; + queue_create_infos[i].pQueuePriorities = &queue_priority; + queue_create_infos[i].flags = 0; + queue_create_infos[i].pNext = NULL; + i++; + } + if(compute_queue != -1 && present_queue != compute_queue && graphics_queue != compute_queue) + { + queue_create_infos[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + queue_create_infos[i].queueFamilyIndex = compute_queue; + queue_create_infos[i].queueCount = 1; + queue_create_infos[i].pQueuePriorities = &queue_priority; + queue_create_infos[i].flags = 0; + queue_create_infos[i].pNext = NULL; + i++; + } + + VkDeviceCreateInfo createInfo; + createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; + createInfo.queueCreateInfoCount = i; // only count the create infos actually filled; duplicate family indices are skipped above + createInfo.pQueueCreateInfos = queue_create_infos; + createInfo.pEnabledFeatures = features; + createInfo.enabledExtensionCount = extensions_count; + createInfo.ppEnabledExtensionNames = extensions; + createInfo.enabledLayerCount = 0; + createInfo.ppEnabledLayerNames = NULL; + createInfo.flags = 0; 
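+ // Note: device-level layers are deprecated in Vulkan; enabledLayerCount / ppEnabledLayerNames above are left at 0 / NULL and are ignored by up-to-date implementations.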
+ createInfo.pNext = NULL; + + VkDevice device; + __kvfCheckVk(KVF_GET_INSTANCE_FUNCTION(vkCreateDevice)(physical, &createInfo, NULL, &device)); + #ifndef KVF_IMPL_VK_NO_PROTOTYPES + __kvfCompleteDeviceCustomPhysicalDeviceAndQueues(physical, device, graphics_queue, present_queue, compute_queue); + #endif + + return device; +} + +#ifdef KVF_IMPL_VK_NO_PROTOTYPES + void kvfPassDeviceVulkanFunctionPointers(VkPhysicalDevice physical, VkDevice device, const KvfDeviceVulkanFunctions* fns) + { + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(fns != NULL); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkPhysicalDevice(physical); + KVF_ASSERT(kvf_device != NULL); + kvf_device->fns = *fns; + __kvfCompleteDevice(physical, device); + } +#endif + +void kvfDestroyDevice(VkDevice device) +{ + if(device == VK_NULL_HANDLE) + return; + __kvfDestroyDevice(device); +} + +VkQueue kvfGetDeviceQueue(VkDevice device, KvfQueueType queue) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + VkQueue vk_queue = VK_NULL_HANDLE; + if(queue == KVF_GRAPHICS_QUEUE) + { + KVF_ASSERT(kvf_device->queues.graphics != -1); + KVF_GET_DEVICE_FUNCTION(vkGetDeviceQueue)(device, kvf_device->queues.graphics, 0, &vk_queue); + } + else if(queue == KVF_PRESENT_QUEUE) + { + KVF_ASSERT(kvf_device->queues.present != -1); + KVF_GET_DEVICE_FUNCTION(vkGetDeviceQueue)(device, kvf_device->queues.present, 0, &vk_queue); + } + else if(queue == KVF_COMPUTE_QUEUE) + { + KVF_ASSERT(kvf_device->queues.compute != -1); + KVF_GET_DEVICE_FUNCTION(vkGetDeviceQueue)(device, kvf_device->queues.compute, 0, &vk_queue); + } + return vk_queue; +} + +uint32_t kvfGetDeviceQueueFamily(VkDevice device, KvfQueueType queue) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + if(queue == KVF_GRAPHICS_QUEUE) + return kvf_device->queues.graphics; + else if(queue == KVF_PRESENT_QUEUE) + return kvf_device->queues.present; + else if(queue == KVF_COMPUTE_QUEUE) + return kvf_device->queues.compute; + KVF_ASSERT(false && "invalid queue"); + return 0; +} + +#ifndef KVF_NO_KHR + bool kvfQueuePresentKHR(VkDevice device, VkSemaphore wait, VkSwapchainKHR swapchain, uint32_t image_index) + { + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkPresentInfoKHR present_info = {}; + present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; + present_info.waitSemaphoreCount = 1; + present_info.pWaitSemaphores = &wait; + present_info.swapchainCount = 1; + present_info.pSwapchains = &swapchain; + present_info.pImageIndices = &image_index; + VkResult result = KVF_GET_DEVICE_FUNCTION(vkQueuePresentKHR)(kvfGetDeviceQueue(device, KVF_PRESENT_QUEUE), &present_info); + if(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) + return false; + else + __kvfCheckVk(result); + return true; + } +#endif + +int32_t kvfFindDeviceQueueFamily(VkPhysicalDevice physical, KvfQueueType type) +{ + KVF_ASSERT(physical != VK_NULL_HANDLE); + KVF_ASSERT(type != KVF_PRESENT_QUEUE && "Use kvfFindDeviceQueueFamilyKHR to find present queue"); + + uint32_t queue_family_count; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)(physical, &queue_family_count, NULL); + VkQueueFamilyProperties* queue_families = 
(VkQueueFamilyProperties*)KVF_MALLOC(sizeof(VkQueueFamilyProperties) * queue_family_count); + KVF_ASSERT(queue_families != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)(physical, &queue_family_count, queue_families); + + int32_t queue = -1; + + for(uint32_t i = 0; i < queue_family_count; i++) + { + if(type == KVF_COMPUTE_QUEUE) + { + if(queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT && (queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) == 0) + queue = i; + else if(queue != -1 && queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT) // else just find a compute queue + queue = i; + } + else if(type == KVF_GRAPHICS_QUEUE) + { + if(queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) + queue = i; + } + + if(queue != -1) + break; + } + KVF_FREE(queue_families); + return queue; +} + +#ifndef KVF_NO_KHR + int32_t kvfFindDeviceQueueFamilyKHR(VkPhysicalDevice physical, VkSurfaceKHR surface, KvfQueueType type) + { + KVF_ASSERT(physical != VK_NULL_HANDLE); + KVF_ASSERT(surface != VK_NULL_HANDLE); + + if(type != KVF_PRESENT_QUEUE) + return kvfFindDeviceQueueFamily(physical, type); + + uint32_t queue_family_count; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)(physical, &queue_family_count, NULL); + VkQueueFamilyProperties* queue_families = (VkQueueFamilyProperties*)KVF_MALLOC(sizeof(VkQueueFamilyProperties) * queue_family_count); + KVF_ASSERT(queue_families != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)(physical, &queue_family_count, queue_families); + + int32_t queue = -1; + + for(uint32_t i = 0; i < queue_family_count; i++) + { + VkBool32 present_support = false; + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceSupportKHR)(physical, i, surface, &present_support); + if(present_support) + queue = i; + if(queue != -1) + break; + } + KVF_FREE(queue_families); + return queue; + } +#endif + +VkFence kvfCreateFence(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkFenceCreateInfo fence_info = {}; + fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; + fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT; + VkFence fence; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateFence)(device, &fence_info, NULL, &fence)); + return fence; +} + +void kvfWaitForFence(VkDevice device, VkFence fence) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(fence != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkWaitForFences)(device, 1, &fence, VK_TRUE, UINT64_MAX); +} + +void kvfDestroyFence(VkDevice device, VkFence fence) +{ + if(fence == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyFence)(device, fence, NULL); +} + +VkSemaphore kvfCreateSemaphore(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkSemaphoreCreateInfo semaphore_info = {}; + semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; + VkSemaphore 
semaphore; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateSemaphore)(device, &semaphore_info, NULL, &semaphore)); + return semaphore; +} + +void kvfDestroySemaphore(VkDevice device, VkSemaphore semaphore) +{ + if(semaphore == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroySemaphore)(device, semaphore, NULL); +} + +#ifndef KVF_NO_KHR + __KvfSwapchainSupportInternal __kvfQuerySwapchainSupport(VkPhysicalDevice physical, VkSurfaceKHR surface) + { + __KvfSwapchainSupportInternal support; + + __kvfCheckVk(KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(physical, surface, &support.capabilities)); + + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR)(physical, surface, &support.formats_count, NULL); + if(support.formats_count != 0) + { + support.formats = (VkSurfaceFormatKHR*)KVF_MALLOC(sizeof(VkSurfaceFormatKHR) * support.formats_count); + KVF_ASSERT(support.formats != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR)(physical, surface, &support.formats_count, support.formats); + } + + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfacePresentModesKHR)(physical, surface, &support.present_modes_count, NULL); + if(support.present_modes_count != 0) + { + support.present_modes = (VkPresentModeKHR*)KVF_MALLOC(sizeof(VkPresentModeKHR) * support.present_modes_count); + KVF_ASSERT(support.present_modes != NULL && "allocation failed :("); + KVF_GET_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfacePresentModesKHR)(physical, surface, &support.present_modes_count, support.present_modes); + } + return support; + } + + VkSurfaceFormatKHR __kvfChooseSwapSurfaceFormat(__KvfSwapchainSupportInternal* support) + { + for(uint32_t i = 0; i < support->formats_count; i++) + { + if(support->formats[i].format == VK_FORMAT_R8G8B8A8_SRGB && support->formats[i].colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) + return support->formats[i]; + } + return support->formats[0]; + } + + VkPresentModeKHR __kvfChooseSwapPresentMode(__KvfSwapchainSupportInternal* support, bool try_vsync) + { + if(try_vsync) + return VK_PRESENT_MODE_FIFO_KHR; + bool mailbox_supported = false; + bool immediate_supported = false; + for(uint32_t i = 0; i < support->present_modes_count; i++) + { + if(support->present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR) + mailbox_supported = true; + if(support->present_modes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR) + immediate_supported = true; + } + if(mailbox_supported) + return VK_PRESENT_MODE_MAILBOX_KHR; + if(immediate_supported) + return VK_PRESENT_MODE_IMMEDIATE_KHR; // Best mode for low latency + return VK_PRESENT_MODE_FIFO_KHR; + } + + uint32_t __kvfClamp(uint32_t i, uint32_t min, uint32_t max) + { + const uint32_t t = i < min ? min : i; + return t > max ? 
max : t; + } + + VkSwapchainKHR kvfCreateSwapchainKHR(VkDevice device, VkPhysicalDevice physical, VkSurfaceKHR surface, VkExtent2D extent, VkSwapchainKHR old_swapchain, bool try_vsync) + { + KVF_ASSERT(device != VK_NULL_HANDLE); + VkSwapchainKHR swapchain; + __KvfSwapchainSupportInternal support = __kvfQuerySwapchainSupport(physical, surface); + + VkSurfaceFormatKHR surfaceFormat = __kvfChooseSwapSurfaceFormat(&support); + VkPresentModeKHR present_mode = __kvfChooseSwapPresentMode(&support, try_vsync); + + uint32_t image_count = support.capabilities.minImageCount + 1; + if(support.capabilities.maxImageCount > 0 && image_count > support.capabilities.maxImageCount) + image_count = support.capabilities.maxImageCount; + + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + + uint32_t queue_family_indices[] = { (uint32_t)kvf_device->queues.graphics, (uint32_t)kvf_device->queues.present }; + + if(support.capabilities.currentExtent.width != UINT32_MAX) + extent = support.capabilities.currentExtent; + else + { + extent.width = __kvfClamp(extent.width, support.capabilities.minImageExtent.width, support.capabilities.maxImageExtent.width); + extent.height = __kvfClamp(extent.height, support.capabilities.minImageExtent.height, support.capabilities.maxImageExtent.height); + } + + VkSwapchainCreateInfoKHR createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; + createInfo.surface = surface; + createInfo.minImageCount = image_count; + createInfo.imageFormat = surfaceFormat.format; + createInfo.imageColorSpace = surfaceFormat.colorSpace; + createInfo.imageExtent = extent; + createInfo.imageArrayLayers = 1; + createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + createInfo.preTransform = support.capabilities.currentTransform; + createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; + createInfo.presentMode = present_mode; + createInfo.clipped = VK_TRUE; + createInfo.oldSwapchain = old_swapchain; + + if(kvf_device->queues.graphics != kvf_device->queues.present) + { + createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT; + createInfo.queueFamilyIndexCount = 2; + createInfo.pQueueFamilyIndices = queue_family_indices; + } + else + createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; + + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateSwapchainKHR)(device, &createInfo, NULL, &swapchain)); + + uint32_t images_count; + KVF_GET_DEVICE_FUNCTION(vkGetSwapchainImagesKHR)(device, swapchain, (uint32_t*)&images_count, NULL); + + __kvfAddSwapchainToArray(swapchain, support, surfaceFormat.format, images_count, extent); + + return swapchain; + } + + VkFormat kvfGetSwapchainImagesFormat(VkSwapchainKHR swapchain) + { + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->images_format; + } + + uint32_t kvfGetSwapchainImagesCount(VkSwapchainKHR swapchain) + { + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->images_count; + } + + uint32_t kvfGetSwapchainMinImagesCount(VkSwapchainKHR swapchain) + { + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->support.capabilities.minImageCount; + } + + VkExtent2D 
kvfGetSwapchainImagesSize(VkSwapchainKHR swapchain) + { + KVF_ASSERT(swapchain != VK_NULL_HANDLE); + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + return kvf_swapchain->images_extent; + } + + void kvfDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain) + { + if(swapchain == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + __kvfDestroySwapchain(device, swapchain); + } +#endif + +VkImage kvfCreateImage(VkDevice device, uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, KvfImageType type) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkImageCreateInfo image_info = {}; + image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + image_info.imageType = VK_IMAGE_TYPE_2D; + image_info.extent.width = width; + image_info.extent.height = height; + image_info.extent.depth = 1; + image_info.mipLevels = 1; + image_info.arrayLayers = 1; + image_info.format = format; + image_info.tiling = tiling; + image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + image_info.usage = usage; + image_info.samples = VK_SAMPLE_COUNT_1_BIT; + image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + + switch(type) + { + case KVF_IMAGE_CUBE: image_info.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_info.arrayLayers = 6; break; + default: break; + } + + VkImage image; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateImage)(device, &image_info, NULL, &image)); + return image; +} + +void kvfCopyImageToBuffer(VkCommandBuffer cmd, VkBuffer dst, VkImage src, size_t buffer_offset, VkImageAspectFlagBits aspect, VkExtent3D extent) +{ + KVF_ASSERT(cmd != VK_NULL_HANDLE); + KVF_ASSERT(dst != VK_NULL_HANDLE); + KVF_ASSERT(src != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkCommandBuffer(cmd); + KVF_ASSERT(kvf_device != NULL); + #endif + VkOffset3D offset = { 0, 0, 0 }; + VkBufferImageCopy region = {}; + region.bufferOffset = buffer_offset; + region.bufferRowLength = 0; + region.bufferImageHeight = 0; + region.imageSubresource.aspectMask = aspect; + region.imageSubresource.mipLevel = 0; + region.imageSubresource.baseArrayLayer = 0; + region.imageSubresource.layerCount = 1; + region.imageOffset = offset; + region.imageExtent = extent; + KVF_GET_DEVICE_FUNCTION(vkCmdCopyImageToBuffer)(cmd, src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst, 1, &region); +} + +void kvfDestroyImage(VkDevice device, VkImage image) +{ + if(image == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyImage)(device, image, NULL); +} + +VkImageView kvfCreateImageView(VkDevice device, VkImage image, VkFormat format, VkImageViewType type, VkImageAspectFlags aspect, int layer_count) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkImageViewCreateInfo create_info = {}; + create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + create_info.image = image; + create_info.viewType = type; + create_info.format = format; + create_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; + 
create_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY; + create_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; + create_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY; + create_info.subresourceRange.aspectMask = aspect; + create_info.subresourceRange.baseMipLevel = 0; + create_info.subresourceRange.levelCount = 1; + create_info.subresourceRange.baseArrayLayer = 0; + create_info.subresourceRange.layerCount = layer_count; + VkImageView view; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateImageView)(device, &create_info, NULL, &view)); + return view; +} + +void kvfDestroyImageView(VkDevice device, VkImageView image_view) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(image_view != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyImageView)(device, image_view, NULL); +} + +void kvfTransitionImageLayout(VkDevice device, VkImage image, KvfImageType type, VkCommandBuffer cmd, VkFormat format, VkImageLayout old_layout, VkImageLayout new_layout, bool is_single_time_cmd_buffer) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(cmd != VK_NULL_HANDLE); + + if(new_layout == old_layout) + return; + + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + + if(is_single_time_cmd_buffer) + kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT); + + VkImageMemoryBarrier barrier = {}; + barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.oldLayout = old_layout; + barrier.newLayout = new_layout; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + barrier.subresourceRange.aspectMask = kvfIsDepthFormat(format) ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.levelCount = 1; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.layerCount = (type == KVF_IMAGE_CUBE ? 
6 : 1); + barrier.srcAccessMask = kvfLayoutToAccessMask(old_layout, false); + barrier.dstAccessMask = kvfLayoutToAccessMask(new_layout, true); + if(kvfIsStencilFormat(format)) + barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT; + + VkPipelineStageFlags source_stage = 0; + if(barrier.oldLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) + source_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + else if(barrier.srcAccessMask != 0) + source_stage = kvfAccessFlagsToPipelineStage(barrier.srcAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); + else + source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; + + VkPipelineStageFlags destination_stage = 0; + if(barrier.newLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) + destination_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + else if(barrier.dstAccessMask != 0) + destination_stage = kvfAccessFlagsToPipelineStage(barrier.dstAccessMask, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); + else + destination_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; + + KVF_GET_DEVICE_FUNCTION(vkCmdPipelineBarrier)(cmd, source_stage, destination_stage, 0, 0, NULL, 0, NULL, 1, &barrier); + + if(is_single_time_cmd_buffer) + { + kvfEndCommandBuffer(cmd); + VkFence fence = kvfCreateFence(device); + kvfSubmitSingleTimeCommandBuffer(device, cmd, KVF_GRAPHICS_QUEUE, fence); + kvfDestroyFence(device, fence); + } +} + +VkSampler kvfCreateSampler(VkDevice device, VkFilter filters, VkSamplerAddressMode address_modes, VkSamplerMipmapMode mipmap_mode) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkSamplerCreateInfo info = {}; + info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; + info.magFilter = filters; + info.minFilter = filters; + info.mipmapMode = mipmap_mode; + info.addressModeU = address_modes; + info.addressModeV = address_modes; + info.addressModeW = address_modes; + info.minLod = -1000; + info.maxLod = 1000; + info.anisotropyEnable = VK_FALSE; + info.maxAnisotropy = 1.0f; + VkSampler sampler; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateSampler)(device, &info, NULL, &sampler)); + return sampler; +} + +void kvfDestroySampler(VkDevice device, VkSampler sampler) +{ + if(sampler == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroySampler)(device, sampler, NULL); +} + +VkBuffer kvfCreateBuffer(VkDevice device, VkBufferUsageFlags usage, VkDeviceSize size) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkBufferCreateInfo buffer_info = {}; + buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + buffer_info.size = size; + buffer_info.usage = usage; + buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + VkBuffer buffer; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateBuffer)(device, &buffer_info, NULL, &buffer)); + return buffer; +} + +void kvfCopyBufferToBuffer(VkCommandBuffer cmd, VkBuffer dst, VkBuffer src, size_t size) +{ + KVF_ASSERT(cmd != VK_NULL_HANDLE); + KVF_ASSERT(dst != VK_NULL_HANDLE); + KVF_ASSERT(src != 
VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkCommandBuffer(cmd); + KVF_ASSERT(kvf_device != NULL); + #endif + VkBufferCopy copy_region = {}; + copy_region.size = size; + KVF_GET_DEVICE_FUNCTION(vkCmdCopyBuffer)(cmd, src, dst, 1, &copy_region); +} + +void kvfCopyBufferToImage(VkCommandBuffer cmd, VkImage dst, VkBuffer src, size_t buffer_offset, VkImageAspectFlagBits aspect, VkExtent3D extent) +{ + KVF_ASSERT(cmd != VK_NULL_HANDLE); + KVF_ASSERT(dst != VK_NULL_HANDLE); + KVF_ASSERT(src != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkCommandBuffer(cmd); + KVF_ASSERT(kvf_device != NULL); + #endif + VkOffset3D offset = { 0, 0, 0 }; + VkBufferImageCopy region = {}; + region.bufferOffset = buffer_offset; + region.bufferRowLength = 0; + region.bufferImageHeight = 0; + region.imageSubresource.aspectMask = aspect; + region.imageSubresource.mipLevel = 0; + region.imageSubresource.baseArrayLayer = 0; + region.imageSubresource.layerCount = 1; + region.imageOffset = offset; + region.imageExtent = extent; + KVF_GET_DEVICE_FUNCTION(vkCmdCopyBufferToImage)(cmd, src, dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); +} + +void kvfDestroyBuffer(VkDevice device, VkBuffer buffer) +{ + if(buffer == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyBuffer)(device, buffer, NULL); +} + +VkFramebuffer kvfCreateFramebuffer(VkDevice device, VkRenderPass render_pass, VkImageView* image_views, size_t image_views_count, VkExtent2D extent) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(image_views != NULL); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + + VkFramebufferCreateInfo framebuffer_info = {}; + framebuffer_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; + framebuffer_info.renderPass = render_pass; + framebuffer_info.attachmentCount = image_views_count; + framebuffer_info.pAttachments = image_views; + framebuffer_info.width = extent.width; + framebuffer_info.height = extent.height; + framebuffer_info.layers = 1; + VkFramebuffer framebuffer = VK_NULL_HANDLE; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateFramebuffer)(device, &framebuffer_info, NULL, &framebuffer)); + __kvfAddFramebufferToArray(framebuffer, extent); + return framebuffer; +} + +VkExtent2D kvfGetFramebufferSize(VkFramebuffer buffer) +{ + __KvfFramebuffer* kvf_framebuffer = __kvfGetKvfFramebufferFromVkFramebuffer(buffer); + KVF_ASSERT(kvf_framebuffer != NULL); + return kvf_framebuffer->extent; +} + +void kvfDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer) +{ + if(framebuffer == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + __kvfDestroyFramebuffer(device, framebuffer); +} + +VkCommandBuffer kvfCreateCommandBuffer(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + return kvfCreateCommandBufferLeveled(device, VK_COMMAND_BUFFER_LEVEL_PRIMARY); +} + +VkCommandBuffer kvfCreateCommandBufferLeveled(VkDevice device, VkCommandBufferLevel level) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + + VkCommandPool pool = kvf_device->cmd_pool; + VkCommandBuffer buffer; + VkCommandBufferAllocateInfo 
alloc_info = {}; + alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; + alloc_info.commandPool = pool; + alloc_info.level = level; + alloc_info.commandBufferCount = 1; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkAllocateCommandBuffers)(device, &alloc_info, &buffer)); + + if(kvf_device->cmd_buffers_size == kvf_device->cmd_buffers_capacity) + { + // Resize the dynamic array if necessary + kvf_device->cmd_buffers_capacity += KVF_COMMAND_POOL_CAPACITY; + kvf_device->cmd_buffers = (VkCommandBuffer*)KVF_REALLOC(kvf_device->cmd_buffers, kvf_device->cmd_buffers_capacity * sizeof(VkCommandBuffer)); + KVF_ASSERT(kvf_device->cmd_buffers != NULL && "allocation failed :("); + } + kvf_device->cmd_buffers[kvf_device->cmd_buffers_size] = buffer; + kvf_device->cmd_buffers_size++; + return buffer; +} + +void kvfBeginCommandBuffer(VkCommandBuffer buffer, VkCommandBufferUsageFlags usage) +{ + KVF_ASSERT(buffer != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkCommandBuffer(buffer); + KVF_ASSERT(kvf_device != NULL); + #endif + VkCommandBufferBeginInfo begin_info = {}; + begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; + begin_info.flags = usage; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkBeginCommandBuffer)(buffer, &begin_info)); +} + +void kvfEndCommandBuffer(VkCommandBuffer buffer) +{ + KVF_ASSERT(buffer != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkCommandBuffer(buffer); + KVF_ASSERT(kvf_device != NULL); + #endif + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkEndCommandBuffer)(buffer)); +} + +void kvfSubmitCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkSemaphore signal, VkSemaphore wait, VkFence fence, VkPipelineStageFlags* stages) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkSemaphore signal_semaphores[1]; + VkSemaphore wait_semaphores[1]; + signal_semaphores[0] = signal; + wait_semaphores[0] = wait; + + if(fence != VK_NULL_HANDLE) + KVF_GET_DEVICE_FUNCTION(vkResetFences)(device, 1, &fence); + + VkSubmitInfo submit_info = {}; + submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submit_info.waitSemaphoreCount = (wait == VK_NULL_HANDLE ? 0 : 1); + submit_info.pWaitSemaphores = wait_semaphores; + submit_info.pWaitDstStageMask = stages; + submit_info.commandBufferCount = 1; + submit_info.pCommandBuffers = &buffer; + submit_info.signalSemaphoreCount = (signal == VK_NULL_HANDLE ? 
0 : 1); + submit_info.pSignalSemaphores = signal_semaphores; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkQueueSubmit)(kvfGetDeviceQueue(device, queue), 1, &submit_info, fence)); +} + +void kvfSubmitSingleTimeCommandBuffer(VkDevice device, VkCommandBuffer buffer, KvfQueueType queue, VkFence fence) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + + if(fence != VK_NULL_HANDLE) + KVF_GET_DEVICE_FUNCTION(vkResetFences)(device, 1, &fence); + + VkSubmitInfo submit_info = {}; + submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submit_info.commandBufferCount = 1; + submit_info.pCommandBuffers = &buffer; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkQueueSubmit)(kvfGetDeviceQueue(device, queue), 1, &submit_info, fence)); + if(fence != VK_NULL_HANDLE) + kvfWaitForFence(device, fence); +} + +VkAttachmentDescription kvfBuildAttachmentDescription(KvfImageType type, VkFormat format, VkImageLayout initial, VkImageLayout final, bool clear, VkSampleCountFlagBits samples) +{ + VkAttachmentDescription attachment = {}; + + switch(type) + { + case KVF_IMAGE_CUBE: + case KVF_IMAGE_DEPTH_ARRAY: + case KVF_IMAGE_COLOR: + case KVF_IMAGE_DEPTH: + { + attachment.format = format; + attachment.initialLayout = initial; + attachment.finalLayout = final; + break; + } + + default: KVF_ASSERT(false && "KVF Attachment Description builder : unsupported image type"); break; + } + + if(clear) + { + attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; + attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; + attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + } + else + { + if(samples != VK_SAMPLE_COUNT_1_BIT) + { + attachment.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; + attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; + } + else + { + attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; + attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD; + } + } + + attachment.samples = samples; + if(samples != VK_SAMPLE_COUNT_1_BIT) + attachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; + else + attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; + attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; + attachment.flags = 0; + + return attachment; +} + +#ifndef KVF_NO_KHR + VkAttachmentDescription kvfBuildSwapchainAttachmentDescription(VkSwapchainKHR swapchain, bool clear) + { + __KvfSwapchain* kvf_swapchain = __kvfGetKvfSwapchainFromVkSwapchainKHR(swapchain); + KVF_ASSERT(kvf_swapchain != NULL); + KVF_ASSERT(kvf_swapchain->images_count != 0); + return kvfBuildAttachmentDescription(KVF_IMAGE_COLOR, kvf_swapchain->images_format, VK_IMAGE_LAYOUT_UNDEFINED,VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, clear, VK_SAMPLE_COUNT_1_BIT); + } +#endif + +VkRenderPass kvfCreateRenderPass(VkDevice device, VkAttachmentDescription* attachments, size_t attachments_count, VkPipelineBindPoint bind_point) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + return kvfCreateRenderPassWithSubpassDependencies(device, attachments, attachments_count, bind_point, NULL, 0); +} + +VkRenderPass kvfCreateRenderPassWithSubpassDependencies(VkDevice device, VkAttachmentDescription* attachments, size_t attachments_count, VkPipelineBindPoint bind_point, VkSubpassDependency* dependencies, size_t dependencies_count) +{ + size_t color_attachment_count = 0; + size_t depth_attachment_count = 0; + + for(size_t i = 0; i < attachments_count; i++) + { + if(kvfIsDepthFormat(attachments[i].format)) + depth_attachment_count++; + else + 
color_attachment_count++; + } + + VkAttachmentReference* color_references = NULL; + VkAttachmentReference* depth_references = NULL; + + if(color_attachment_count != 0) + { + color_references = (VkAttachmentReference*)KVF_MALLOC(color_attachment_count * sizeof(VkAttachmentReference)); + KVF_ASSERT(color_references != NULL && "allocation failed :("); + } + if(depth_attachment_count != 0) + { + depth_references = (VkAttachmentReference*)KVF_MALLOC(depth_attachment_count * sizeof(VkAttachmentReference)); + KVF_ASSERT(depth_references != NULL && "allocation failed :("); + } + + for(size_t i = 0, c = 0, d = 0; i < attachments_count; i++) + { + if(!kvfIsDepthFormat(attachments[i].format)) + { + VkImageLayout layout = attachments[i].finalLayout; + color_references[c].attachment = i; + color_references[c].layout = layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR ? VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL : layout; + c++; + } + else + { + depth_references[d].attachment = i; + depth_references[d].layout = attachments[i].finalLayout; + d++; + } + } + + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + + VkSubpassDescription subpass = {}; + subpass.pipelineBindPoint = bind_point; + subpass.colorAttachmentCount = color_attachment_count; + subpass.pColorAttachments = color_references; + subpass.pDepthStencilAttachment = depth_references; + + VkRenderPassCreateInfo renderpass_create_info = {}; + renderpass_create_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; + renderpass_create_info.attachmentCount = attachments_count; + renderpass_create_info.pAttachments = attachments; + renderpass_create_info.subpassCount = 1; + renderpass_create_info.pSubpasses = &subpass; + renderpass_create_info.dependencyCount = dependencies_count; + renderpass_create_info.pDependencies = dependencies; + + VkRenderPass render_pass = VK_NULL_HANDLE; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateRenderPass)(device, &renderpass_create_info, NULL, &render_pass)); + KVF_FREE(color_references); + KVF_FREE(depth_references); + return render_pass; +} + +void kvfDestroyRenderPass(VkDevice device, VkRenderPass renderPass) +{ + if(renderPass == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyRenderPass)(device, renderPass, NULL); +} + +void kvfBeginRenderPass(VkRenderPass pass, VkCommandBuffer cmd, VkFramebuffer framebuffer, VkExtent2D framebuffer_extent, VkClearValue* clears, size_t clears_count) +{ + KVF_ASSERT(pass != VK_NULL_HANDLE); + KVF_ASSERT(framebuffer != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkCommandBuffer(cmd); + KVF_ASSERT(kvf_device != NULL); + #endif + + VkOffset2D offset = { 0, 0 }; + VkRenderPassBeginInfo renderpass_info = {}; + renderpass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; + renderpass_info.renderPass = pass; + renderpass_info.framebuffer = framebuffer; + renderpass_info.renderArea.offset = offset; + renderpass_info.renderArea.extent = framebuffer_extent; + renderpass_info.clearValueCount = clears_count; + renderpass_info.pClearValues = clears; + KVF_GET_DEVICE_FUNCTION(vkCmdBeginRenderPass)(cmd, &renderpass_info, VK_SUBPASS_CONTENTS_INLINE); +} + +VkShaderModule kvfCreateShaderModule(VkDevice device, uint32_t* code, size_t size) +{ + 
KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkShaderModuleCreateInfo createInfo = {}; + createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; + createInfo.codeSize = size * sizeof(uint32_t); + createInfo.pCode = code; + VkShaderModule shader = VK_NULL_HANDLE; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateShaderModule)(device, &createInfo, NULL, &shader)); + return shader; +} + +void kvfDestroyShaderModule(VkDevice device, VkShaderModule shader) +{ + if(shader == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyShaderModule)(device, shader, NULL); +} + +VkDescriptorSetLayout kvfCreateDescriptorSetLayout(VkDevice device, VkDescriptorSetLayoutBinding* bindings, size_t bindings_count) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkDescriptorSetLayoutCreateInfo layout_info = {}; + layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; + layout_info.bindingCount = bindings_count; + layout_info.pBindings = bindings; + + VkDescriptorSetLayout layout; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateDescriptorSetLayout)(device, &layout_info, NULL, &layout)); + return layout; +} + +void kvfDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout layout) +{ + if(layout == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyDescriptorSetLayout)(device, layout, NULL); +} + +VkDescriptorSet kvfAllocateDescriptorSet(VkDevice device, VkDescriptorSetLayout layout) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + VkDescriptorPool pool = VK_NULL_HANDLE; + for(uint32_t i = 0; i < kvf_device->sets_pools_size; i++) + { + if(kvf_device->sets_pools[i].size < kvf_device->sets_pools[i].capacity) + pool = kvf_device->sets_pools[i].pool; + } + if(pool == VK_NULL_HANDLE) + pool = __kvfDeviceCreateDescriptorPool(device); + KVF_ASSERT(pool != VK_NULL_HANDLE); + + VkDescriptorSet set = VK_NULL_HANDLE; + VkDescriptorSetAllocateInfo alloc_info = {}; + alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; + alloc_info.descriptorPool = pool; + alloc_info.descriptorSetCount = 1; + alloc_info.pSetLayouts = &layout; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkAllocateDescriptorSets)(device, &alloc_info, &set)); + KVF_ASSERT(set != VK_NULL_HANDLE); + return set; +} + +void kvfUpdateStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding) +{ + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkWriteDescriptorSet write = kvfWriteStorageBufferToDescriptorSet(device, set, info, binding); + KVF_GET_DEVICE_FUNCTION(vkUpdateDescriptorSets)(device, 1, &write, 0, NULL); +} + +void kvfUpdateUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const 
VkDescriptorBufferInfo* info, uint32_t binding) +{ + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkWriteDescriptorSet write = kvfWriteUniformBufferToDescriptorSet(device, set, info, binding); + KVF_GET_DEVICE_FUNCTION(vkUpdateDescriptorSets)(device, 1, &write, 0, NULL); +} + +void kvfUpdateImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorImageInfo* info, uint32_t binding) +{ + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkWriteDescriptorSet write = kvfWriteImageToDescriptorSet(device, set, info, binding); + KVF_GET_DEVICE_FUNCTION(vkUpdateDescriptorSets)(device, 1, &write, 0, NULL); +} + +VkWriteDescriptorSet kvfWriteStorageBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(set != VK_NULL_HANDLE); + VkWriteDescriptorSet descriptor_write = {}; + descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descriptor_write.dstSet = set; + descriptor_write.dstBinding = binding; + descriptor_write.dstArrayElement = 0; + descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; + descriptor_write.descriptorCount = 1; + descriptor_write.pBufferInfo = info; + return descriptor_write; +} + +VkWriteDescriptorSet kvfWriteUniformBufferToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorBufferInfo* info, uint32_t binding) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(set != VK_NULL_HANDLE); + VkWriteDescriptorSet descriptor_write = {}; + descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descriptor_write.dstSet = set; + descriptor_write.dstBinding = binding; + descriptor_write.dstArrayElement = 0; + descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; + descriptor_write.descriptorCount = 1; + descriptor_write.pBufferInfo = info; + return descriptor_write; +} + +VkWriteDescriptorSet kvfWriteImageToDescriptorSet(VkDevice device, VkDescriptorSet set, const VkDescriptorImageInfo* info, uint32_t binding) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + KVF_ASSERT(set != VK_NULL_HANDLE); + VkWriteDescriptorSet descriptor_write = {}; + descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descriptor_write.dstSet = set; + descriptor_write.dstBinding = binding; + descriptor_write.dstArrayElement = 0; + descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + descriptor_write.descriptorCount = 1; + descriptor_write.pImageInfo = info; + return descriptor_write; +} + +VkPipelineLayout kvfCreatePipelineLayout(VkDevice device, VkDescriptorSetLayout* set_layouts, size_t set_layouts_count, VkPushConstantRange* pc, size_t pc_count) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkPipelineLayoutCreateInfo pipeline_layout_info = {}; + pipeline_layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; + pipeline_layout_info.setLayoutCount = set_layouts_count; + pipeline_layout_info.pSetLayouts = set_layouts; + pipeline_layout_info.pushConstantRangeCount = pc_count; + pipeline_layout_info.pPushConstantRanges = pc; + + VkPipelineLayout layout; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreatePipelineLayout)(device, 
&pipeline_layout_info, NULL, &layout)); + return layout; +} + +void kvfDestroyPipelineLayout(VkDevice device, VkPipelineLayout layout) +{ + if(layout == VK_NULL_HANDLE) + return; + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyPipelineLayout)(device, layout, NULL); +} + +void kvfResetDeviceDescriptorPools(VkDevice device) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + for(uint32_t i = 0; i < kvf_device->sets_pools_size; i++) + { + KVF_GET_DEVICE_FUNCTION(vkResetDescriptorPool)(device, kvf_device->sets_pools[i].pool, 0); + kvf_device->sets_pools[i].size = 0; + } +} + +KvfGraphicsPipelineBuilder* kvfCreateGPipelineBuilder() +{ + KvfGraphicsPipelineBuilder* builder = (KvfGraphicsPipelineBuilder*)KVF_MALLOC(sizeof(KvfGraphicsPipelineBuilder)); + KVF_ASSERT(builder != NULL && "allocation failed :("); + memset(builder, 0, sizeof(KvfGraphicsPipelineBuilder)); + kvfGPipelineBuilderReset(builder); + return builder; +} + +void kvfDestroyGPipelineBuilder(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + KVF_FREE(builder->shader_stages); + if(builder->vertex_input_state.pVertexAttributeDescriptions != NULL) + KVF_FREE((VkVertexInputAttributeDescription*)builder->vertex_input_state.pVertexAttributeDescriptions); + if(builder->vertex_input_state.pVertexBindingDescriptions != NULL) + KVF_FREE((VkVertexInputBindingDescription*)builder->vertex_input_state.pVertexBindingDescriptions); + KVF_FREE(builder); +} + +void kvfGPipelineBuilderReset(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + KVF_FREE(builder->shader_stages); + if(builder->vertex_input_state.pVertexAttributeDescriptions != NULL) + KVF_FREE((VkVertexInputAttributeDescription*)builder->vertex_input_state.pVertexAttributeDescriptions); + if(builder->vertex_input_state.pVertexBindingDescriptions != NULL) + KVF_FREE((VkVertexInputBindingDescription*)builder->vertex_input_state.pVertexBindingDescriptions); + memset(builder, 0, sizeof(KvfGraphicsPipelineBuilder)); + builder->vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + builder->input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + builder->tessellation_state.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; + builder->rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + builder->depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; + builder->multisampling.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; +} + +void kvfGPipelineBuilderSetInputTopology(KvfGraphicsPipelineBuilder* builder, VkPrimitiveTopology topology) +{ + KVF_ASSERT(builder != NULL); + builder->input_assembly_state.topology = topology; + builder->input_assembly_state.primitiveRestartEnable = VK_FALSE; +} + +void kvfGPipelineBuilderSetPolygonMode(KvfGraphicsPipelineBuilder* builder, VkPolygonMode polygon, float line_width) +{ + KVF_ASSERT(builder != NULL); + builder->rasterization_state.polygonMode = polygon; + builder->rasterization_state.lineWidth = line_width; +} + +void kvfGPipelineBuilderSetCullMode(KvfGraphicsPipelineBuilder* builder, VkCullModeFlags cull, VkFrontFace face) +{ + KVF_ASSERT(builder != NULL); + 
builder->rasterization_state.cullMode = cull; + builder->rasterization_state.frontFace = face; +} + +void kvfGPipelineBuilderSetMultisampling(KvfGraphicsPipelineBuilder* builder, VkSampleCountFlagBits count) +{ + KVF_ASSERT(builder != NULL); + builder->multisampling.rasterizationSamples = count; +} + +void kvfGPipelineBuilderSetMultisamplingShading(KvfGraphicsPipelineBuilder* builder, VkSampleCountFlagBits count, float min_sampling_shading) +{ + KVF_ASSERT(builder != NULL); + builder->multisampling.rasterizationSamples = count; + builder->multisampling.sampleShadingEnable = VK_TRUE; + builder->multisampling.minSampleShading = min_sampling_shading; +} + +void kvfGPipelineBuilderDisableBlending(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + builder->color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + builder->color_blend_attachment_state.blendEnable = VK_FALSE; +} + +void kvfGPipelineBuilderEnableAdditiveBlending(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + builder->color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + builder->color_blend_attachment_state.blendEnable = VK_TRUE; + builder->color_blend_attachment_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; + builder->color_blend_attachment_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE; + builder->color_blend_attachment_state.colorBlendOp = VK_BLEND_OP_ADD; + builder->color_blend_attachment_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + builder->color_blend_attachment_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; + builder->color_blend_attachment_state.alphaBlendOp = VK_BLEND_OP_ADD; +} + +void kvfGPipelineBuilderEnableAlphaBlending(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + builder->color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + builder->color_blend_attachment_state.blendEnable = VK_TRUE; + builder->color_blend_attachment_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; + builder->color_blend_attachment_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + builder->color_blend_attachment_state.colorBlendOp = VK_BLEND_OP_ADD; + builder->color_blend_attachment_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + builder->color_blend_attachment_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; + builder->color_blend_attachment_state.alphaBlendOp = VK_BLEND_OP_ADD; +} + +void kvfGPipelineBuilderEnableDepthTest(KvfGraphicsPipelineBuilder* builder, VkCompareOp op, bool write_enabled) +{ + KVF_ASSERT(builder != NULL); + builder->depth_stencil_state.depthTestEnable = VK_TRUE; + builder->depth_stencil_state.depthWriteEnable = write_enabled; + builder->depth_stencil_state.depthCompareOp = op; + builder->depth_stencil_state.depthBoundsTestEnable = VK_FALSE; + builder->depth_stencil_state.stencilTestEnable = VK_FALSE; + builder->depth_stencil_state.minDepthBounds = 0.f; + builder->depth_stencil_state.maxDepthBounds = 1.f; +} + +void kvfGPipelineBuilderDisableDepthTest(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + builder->depth_stencil_state.depthTestEnable = VK_FALSE; + builder->depth_stencil_state.depthWriteEnable = VK_FALSE; + builder->depth_stencil_state.depthCompareOp = VK_COMPARE_OP_NEVER; + 
builder->depth_stencil_state.depthBoundsTestEnable = VK_FALSE; + builder->depth_stencil_state.stencilTestEnable = VK_FALSE; + builder->depth_stencil_state.minDepthBounds = 0.f; + builder->depth_stencil_state.maxDepthBounds = 1.f; +} + +void kvfGPipelineBuilderSetVertexInputs(KvfGraphicsPipelineBuilder* builder, VkVertexInputBindingDescription binds, VkVertexInputAttributeDescription* attributes, size_t attributes_count) +{ + KVF_ASSERT(builder != NULL); + KVF_ASSERT(attributes != NULL); + VkVertexInputBindingDescription* binds_ptr = (VkVertexInputBindingDescription*)KVF_MALLOC(sizeof(VkVertexInputBindingDescription)); + KVF_ASSERT(binds_ptr != NULL && "allocation failed :("); + *binds_ptr = binds; + VkVertexInputAttributeDescription* attributes_descriptions = (VkVertexInputAttributeDescription*)KVF_MALLOC(sizeof(VkVertexInputAttributeDescription) * attributes_count); + KVF_ASSERT(attributes_descriptions != NULL && "allocation failed :("); + memcpy(attributes_descriptions, attributes, sizeof(VkVertexInputAttributeDescription) * attributes_count); + builder->vertex_input_state.vertexBindingDescriptionCount = 1; + builder->vertex_input_state.pVertexBindingDescriptions = binds_ptr; + builder->vertex_input_state.vertexAttributeDescriptionCount = attributes_count; + builder->vertex_input_state.pVertexAttributeDescriptions = attributes_descriptions; +} + +void kvfGPipelineBuilderAddShaderStage(KvfGraphicsPipelineBuilder* builder, VkShaderStageFlagBits stage, VkShaderModule module, const char* entry) +{ + KVF_ASSERT(builder != NULL); + builder->shader_stages = (VkPipelineShaderStageCreateInfo*)KVF_REALLOC(builder->shader_stages, sizeof(VkPipelineShaderStageCreateInfo) * (builder->shader_stages_count + 1)); + KVF_ASSERT(builder->shader_stages != NULL); + memset(&builder->shader_stages[builder->shader_stages_count], 0, sizeof(VkPipelineShaderStageCreateInfo)); + char* entry_ptr = (char*)KVF_MALLOC(strlen(entry) + 1); /* +1 for the NUL terminator copied by strcpy */ + KVF_ASSERT(entry_ptr != NULL && "allocation failed :("); + strcpy(entry_ptr, entry); + builder->shader_stages[builder->shader_stages_count].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + builder->shader_stages[builder->shader_stages_count].stage = stage; + builder->shader_stages[builder->shader_stages_count].module = module; + builder->shader_stages[builder->shader_stages_count].pName = entry_ptr; + builder->shader_stages_count++; +} + +void kvfGPipelineBuilderResetShaderStages(KvfGraphicsPipelineBuilder* builder) +{ + KVF_ASSERT(builder != NULL); + if(builder->shader_stages == NULL) + return; + + for(size_t i = 0; i < builder->shader_stages_count; i++) + KVF_FREE((char*)builder->shader_stages[i].pName); + KVF_FREE(builder->shader_stages); + builder->shader_stages = NULL; /* avoid reallocating a dangling pointer if stages are added again */ + builder->shader_stages_count = 0; +} + +VkPipeline kvfCreateGraphicsPipeline(VkDevice device, VkPipelineCache cache, VkPipelineLayout layout, KvfGraphicsPipelineBuilder* builder, VkRenderPass pass) +{ + KVF_ASSERT(builder != NULL); + KVF_ASSERT(device != VK_NULL_HANDLE); + + VkPipelineColorBlendStateCreateInfo color_blending = {}; + color_blending.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + color_blending.logicOpEnable = VK_FALSE; + color_blending.logicOp = VK_LOGIC_OP_COPY; + color_blending.attachmentCount = 1; + color_blending.pAttachments = &builder->color_blend_attachment_state; + color_blending.blendConstants[0] = 0.0f; + color_blending.blendConstants[1] = 0.0f; + color_blending.blendConstants[2] = 0.0f; + color_blending.blendConstants[3] = 0.0f; + + VkDynamicState states[] = { VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR }; + + VkPipelineDynamicStateCreateInfo dynamic_states = {}; + dynamic_states.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamic_states.dynamicStateCount = sizeof(states) / sizeof(VkDynamicState); + dynamic_states.pDynamicStates = states; + + VkPipelineViewportStateCreateInfo viewport_state = {}; + viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewport_state.viewportCount = 1; + viewport_state.pViewports = NULL; + viewport_state.scissorCount = 1; + viewport_state.pScissors = NULL; + + VkGraphicsPipelineCreateInfo pipeline_info = {}; + pipeline_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + pipeline_info.stageCount = builder->shader_stages_count; + pipeline_info.pStages = builder->shader_stages; + pipeline_info.pVertexInputState = &builder->vertex_input_state; + pipeline_info.pInputAssemblyState = &builder->input_assembly_state; + pipeline_info.pViewportState = &viewport_state; + pipeline_info.pRasterizationState = &builder->rasterization_state; + pipeline_info.pMultisampleState = &builder->multisampling; + pipeline_info.pColorBlendState = &color_blending; + pipeline_info.pDynamicState = &dynamic_states; + pipeline_info.layout = layout; + pipeline_info.renderPass = pass; + pipeline_info.subpass = 0; + pipeline_info.basePipelineHandle = VK_NULL_HANDLE; + pipeline_info.pDepthStencilState = &builder->depth_stencil_state; + + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + VkPipeline pipeline; + __kvfCheckVk(KVF_GET_DEVICE_FUNCTION(vkCreateGraphicsPipelines)(device, cache, 1, &pipeline_info, NULL, &pipeline)); + return pipeline; +} + +void kvfDestroyPipeline(VkDevice device, VkPipeline pipeline) +{ + KVF_ASSERT(device != VK_NULL_HANDLE); + #ifdef KVF_IMPL_VK_NO_PROTOTYPES + __KvfDevice* kvf_device = __kvfGetKvfDeviceFromVkDevice(device); + KVF_ASSERT(kvf_device != NULL); + #endif + KVF_GET_DEVICE_FUNCTION(vkDestroyPipeline)(device, pipeline, NULL); +} + +#endif // KVF_IMPLEMENTATION diff --git a/third_party/volk.c b/third_party/volk.c deleted file mode 100644 index dfcdf41..0000000 --- a/third_party/volk.c +++ /dev/null @@ -1,3221 +0,0 @@ -/* This file is part of volk library; see volk.h for version/license details */ -/* clang-format off */ -#include "volk.h" - -#ifdef _WIN32 - typedef const char* LPCSTR; - typedef struct HINSTANCE__* HINSTANCE; - typedef HINSTANCE HMODULE; - #if defined(_MINWINDEF_) - /* minwindef.h defines FARPROC, and attempting to redefine it may conflict with -Wstrict-prototypes */ - #elif defined(_WIN64) - typedef __int64 (__stdcall* FARPROC)(void); - #else - typedef int (__stdcall* FARPROC)(void); - #endif -#else -# include -#endif - -#ifdef __APPLE__ -# include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef _WIN32 -__declspec(dllimport) HMODULE __stdcall LoadLibraryA(LPCSTR); -__declspec(dllimport) FARPROC __stdcall GetProcAddress(HMODULE, LPCSTR); -__declspec(dllimport) int __stdcall FreeLibrary(HMODULE); -#endif - -#if defined(__GNUC__) -# define VOLK_DISABLE_GCC_PEDANTIC_WARNINGS \ - _Pragma("GCC diagnostic push") \ - _Pragma("GCC diagnostic ignored \"-Wpedantic\"") -# define VOLK_RESTORE_GCC_PEDANTIC_WARNINGS \ - _Pragma("GCC diagnostic pop") -#else -# define VOLK_DISABLE_GCC_PEDANTIC_WARNINGS -# define VOLK_RESTORE_GCC_PEDANTIC_WARNINGS -#endif - -static void* loadedModule = NULL; -static VkInstance loadedInstance = VK_NULL_HANDLE; -static VkDevice 
loadedDevice = VK_NULL_HANDLE; - -static void volkGenLoadLoader(void* context, PFN_vkVoidFunction (*load)(void*, const char*)); -static void volkGenLoadInstance(void* context, PFN_vkVoidFunction (*load)(void*, const char*)); -static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, const char*)); -static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, PFN_vkVoidFunction (*load)(void*, const char*)); - -static PFN_vkVoidFunction vkGetInstanceProcAddrStub(void* context, const char* name) -{ - return vkGetInstanceProcAddr((VkInstance)context, name); -} - -static PFN_vkVoidFunction vkGetDeviceProcAddrStub(void* context, const char* name) -{ - return vkGetDeviceProcAddr((VkDevice)context, name); -} - -static PFN_vkVoidFunction nullProcAddrStub(void* context, const char* name) -{ - (void)context; - (void)name; - return NULL; -} - -VkResult volkInitialize(void) -{ -#if defined(_WIN32) - HMODULE module = LoadLibraryA("vulkan-1.dll"); - if (!module) - return VK_ERROR_INITIALIZATION_FAILED; - - // note: function pointer is cast through void function pointer to silence cast-function-type warning on gcc8 - vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)(void(*)(void))GetProcAddress(module, "vkGetInstanceProcAddr"); -#elif defined(__APPLE__) - void* module = dlopen("libvulkan.dylib", RTLD_NOW | RTLD_LOCAL); - if (!module) - module = dlopen("libvulkan.1.dylib", RTLD_NOW | RTLD_LOCAL); - if (!module) - module = dlopen("libMoltenVK.dylib", RTLD_NOW | RTLD_LOCAL); - // Add support for using Vulkan and MoltenVK in a Framework. App store rules for iOS - // strictly enforce no .dylib's. If they aren't found it just falls through - if (!module) - module = dlopen("vulkan.framework/vulkan", RTLD_NOW | RTLD_LOCAL); - if (!module) - module = dlopen("MoltenVK.framework/MoltenVK", RTLD_NOW | RTLD_LOCAL); - // modern versions of macOS don't search /usr/local/lib automatically contrary to what man dlopen says - // Vulkan SDK uses this as the system-wide installation location, so we're going to fallback to this if all else fails - if (!module && getenv("DYLD_FALLBACK_LIBRARY_PATH") == NULL) - module = dlopen("/usr/local/lib/libvulkan.dylib", RTLD_NOW | RTLD_LOCAL); - if (!module) - return VK_ERROR_INITIALIZATION_FAILED; - - vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)dlsym(module, "vkGetInstanceProcAddr"); -#else - void* module = dlopen("libvulkan.so.1", RTLD_NOW | RTLD_LOCAL); - if (!module) - module = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL); - if (!module) - return VK_ERROR_INITIALIZATION_FAILED; - VOLK_DISABLE_GCC_PEDANTIC_WARNINGS - vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)dlsym(module, "vkGetInstanceProcAddr"); - VOLK_RESTORE_GCC_PEDANTIC_WARNINGS -#endif - - loadedModule = module; - volkGenLoadLoader(NULL, vkGetInstanceProcAddrStub); - - return VK_SUCCESS; -} - -void volkInitializeCustom(PFN_vkGetInstanceProcAddr handler) -{ - vkGetInstanceProcAddr = handler; - - loadedModule = NULL; - volkGenLoadLoader(NULL, vkGetInstanceProcAddrStub); -} - -void volkFinalize(void) -{ - if (loadedModule) - { -#if defined(_WIN32) - FreeLibrary((HMODULE)loadedModule); -#else - dlclose(loadedModule); -#endif - } - - vkGetInstanceProcAddr = NULL; - volkGenLoadLoader(NULL, nullProcAddrStub); - volkGenLoadInstance(NULL, nullProcAddrStub); - volkGenLoadDevice(NULL, nullProcAddrStub); - - loadedModule = NULL; - loadedInstance = VK_NULL_HANDLE; - loadedDevice = VK_NULL_HANDLE; -} - -uint32_t volkGetInstanceVersion(void) -{ -#if defined(VK_VERSION_1_1) - uint32_t 
apiVersion = 0; - if (vkEnumerateInstanceVersion && vkEnumerateInstanceVersion(&apiVersion) == VK_SUCCESS) - return apiVersion; -#endif - - if (vkCreateInstance) - return VK_API_VERSION_1_0; - - return 0; -} - -void volkLoadInstance(VkInstance instance) -{ - loadedInstance = instance; - volkGenLoadInstance(instance, vkGetInstanceProcAddrStub); - volkGenLoadDevice(instance, vkGetInstanceProcAddrStub); -} - -void volkLoadInstanceOnly(VkInstance instance) -{ - loadedInstance = instance; - volkGenLoadInstance(instance, vkGetInstanceProcAddrStub); -} - -VkInstance volkGetLoadedInstance(void) -{ - return loadedInstance; -} - -void volkLoadDevice(VkDevice device) -{ - loadedDevice = device; - volkGenLoadDevice(device, vkGetDeviceProcAddrStub); -} - -VkDevice volkGetLoadedDevice(void) -{ - return loadedDevice; -} - -void volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device) -{ - volkGenLoadDeviceTable(table, device, vkGetDeviceProcAddrStub); -} - -static void volkGenLoadLoader(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) -{ - /* VOLK_GENERATE_LOAD_LOADER */ -#if defined(VK_VERSION_1_0) - vkCreateInstance = (PFN_vkCreateInstance)load(context, "vkCreateInstance"); - vkEnumerateInstanceExtensionProperties = (PFN_vkEnumerateInstanceExtensionProperties)load(context, "vkEnumerateInstanceExtensionProperties"); - vkEnumerateInstanceLayerProperties = (PFN_vkEnumerateInstanceLayerProperties)load(context, "vkEnumerateInstanceLayerProperties"); -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - vkEnumerateInstanceVersion = (PFN_vkEnumerateInstanceVersion)load(context, "vkEnumerateInstanceVersion"); -#endif /* defined(VK_VERSION_1_1) */ - /* VOLK_GENERATE_LOAD_LOADER */ -} - -static void volkGenLoadInstance(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) -{ - /* VOLK_GENERATE_LOAD_INSTANCE */ -#if defined(VK_VERSION_1_0) - vkCreateDevice = (PFN_vkCreateDevice)load(context, "vkCreateDevice"); - vkDestroyInstance = (PFN_vkDestroyInstance)load(context, "vkDestroyInstance"); - vkEnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties)load(context, "vkEnumerateDeviceExtensionProperties"); - vkEnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties)load(context, "vkEnumerateDeviceLayerProperties"); - vkEnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices)load(context, "vkEnumeratePhysicalDevices"); - vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)load(context, "vkGetDeviceProcAddr"); - vkGetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures)load(context, "vkGetPhysicalDeviceFeatures"); - vkGetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties)load(context, "vkGetPhysicalDeviceFormatProperties"); - vkGetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties)load(context, "vkGetPhysicalDeviceImageFormatProperties"); - vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)load(context, "vkGetPhysicalDeviceMemoryProperties"); - vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)load(context, "vkGetPhysicalDeviceProperties"); - vkGetPhysicalDeviceQueueFamilyProperties = (PFN_vkGetPhysicalDeviceQueueFamilyProperties)load(context, "vkGetPhysicalDeviceQueueFamilyProperties"); - vkGetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties"); -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - 
vkEnumeratePhysicalDeviceGroups = (PFN_vkEnumeratePhysicalDeviceGroups)load(context, "vkEnumeratePhysicalDeviceGroups"); - vkGetPhysicalDeviceExternalBufferProperties = (PFN_vkGetPhysicalDeviceExternalBufferProperties)load(context, "vkGetPhysicalDeviceExternalBufferProperties"); - vkGetPhysicalDeviceExternalFenceProperties = (PFN_vkGetPhysicalDeviceExternalFenceProperties)load(context, "vkGetPhysicalDeviceExternalFenceProperties"); - vkGetPhysicalDeviceExternalSemaphoreProperties = (PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)load(context, "vkGetPhysicalDeviceExternalSemaphoreProperties"); - vkGetPhysicalDeviceFeatures2 = (PFN_vkGetPhysicalDeviceFeatures2)load(context, "vkGetPhysicalDeviceFeatures2"); - vkGetPhysicalDeviceFormatProperties2 = (PFN_vkGetPhysicalDeviceFormatProperties2)load(context, "vkGetPhysicalDeviceFormatProperties2"); - vkGetPhysicalDeviceImageFormatProperties2 = (PFN_vkGetPhysicalDeviceImageFormatProperties2)load(context, "vkGetPhysicalDeviceImageFormatProperties2"); - vkGetPhysicalDeviceMemoryProperties2 = (PFN_vkGetPhysicalDeviceMemoryProperties2)load(context, "vkGetPhysicalDeviceMemoryProperties2"); - vkGetPhysicalDeviceProperties2 = (PFN_vkGetPhysicalDeviceProperties2)load(context, "vkGetPhysicalDeviceProperties2"); - vkGetPhysicalDeviceQueueFamilyProperties2 = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2)load(context, "vkGetPhysicalDeviceQueueFamilyProperties2"); - vkGetPhysicalDeviceSparseImageFormatProperties2 = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties2"); -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_3) - vkGetPhysicalDeviceToolProperties = (PFN_vkGetPhysicalDeviceToolProperties)load(context, "vkGetPhysicalDeviceToolProperties"); -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_EXT_acquire_drm_display) - vkAcquireDrmDisplayEXT = (PFN_vkAcquireDrmDisplayEXT)load(context, "vkAcquireDrmDisplayEXT"); - vkGetDrmDisplayEXT = (PFN_vkGetDrmDisplayEXT)load(context, "vkGetDrmDisplayEXT"); -#endif /* defined(VK_EXT_acquire_drm_display) */ -#if defined(VK_EXT_acquire_xlib_display) - vkAcquireXlibDisplayEXT = (PFN_vkAcquireXlibDisplayEXT)load(context, "vkAcquireXlibDisplayEXT"); - vkGetRandROutputDisplayEXT = (PFN_vkGetRandROutputDisplayEXT)load(context, "vkGetRandROutputDisplayEXT"); -#endif /* defined(VK_EXT_acquire_xlib_display) */ -#if defined(VK_EXT_calibrated_timestamps) - vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = (PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)load(context, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT"); -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_debug_report) - vkCreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)load(context, "vkCreateDebugReportCallbackEXT"); - vkDebugReportMessageEXT = (PFN_vkDebugReportMessageEXT)load(context, "vkDebugReportMessageEXT"); - vkDestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)load(context, "vkDestroyDebugReportCallbackEXT"); -#endif /* defined(VK_EXT_debug_report) */ -#if defined(VK_EXT_debug_utils) - vkCmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT)load(context, "vkCmdBeginDebugUtilsLabelEXT"); - vkCmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT)load(context, "vkCmdEndDebugUtilsLabelEXT"); - vkCmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT)load(context, "vkCmdInsertDebugUtilsLabelEXT"); - vkCreateDebugUtilsMessengerEXT = (PFN_vkCreateDebugUtilsMessengerEXT)load(context, 
"vkCreateDebugUtilsMessengerEXT"); - vkDestroyDebugUtilsMessengerEXT = (PFN_vkDestroyDebugUtilsMessengerEXT)load(context, "vkDestroyDebugUtilsMessengerEXT"); - vkQueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT)load(context, "vkQueueBeginDebugUtilsLabelEXT"); - vkQueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT)load(context, "vkQueueEndDebugUtilsLabelEXT"); - vkQueueInsertDebugUtilsLabelEXT = (PFN_vkQueueInsertDebugUtilsLabelEXT)load(context, "vkQueueInsertDebugUtilsLabelEXT"); - vkSetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT)load(context, "vkSetDebugUtilsObjectNameEXT"); - vkSetDebugUtilsObjectTagEXT = (PFN_vkSetDebugUtilsObjectTagEXT)load(context, "vkSetDebugUtilsObjectTagEXT"); - vkSubmitDebugUtilsMessageEXT = (PFN_vkSubmitDebugUtilsMessageEXT)load(context, "vkSubmitDebugUtilsMessageEXT"); -#endif /* defined(VK_EXT_debug_utils) */ -#if defined(VK_EXT_direct_mode_display) - vkReleaseDisplayEXT = (PFN_vkReleaseDisplayEXT)load(context, "vkReleaseDisplayEXT"); -#endif /* defined(VK_EXT_direct_mode_display) */ -#if defined(VK_EXT_directfb_surface) - vkCreateDirectFBSurfaceEXT = (PFN_vkCreateDirectFBSurfaceEXT)load(context, "vkCreateDirectFBSurfaceEXT"); - vkGetPhysicalDeviceDirectFBPresentationSupportEXT = (PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT)load(context, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT"); -#endif /* defined(VK_EXT_directfb_surface) */ -#if defined(VK_EXT_display_surface_counter) - vkGetPhysicalDeviceSurfaceCapabilities2EXT = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)load(context, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"); -#endif /* defined(VK_EXT_display_surface_counter) */ -#if defined(VK_EXT_full_screen_exclusive) - vkGetPhysicalDeviceSurfacePresentModes2EXT = (PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)load(context, "vkGetPhysicalDeviceSurfacePresentModes2EXT"); -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_headless_surface) - vkCreateHeadlessSurfaceEXT = (PFN_vkCreateHeadlessSurfaceEXT)load(context, "vkCreateHeadlessSurfaceEXT"); -#endif /* defined(VK_EXT_headless_surface) */ -#if defined(VK_EXT_metal_surface) - vkCreateMetalSurfaceEXT = (PFN_vkCreateMetalSurfaceEXT)load(context, "vkCreateMetalSurfaceEXT"); -#endif /* defined(VK_EXT_metal_surface) */ -#if defined(VK_EXT_sample_locations) - vkGetPhysicalDeviceMultisamplePropertiesEXT = (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)load(context, "vkGetPhysicalDeviceMultisamplePropertiesEXT"); -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_tooling_info) - vkGetPhysicalDeviceToolPropertiesEXT = (PFN_vkGetPhysicalDeviceToolPropertiesEXT)load(context, "vkGetPhysicalDeviceToolPropertiesEXT"); -#endif /* defined(VK_EXT_tooling_info) */ -#if defined(VK_FUCHSIA_imagepipe_surface) - vkCreateImagePipeSurfaceFUCHSIA = (PFN_vkCreateImagePipeSurfaceFUCHSIA)load(context, "vkCreateImagePipeSurfaceFUCHSIA"); -#endif /* defined(VK_FUCHSIA_imagepipe_surface) */ -#if defined(VK_GGP_stream_descriptor_surface) - vkCreateStreamDescriptorSurfaceGGP = (PFN_vkCreateStreamDescriptorSurfaceGGP)load(context, "vkCreateStreamDescriptorSurfaceGGP"); -#endif /* defined(VK_GGP_stream_descriptor_surface) */ -#if defined(VK_KHR_android_surface) - vkCreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)load(context, "vkCreateAndroidSurfaceKHR"); -#endif /* defined(VK_KHR_android_surface) */ -#if defined(VK_KHR_calibrated_timestamps) - vkGetPhysicalDeviceCalibrateableTimeDomainsKHR = 
(PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR)load(context, "vkGetPhysicalDeviceCalibrateableTimeDomainsKHR"); -#endif /* defined(VK_KHR_calibrated_timestamps) */ -#if defined(VK_KHR_cooperative_matrix) - vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR = (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR)load(context, "vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR"); -#endif /* defined(VK_KHR_cooperative_matrix) */ -#if defined(VK_KHR_device_group_creation) - vkEnumeratePhysicalDeviceGroupsKHR = (PFN_vkEnumeratePhysicalDeviceGroupsKHR)load(context, "vkEnumeratePhysicalDeviceGroupsKHR"); -#endif /* defined(VK_KHR_device_group_creation) */ -#if defined(VK_KHR_display) - vkCreateDisplayModeKHR = (PFN_vkCreateDisplayModeKHR)load(context, "vkCreateDisplayModeKHR"); - vkCreateDisplayPlaneSurfaceKHR = (PFN_vkCreateDisplayPlaneSurfaceKHR)load(context, "vkCreateDisplayPlaneSurfaceKHR"); - vkGetDisplayModePropertiesKHR = (PFN_vkGetDisplayModePropertiesKHR)load(context, "vkGetDisplayModePropertiesKHR"); - vkGetDisplayPlaneCapabilitiesKHR = (PFN_vkGetDisplayPlaneCapabilitiesKHR)load(context, "vkGetDisplayPlaneCapabilitiesKHR"); - vkGetDisplayPlaneSupportedDisplaysKHR = (PFN_vkGetDisplayPlaneSupportedDisplaysKHR)load(context, "vkGetDisplayPlaneSupportedDisplaysKHR"); - vkGetPhysicalDeviceDisplayPlanePropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)load(context, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"); - vkGetPhysicalDeviceDisplayPropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)load(context, "vkGetPhysicalDeviceDisplayPropertiesKHR"); -#endif /* defined(VK_KHR_display) */ -#if defined(VK_KHR_external_fence_capabilities) - vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)load(context, "vkGetPhysicalDeviceExternalFencePropertiesKHR"); -#endif /* defined(VK_KHR_external_fence_capabilities) */ -#if defined(VK_KHR_external_memory_capabilities) - vkGetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)load(context, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_capabilities) */ -#if defined(VK_KHR_external_semaphore_capabilities) - vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)load(context, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); -#endif /* defined(VK_KHR_external_semaphore_capabilities) */ -#if defined(VK_KHR_fragment_shading_rate) - vkGetPhysicalDeviceFragmentShadingRatesKHR = (PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)load(context, "vkGetPhysicalDeviceFragmentShadingRatesKHR"); -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_display_properties2) - vkGetDisplayModeProperties2KHR = (PFN_vkGetDisplayModeProperties2KHR)load(context, "vkGetDisplayModeProperties2KHR"); - vkGetDisplayPlaneCapabilities2KHR = (PFN_vkGetDisplayPlaneCapabilities2KHR)load(context, "vkGetDisplayPlaneCapabilities2KHR"); - vkGetPhysicalDeviceDisplayPlaneProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)load(context, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"); - vkGetPhysicalDeviceDisplayProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayProperties2KHR)load(context, "vkGetPhysicalDeviceDisplayProperties2KHR"); -#endif /* defined(VK_KHR_get_display_properties2) */ -#if defined(VK_KHR_get_physical_device_properties2) - vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)load(context, 
"vkGetPhysicalDeviceFeatures2KHR"); - vkGetPhysicalDeviceFormatProperties2KHR = (PFN_vkGetPhysicalDeviceFormatProperties2KHR)load(context, "vkGetPhysicalDeviceFormatProperties2KHR"); - vkGetPhysicalDeviceImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)load(context, "vkGetPhysicalDeviceImageFormatProperties2KHR"); - vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)load(context, "vkGetPhysicalDeviceMemoryProperties2KHR"); - vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)load(context, "vkGetPhysicalDeviceProperties2KHR"); - vkGetPhysicalDeviceQueueFamilyProperties2KHR = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)load(context, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"); - vkGetPhysicalDeviceSparseImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)load(context, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"); -#endif /* defined(VK_KHR_get_physical_device_properties2) */ -#if defined(VK_KHR_get_surface_capabilities2) - vkGetPhysicalDeviceSurfaceCapabilities2KHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)load(context, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"); - vkGetPhysicalDeviceSurfaceFormats2KHR = (PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)load(context, "vkGetPhysicalDeviceSurfaceFormats2KHR"); -#endif /* defined(VK_KHR_get_surface_capabilities2) */ -#if defined(VK_KHR_performance_query) - vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = (PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)load(context, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR"); - vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = (PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)load(context, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR"); -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_surface) - vkDestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)load(context, "vkDestroySurfaceKHR"); - vkGetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)load(context, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); - vkGetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)load(context, "vkGetPhysicalDeviceSurfaceFormatsKHR"); - vkGetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)load(context, "vkGetPhysicalDeviceSurfacePresentModesKHR"); - vkGetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)load(context, "vkGetPhysicalDeviceSurfaceSupportKHR"); -#endif /* defined(VK_KHR_surface) */ -#if defined(VK_KHR_video_encode_queue) - vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR = (PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR)load(context, "vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR"); -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) - vkGetPhysicalDeviceVideoCapabilitiesKHR = (PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR)load(context, "vkGetPhysicalDeviceVideoCapabilitiesKHR"); - vkGetPhysicalDeviceVideoFormatPropertiesKHR = (PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR)load(context, "vkGetPhysicalDeviceVideoFormatPropertiesKHR"); -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_KHR_wayland_surface) - vkCreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)load(context, "vkCreateWaylandSurfaceKHR"); - vkGetPhysicalDeviceWaylandPresentationSupportKHR = 
(PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)load(context, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"); -#endif /* defined(VK_KHR_wayland_surface) */ -#if defined(VK_KHR_win32_surface) - vkCreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)load(context, "vkCreateWin32SurfaceKHR"); - vkGetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)load(context, "vkGetPhysicalDeviceWin32PresentationSupportKHR"); -#endif /* defined(VK_KHR_win32_surface) */ -#if defined(VK_KHR_xcb_surface) - vkCreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)load(context, "vkCreateXcbSurfaceKHR"); - vkGetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)load(context, "vkGetPhysicalDeviceXcbPresentationSupportKHR"); -#endif /* defined(VK_KHR_xcb_surface) */ -#if defined(VK_KHR_xlib_surface) - vkCreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)load(context, "vkCreateXlibSurfaceKHR"); - vkGetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)load(context, "vkGetPhysicalDeviceXlibPresentationSupportKHR"); -#endif /* defined(VK_KHR_xlib_surface) */ -#if defined(VK_MVK_ios_surface) - vkCreateIOSSurfaceMVK = (PFN_vkCreateIOSSurfaceMVK)load(context, "vkCreateIOSSurfaceMVK"); -#endif /* defined(VK_MVK_ios_surface) */ -#if defined(VK_MVK_macos_surface) - vkCreateMacOSSurfaceMVK = (PFN_vkCreateMacOSSurfaceMVK)load(context, "vkCreateMacOSSurfaceMVK"); -#endif /* defined(VK_MVK_macos_surface) */ -#if defined(VK_NN_vi_surface) - vkCreateViSurfaceNN = (PFN_vkCreateViSurfaceNN)load(context, "vkCreateViSurfaceNN"); -#endif /* defined(VK_NN_vi_surface) */ -#if defined(VK_NV_acquire_winrt_display) - vkAcquireWinrtDisplayNV = (PFN_vkAcquireWinrtDisplayNV)load(context, "vkAcquireWinrtDisplayNV"); - vkGetWinrtDisplayNV = (PFN_vkGetWinrtDisplayNV)load(context, "vkGetWinrtDisplayNV"); -#endif /* defined(VK_NV_acquire_winrt_display) */ -#if defined(VK_NV_cooperative_matrix) - vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)load(context, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV"); -#endif /* defined(VK_NV_cooperative_matrix) */ -#if defined(VK_NV_coverage_reduction_mode) - vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = (PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)load(context, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"); -#endif /* defined(VK_NV_coverage_reduction_mode) */ -#if defined(VK_NV_external_memory_capabilities) - vkGetPhysicalDeviceExternalImageFormatPropertiesNV = (PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)load(context, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"); -#endif /* defined(VK_NV_external_memory_capabilities) */ -#if defined(VK_NV_optical_flow) - vkGetPhysicalDeviceOpticalFlowImageFormatsNV = (PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV)load(context, "vkGetPhysicalDeviceOpticalFlowImageFormatsNV"); -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_QNX_screen_surface) - vkCreateScreenSurfaceQNX = (PFN_vkCreateScreenSurfaceQNX)load(context, "vkCreateScreenSurfaceQNX"); - vkGetPhysicalDeviceScreenPresentationSupportQNX = (PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX)load(context, "vkGetPhysicalDeviceScreenPresentationSupportQNX"); -#endif /* defined(VK_QNX_screen_surface) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - 
vkGetPhysicalDevicePresentRectanglesKHR = (PFN_vkGetPhysicalDevicePresentRectanglesKHR)load(context, "vkGetPhysicalDevicePresentRectanglesKHR"); -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ - /* VOLK_GENERATE_LOAD_INSTANCE */ -} - -static void volkGenLoadDevice(void* context, PFN_vkVoidFunction (*load)(void*, const char*)) -{ - /* VOLK_GENERATE_LOAD_DEVICE */ -#if defined(VK_VERSION_1_0) - vkAllocateCommandBuffers = (PFN_vkAllocateCommandBuffers)load(context, "vkAllocateCommandBuffers"); - vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets)load(context, "vkAllocateDescriptorSets"); - vkAllocateMemory = (PFN_vkAllocateMemory)load(context, "vkAllocateMemory"); - vkBeginCommandBuffer = (PFN_vkBeginCommandBuffer)load(context, "vkBeginCommandBuffer"); - vkBindBufferMemory = (PFN_vkBindBufferMemory)load(context, "vkBindBufferMemory"); - vkBindImageMemory = (PFN_vkBindImageMemory)load(context, "vkBindImageMemory"); - vkCmdBeginQuery = (PFN_vkCmdBeginQuery)load(context, "vkCmdBeginQuery"); - vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)load(context, "vkCmdBeginRenderPass"); - vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets)load(context, "vkCmdBindDescriptorSets"); - vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)load(context, "vkCmdBindIndexBuffer"); - vkCmdBindPipeline = (PFN_vkCmdBindPipeline)load(context, "vkCmdBindPipeline"); - vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers)load(context, "vkCmdBindVertexBuffers"); - vkCmdBlitImage = (PFN_vkCmdBlitImage)load(context, "vkCmdBlitImage"); - vkCmdClearAttachments = (PFN_vkCmdClearAttachments)load(context, "vkCmdClearAttachments"); - vkCmdClearColorImage = (PFN_vkCmdClearColorImage)load(context, "vkCmdClearColorImage"); - vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage)load(context, "vkCmdClearDepthStencilImage"); - vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)load(context, "vkCmdCopyBuffer"); - vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage)load(context, "vkCmdCopyBufferToImage"); - vkCmdCopyImage = (PFN_vkCmdCopyImage)load(context, "vkCmdCopyImage"); - vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer)load(context, "vkCmdCopyImageToBuffer"); - vkCmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults)load(context, "vkCmdCopyQueryPoolResults"); - vkCmdDispatch = (PFN_vkCmdDispatch)load(context, "vkCmdDispatch"); - vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect)load(context, "vkCmdDispatchIndirect"); - vkCmdDraw = (PFN_vkCmdDraw)load(context, "vkCmdDraw"); - vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)load(context, "vkCmdDrawIndexed"); - vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect)load(context, "vkCmdDrawIndexedIndirect"); - vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect)load(context, "vkCmdDrawIndirect"); - vkCmdEndQuery = (PFN_vkCmdEndQuery)load(context, "vkCmdEndQuery"); - vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass)load(context, "vkCmdEndRenderPass"); - vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands)load(context, "vkCmdExecuteCommands"); - vkCmdFillBuffer = (PFN_vkCmdFillBuffer)load(context, "vkCmdFillBuffer"); - vkCmdNextSubpass = (PFN_vkCmdNextSubpass)load(context, "vkCmdNextSubpass"); - vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)load(context, "vkCmdPipelineBarrier"); - vkCmdPushConstants = (PFN_vkCmdPushConstants)load(context, "vkCmdPushConstants"); - vkCmdResetEvent = (PFN_vkCmdResetEvent)load(context, "vkCmdResetEvent"); - vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool)load(context, 
"vkCmdResetQueryPool"); - vkCmdResolveImage = (PFN_vkCmdResolveImage)load(context, "vkCmdResolveImage"); - vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants)load(context, "vkCmdSetBlendConstants"); - vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias)load(context, "vkCmdSetDepthBias"); - vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)load(context, "vkCmdSetDepthBounds"); - vkCmdSetEvent = (PFN_vkCmdSetEvent)load(context, "vkCmdSetEvent"); - vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth)load(context, "vkCmdSetLineWidth"); - vkCmdSetScissor = (PFN_vkCmdSetScissor)load(context, "vkCmdSetScissor"); - vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask)load(context, "vkCmdSetStencilCompareMask"); - vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference)load(context, "vkCmdSetStencilReference"); - vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask)load(context, "vkCmdSetStencilWriteMask"); - vkCmdSetViewport = (PFN_vkCmdSetViewport)load(context, "vkCmdSetViewport"); - vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer)load(context, "vkCmdUpdateBuffer"); - vkCmdWaitEvents = (PFN_vkCmdWaitEvents)load(context, "vkCmdWaitEvents"); - vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)load(context, "vkCmdWriteTimestamp"); - vkCreateBuffer = (PFN_vkCreateBuffer)load(context, "vkCreateBuffer"); - vkCreateBufferView = (PFN_vkCreateBufferView)load(context, "vkCreateBufferView"); - vkCreateCommandPool = (PFN_vkCreateCommandPool)load(context, "vkCreateCommandPool"); - vkCreateComputePipelines = (PFN_vkCreateComputePipelines)load(context, "vkCreateComputePipelines"); - vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool)load(context, "vkCreateDescriptorPool"); - vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout)load(context, "vkCreateDescriptorSetLayout"); - vkCreateEvent = (PFN_vkCreateEvent)load(context, "vkCreateEvent"); - vkCreateFence = (PFN_vkCreateFence)load(context, "vkCreateFence"); - vkCreateFramebuffer = (PFN_vkCreateFramebuffer)load(context, "vkCreateFramebuffer"); - vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines)load(context, "vkCreateGraphicsPipelines"); - vkCreateImage = (PFN_vkCreateImage)load(context, "vkCreateImage"); - vkCreateImageView = (PFN_vkCreateImageView)load(context, "vkCreateImageView"); - vkCreatePipelineCache = (PFN_vkCreatePipelineCache)load(context, "vkCreatePipelineCache"); - vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout)load(context, "vkCreatePipelineLayout"); - vkCreateQueryPool = (PFN_vkCreateQueryPool)load(context, "vkCreateQueryPool"); - vkCreateRenderPass = (PFN_vkCreateRenderPass)load(context, "vkCreateRenderPass"); - vkCreateSampler = (PFN_vkCreateSampler)load(context, "vkCreateSampler"); - vkCreateSemaphore = (PFN_vkCreateSemaphore)load(context, "vkCreateSemaphore"); - vkCreateShaderModule = (PFN_vkCreateShaderModule)load(context, "vkCreateShaderModule"); - vkDestroyBuffer = (PFN_vkDestroyBuffer)load(context, "vkDestroyBuffer"); - vkDestroyBufferView = (PFN_vkDestroyBufferView)load(context, "vkDestroyBufferView"); - vkDestroyCommandPool = (PFN_vkDestroyCommandPool)load(context, "vkDestroyCommandPool"); - vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool)load(context, "vkDestroyDescriptorPool"); - vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout)load(context, "vkDestroyDescriptorSetLayout"); - vkDestroyDevice = (PFN_vkDestroyDevice)load(context, "vkDestroyDevice"); - vkDestroyEvent = (PFN_vkDestroyEvent)load(context, "vkDestroyEvent"); - vkDestroyFence = (PFN_vkDestroyFence)load(context, "vkDestroyFence"); - 
vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer)load(context, "vkDestroyFramebuffer"); - vkDestroyImage = (PFN_vkDestroyImage)load(context, "vkDestroyImage"); - vkDestroyImageView = (PFN_vkDestroyImageView)load(context, "vkDestroyImageView"); - vkDestroyPipeline = (PFN_vkDestroyPipeline)load(context, "vkDestroyPipeline"); - vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache)load(context, "vkDestroyPipelineCache"); - vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout)load(context, "vkDestroyPipelineLayout"); - vkDestroyQueryPool = (PFN_vkDestroyQueryPool)load(context, "vkDestroyQueryPool"); - vkDestroyRenderPass = (PFN_vkDestroyRenderPass)load(context, "vkDestroyRenderPass"); - vkDestroySampler = (PFN_vkDestroySampler)load(context, "vkDestroySampler"); - vkDestroySemaphore = (PFN_vkDestroySemaphore)load(context, "vkDestroySemaphore"); - vkDestroyShaderModule = (PFN_vkDestroyShaderModule)load(context, "vkDestroyShaderModule"); - vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle)load(context, "vkDeviceWaitIdle"); - vkEndCommandBuffer = (PFN_vkEndCommandBuffer)load(context, "vkEndCommandBuffer"); - vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)load(context, "vkFlushMappedMemoryRanges"); - vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers)load(context, "vkFreeCommandBuffers"); - vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets)load(context, "vkFreeDescriptorSets"); - vkFreeMemory = (PFN_vkFreeMemory)load(context, "vkFreeMemory"); - vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)load(context, "vkGetBufferMemoryRequirements"); - vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment)load(context, "vkGetDeviceMemoryCommitment"); - vkGetDeviceQueue = (PFN_vkGetDeviceQueue)load(context, "vkGetDeviceQueue"); - vkGetEventStatus = (PFN_vkGetEventStatus)load(context, "vkGetEventStatus"); - vkGetFenceStatus = (PFN_vkGetFenceStatus)load(context, "vkGetFenceStatus"); - vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)load(context, "vkGetImageMemoryRequirements"); - vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)load(context, "vkGetImageSparseMemoryRequirements"); - vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout)load(context, "vkGetImageSubresourceLayout"); - vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData)load(context, "vkGetPipelineCacheData"); - vkGetQueryPoolResults = (PFN_vkGetQueryPoolResults)load(context, "vkGetQueryPoolResults"); - vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity)load(context, "vkGetRenderAreaGranularity"); - vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)load(context, "vkInvalidateMappedMemoryRanges"); - vkMapMemory = (PFN_vkMapMemory)load(context, "vkMapMemory"); - vkMergePipelineCaches = (PFN_vkMergePipelineCaches)load(context, "vkMergePipelineCaches"); - vkQueueBindSparse = (PFN_vkQueueBindSparse)load(context, "vkQueueBindSparse"); - vkQueueSubmit = (PFN_vkQueueSubmit)load(context, "vkQueueSubmit"); - vkQueueWaitIdle = (PFN_vkQueueWaitIdle)load(context, "vkQueueWaitIdle"); - vkResetCommandBuffer = (PFN_vkResetCommandBuffer)load(context, "vkResetCommandBuffer"); - vkResetCommandPool = (PFN_vkResetCommandPool)load(context, "vkResetCommandPool"); - vkResetDescriptorPool = (PFN_vkResetDescriptorPool)load(context, "vkResetDescriptorPool"); - vkResetEvent = (PFN_vkResetEvent)load(context, "vkResetEvent"); - vkResetFences = (PFN_vkResetFences)load(context, "vkResetFences"); - vkSetEvent = (PFN_vkSetEvent)load(context, "vkSetEvent"); - vkUnmapMemory 
= (PFN_vkUnmapMemory)load(context, "vkUnmapMemory"); - vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)load(context, "vkUpdateDescriptorSets"); - vkWaitForFences = (PFN_vkWaitForFences)load(context, "vkWaitForFences"); -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - vkBindBufferMemory2 = (PFN_vkBindBufferMemory2)load(context, "vkBindBufferMemory2"); - vkBindImageMemory2 = (PFN_vkBindImageMemory2)load(context, "vkBindImageMemory2"); - vkCmdDispatchBase = (PFN_vkCmdDispatchBase)load(context, "vkCmdDispatchBase"); - vkCmdSetDeviceMask = (PFN_vkCmdSetDeviceMask)load(context, "vkCmdSetDeviceMask"); - vkCreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate)load(context, "vkCreateDescriptorUpdateTemplate"); - vkCreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion)load(context, "vkCreateSamplerYcbcrConversion"); - vkDestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate)load(context, "vkDestroyDescriptorUpdateTemplate"); - vkDestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion)load(context, "vkDestroySamplerYcbcrConversion"); - vkGetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2)load(context, "vkGetBufferMemoryRequirements2"); - vkGetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport)load(context, "vkGetDescriptorSetLayoutSupport"); - vkGetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures)load(context, "vkGetDeviceGroupPeerMemoryFeatures"); - vkGetDeviceQueue2 = (PFN_vkGetDeviceQueue2)load(context, "vkGetDeviceQueue2"); - vkGetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2)load(context, "vkGetImageMemoryRequirements2"); - vkGetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2)load(context, "vkGetImageSparseMemoryRequirements2"); - vkTrimCommandPool = (PFN_vkTrimCommandPool)load(context, "vkTrimCommandPool"); - vkUpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate)load(context, "vkUpdateDescriptorSetWithTemplate"); -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_2) - vkCmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2)load(context, "vkCmdBeginRenderPass2"); - vkCmdDrawIndexedIndirectCount = (PFN_vkCmdDrawIndexedIndirectCount)load(context, "vkCmdDrawIndexedIndirectCount"); - vkCmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)load(context, "vkCmdDrawIndirectCount"); - vkCmdEndRenderPass2 = (PFN_vkCmdEndRenderPass2)load(context, "vkCmdEndRenderPass2"); - vkCmdNextSubpass2 = (PFN_vkCmdNextSubpass2)load(context, "vkCmdNextSubpass2"); - vkCreateRenderPass2 = (PFN_vkCreateRenderPass2)load(context, "vkCreateRenderPass2"); - vkGetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)load(context, "vkGetBufferDeviceAddress"); - vkGetBufferOpaqueCaptureAddress = (PFN_vkGetBufferOpaqueCaptureAddress)load(context, "vkGetBufferOpaqueCaptureAddress"); - vkGetDeviceMemoryOpaqueCaptureAddress = (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)load(context, "vkGetDeviceMemoryOpaqueCaptureAddress"); - vkGetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)load(context, "vkGetSemaphoreCounterValue"); - vkResetQueryPool = (PFN_vkResetQueryPool)load(context, "vkResetQueryPool"); - vkSignalSemaphore = (PFN_vkSignalSemaphore)load(context, "vkSignalSemaphore"); - vkWaitSemaphores = (PFN_vkWaitSemaphores)load(context, "vkWaitSemaphores"); -#endif /* defined(VK_VERSION_1_2) */ -#if defined(VK_VERSION_1_3) - vkCmdBeginRendering = (PFN_vkCmdBeginRendering)load(context, "vkCmdBeginRendering"); - 
vkCmdBindVertexBuffers2 = (PFN_vkCmdBindVertexBuffers2)load(context, "vkCmdBindVertexBuffers2"); - vkCmdBlitImage2 = (PFN_vkCmdBlitImage2)load(context, "vkCmdBlitImage2"); - vkCmdCopyBuffer2 = (PFN_vkCmdCopyBuffer2)load(context, "vkCmdCopyBuffer2"); - vkCmdCopyBufferToImage2 = (PFN_vkCmdCopyBufferToImage2)load(context, "vkCmdCopyBufferToImage2"); - vkCmdCopyImage2 = (PFN_vkCmdCopyImage2)load(context, "vkCmdCopyImage2"); - vkCmdCopyImageToBuffer2 = (PFN_vkCmdCopyImageToBuffer2)load(context, "vkCmdCopyImageToBuffer2"); - vkCmdEndRendering = (PFN_vkCmdEndRendering)load(context, "vkCmdEndRendering"); - vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)load(context, "vkCmdPipelineBarrier2"); - vkCmdResetEvent2 = (PFN_vkCmdResetEvent2)load(context, "vkCmdResetEvent2"); - vkCmdResolveImage2 = (PFN_vkCmdResolveImage2)load(context, "vkCmdResolveImage2"); - vkCmdSetCullMode = (PFN_vkCmdSetCullMode)load(context, "vkCmdSetCullMode"); - vkCmdSetDepthBiasEnable = (PFN_vkCmdSetDepthBiasEnable)load(context, "vkCmdSetDepthBiasEnable"); - vkCmdSetDepthBoundsTestEnable = (PFN_vkCmdSetDepthBoundsTestEnable)load(context, "vkCmdSetDepthBoundsTestEnable"); - vkCmdSetDepthCompareOp = (PFN_vkCmdSetDepthCompareOp)load(context, "vkCmdSetDepthCompareOp"); - vkCmdSetDepthTestEnable = (PFN_vkCmdSetDepthTestEnable)load(context, "vkCmdSetDepthTestEnable"); - vkCmdSetDepthWriteEnable = (PFN_vkCmdSetDepthWriteEnable)load(context, "vkCmdSetDepthWriteEnable"); - vkCmdSetEvent2 = (PFN_vkCmdSetEvent2)load(context, "vkCmdSetEvent2"); - vkCmdSetFrontFace = (PFN_vkCmdSetFrontFace)load(context, "vkCmdSetFrontFace"); - vkCmdSetPrimitiveRestartEnable = (PFN_vkCmdSetPrimitiveRestartEnable)load(context, "vkCmdSetPrimitiveRestartEnable"); - vkCmdSetPrimitiveTopology = (PFN_vkCmdSetPrimitiveTopology)load(context, "vkCmdSetPrimitiveTopology"); - vkCmdSetRasterizerDiscardEnable = (PFN_vkCmdSetRasterizerDiscardEnable)load(context, "vkCmdSetRasterizerDiscardEnable"); - vkCmdSetScissorWithCount = (PFN_vkCmdSetScissorWithCount)load(context, "vkCmdSetScissorWithCount"); - vkCmdSetStencilOp = (PFN_vkCmdSetStencilOp)load(context, "vkCmdSetStencilOp"); - vkCmdSetStencilTestEnable = (PFN_vkCmdSetStencilTestEnable)load(context, "vkCmdSetStencilTestEnable"); - vkCmdSetViewportWithCount = (PFN_vkCmdSetViewportWithCount)load(context, "vkCmdSetViewportWithCount"); - vkCmdWaitEvents2 = (PFN_vkCmdWaitEvents2)load(context, "vkCmdWaitEvents2"); - vkCmdWriteTimestamp2 = (PFN_vkCmdWriteTimestamp2)load(context, "vkCmdWriteTimestamp2"); - vkCreatePrivateDataSlot = (PFN_vkCreatePrivateDataSlot)load(context, "vkCreatePrivateDataSlot"); - vkDestroyPrivateDataSlot = (PFN_vkDestroyPrivateDataSlot)load(context, "vkDestroyPrivateDataSlot"); - vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)load(context, "vkGetDeviceBufferMemoryRequirements"); - vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)load(context, "vkGetDeviceImageMemoryRequirements"); - vkGetDeviceImageSparseMemoryRequirements = (PFN_vkGetDeviceImageSparseMemoryRequirements)load(context, "vkGetDeviceImageSparseMemoryRequirements"); - vkGetPrivateData = (PFN_vkGetPrivateData)load(context, "vkGetPrivateData"); - vkQueueSubmit2 = (PFN_vkQueueSubmit2)load(context, "vkQueueSubmit2"); - vkSetPrivateData = (PFN_vkSetPrivateData)load(context, "vkSetPrivateData"); -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_AMDX_shader_enqueue) - vkCmdDispatchGraphAMDX = (PFN_vkCmdDispatchGraphAMDX)load(context, "vkCmdDispatchGraphAMDX"); - 
vkCmdDispatchGraphIndirectAMDX = (PFN_vkCmdDispatchGraphIndirectAMDX)load(context, "vkCmdDispatchGraphIndirectAMDX"); - vkCmdDispatchGraphIndirectCountAMDX = (PFN_vkCmdDispatchGraphIndirectCountAMDX)load(context, "vkCmdDispatchGraphIndirectCountAMDX"); - vkCmdInitializeGraphScratchMemoryAMDX = (PFN_vkCmdInitializeGraphScratchMemoryAMDX)load(context, "vkCmdInitializeGraphScratchMemoryAMDX"); - vkCreateExecutionGraphPipelinesAMDX = (PFN_vkCreateExecutionGraphPipelinesAMDX)load(context, "vkCreateExecutionGraphPipelinesAMDX"); - vkGetExecutionGraphPipelineNodeIndexAMDX = (PFN_vkGetExecutionGraphPipelineNodeIndexAMDX)load(context, "vkGetExecutionGraphPipelineNodeIndexAMDX"); - vkGetExecutionGraphPipelineScratchSizeAMDX = (PFN_vkGetExecutionGraphPipelineScratchSizeAMDX)load(context, "vkGetExecutionGraphPipelineScratchSizeAMDX"); -#endif /* defined(VK_AMDX_shader_enqueue) */ -#if defined(VK_AMD_anti_lag) - vkAntiLagUpdateAMD = (PFN_vkAntiLagUpdateAMD)load(context, "vkAntiLagUpdateAMD"); -#endif /* defined(VK_AMD_anti_lag) */ -#if defined(VK_AMD_buffer_marker) - vkCmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)load(context, "vkCmdWriteBufferMarkerAMD"); -#endif /* defined(VK_AMD_buffer_marker) */ -#if defined(VK_AMD_display_native_hdr) - vkSetLocalDimmingAMD = (PFN_vkSetLocalDimmingAMD)load(context, "vkSetLocalDimmingAMD"); -#endif /* defined(VK_AMD_display_native_hdr) */ -#if defined(VK_AMD_draw_indirect_count) - vkCmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD)load(context, "vkCmdDrawIndexedIndirectCountAMD"); - vkCmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD)load(context, "vkCmdDrawIndirectCountAMD"); -#endif /* defined(VK_AMD_draw_indirect_count) */ -#if defined(VK_AMD_shader_info) - vkGetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)load(context, "vkGetShaderInfoAMD"); -#endif /* defined(VK_AMD_shader_info) */ -#if defined(VK_ANDROID_external_memory_android_hardware_buffer) - vkGetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)load(context, "vkGetAndroidHardwareBufferPropertiesANDROID"); - vkGetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID)load(context, "vkGetMemoryAndroidHardwareBufferANDROID"); -#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */ -#if defined(VK_EXT_attachment_feedback_loop_dynamic_state) - vkCmdSetAttachmentFeedbackLoopEnableEXT = (PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT)load(context, "vkCmdSetAttachmentFeedbackLoopEnableEXT"); -#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */ -#if defined(VK_EXT_buffer_device_address) - vkGetBufferDeviceAddressEXT = (PFN_vkGetBufferDeviceAddressEXT)load(context, "vkGetBufferDeviceAddressEXT"); -#endif /* defined(VK_EXT_buffer_device_address) */ -#if defined(VK_EXT_calibrated_timestamps) - vkGetCalibratedTimestampsEXT = (PFN_vkGetCalibratedTimestampsEXT)load(context, "vkGetCalibratedTimestampsEXT"); -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_color_write_enable) - vkCmdSetColorWriteEnableEXT = (PFN_vkCmdSetColorWriteEnableEXT)load(context, "vkCmdSetColorWriteEnableEXT"); -#endif /* defined(VK_EXT_color_write_enable) */ -#if defined(VK_EXT_conditional_rendering) - vkCmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT)load(context, "vkCmdBeginConditionalRenderingEXT"); - vkCmdEndConditionalRenderingEXT = (PFN_vkCmdEndConditionalRenderingEXT)load(context, "vkCmdEndConditionalRenderingEXT"); -#endif /* 
defined(VK_EXT_conditional_rendering) */ -#if defined(VK_EXT_debug_marker) - vkCmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT)load(context, "vkCmdDebugMarkerBeginEXT"); - vkCmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT)load(context, "vkCmdDebugMarkerEndEXT"); - vkCmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT)load(context, "vkCmdDebugMarkerInsertEXT"); - vkDebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)load(context, "vkDebugMarkerSetObjectNameEXT"); - vkDebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT)load(context, "vkDebugMarkerSetObjectTagEXT"); -#endif /* defined(VK_EXT_debug_marker) */ -#if defined(VK_EXT_depth_bias_control) - vkCmdSetDepthBias2EXT = (PFN_vkCmdSetDepthBias2EXT)load(context, "vkCmdSetDepthBias2EXT"); -#endif /* defined(VK_EXT_depth_bias_control) */ -#if defined(VK_EXT_descriptor_buffer) - vkCmdBindDescriptorBufferEmbeddedSamplersEXT = (PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT)load(context, "vkCmdBindDescriptorBufferEmbeddedSamplersEXT"); - vkCmdBindDescriptorBuffersEXT = (PFN_vkCmdBindDescriptorBuffersEXT)load(context, "vkCmdBindDescriptorBuffersEXT"); - vkCmdSetDescriptorBufferOffsetsEXT = (PFN_vkCmdSetDescriptorBufferOffsetsEXT)load(context, "vkCmdSetDescriptorBufferOffsetsEXT"); - vkGetBufferOpaqueCaptureDescriptorDataEXT = (PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT)load(context, "vkGetBufferOpaqueCaptureDescriptorDataEXT"); - vkGetDescriptorEXT = (PFN_vkGetDescriptorEXT)load(context, "vkGetDescriptorEXT"); - vkGetDescriptorSetLayoutBindingOffsetEXT = (PFN_vkGetDescriptorSetLayoutBindingOffsetEXT)load(context, "vkGetDescriptorSetLayoutBindingOffsetEXT"); - vkGetDescriptorSetLayoutSizeEXT = (PFN_vkGetDescriptorSetLayoutSizeEXT)load(context, "vkGetDescriptorSetLayoutSizeEXT"); - vkGetImageOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageOpaqueCaptureDescriptorDataEXT"); - vkGetImageViewOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageViewOpaqueCaptureDescriptorDataEXT"); - vkGetSamplerOpaqueCaptureDescriptorDataEXT = (PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT)load(context, "vkGetSamplerOpaqueCaptureDescriptorDataEXT"); -#endif /* defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) - vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT = (PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT)load(context, "vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT"); -#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */ -#if defined(VK_EXT_device_fault) - vkGetDeviceFaultInfoEXT = (PFN_vkGetDeviceFaultInfoEXT)load(context, "vkGetDeviceFaultInfoEXT"); -#endif /* defined(VK_EXT_device_fault) */ -#if defined(VK_EXT_discard_rectangles) - vkCmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT)load(context, "vkCmdSetDiscardRectangleEXT"); -#endif /* defined(VK_EXT_discard_rectangles) */ -#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 - vkCmdSetDiscardRectangleEnableEXT = (PFN_vkCmdSetDiscardRectangleEnableEXT)load(context, "vkCmdSetDiscardRectangleEnableEXT"); - vkCmdSetDiscardRectangleModeEXT = (PFN_vkCmdSetDiscardRectangleModeEXT)load(context, "vkCmdSetDiscardRectangleModeEXT"); -#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */ 
-#if defined(VK_EXT_display_control) - vkDisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT)load(context, "vkDisplayPowerControlEXT"); - vkGetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT)load(context, "vkGetSwapchainCounterEXT"); - vkRegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT)load(context, "vkRegisterDeviceEventEXT"); - vkRegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT)load(context, "vkRegisterDisplayEventEXT"); -#endif /* defined(VK_EXT_display_control) */ -#if defined(VK_EXT_external_memory_host) - vkGetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT)load(context, "vkGetMemoryHostPointerPropertiesEXT"); -#endif /* defined(VK_EXT_external_memory_host) */ -#if defined(VK_EXT_full_screen_exclusive) - vkAcquireFullScreenExclusiveModeEXT = (PFN_vkAcquireFullScreenExclusiveModeEXT)load(context, "vkAcquireFullScreenExclusiveModeEXT"); - vkReleaseFullScreenExclusiveModeEXT = (PFN_vkReleaseFullScreenExclusiveModeEXT)load(context, "vkReleaseFullScreenExclusiveModeEXT"); -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) - vkGetDeviceGroupSurfacePresentModes2EXT = (PFN_vkGetDeviceGroupSurfacePresentModes2EXT)load(context, "vkGetDeviceGroupSurfacePresentModes2EXT"); -#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */ -#if defined(VK_EXT_hdr_metadata) - vkSetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT)load(context, "vkSetHdrMetadataEXT"); -#endif /* defined(VK_EXT_hdr_metadata) */ -#if defined(VK_EXT_host_image_copy) - vkCopyImageToImageEXT = (PFN_vkCopyImageToImageEXT)load(context, "vkCopyImageToImageEXT"); - vkCopyImageToMemoryEXT = (PFN_vkCopyImageToMemoryEXT)load(context, "vkCopyImageToMemoryEXT"); - vkCopyMemoryToImageEXT = (PFN_vkCopyMemoryToImageEXT)load(context, "vkCopyMemoryToImageEXT"); - vkTransitionImageLayoutEXT = (PFN_vkTransitionImageLayoutEXT)load(context, "vkTransitionImageLayoutEXT"); -#endif /* defined(VK_EXT_host_image_copy) */ -#if defined(VK_EXT_host_query_reset) - vkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)load(context, "vkResetQueryPoolEXT"); -#endif /* defined(VK_EXT_host_query_reset) */ -#if defined(VK_EXT_image_drm_format_modifier) - vkGetImageDrmFormatModifierPropertiesEXT = (PFN_vkGetImageDrmFormatModifierPropertiesEXT)load(context, "vkGetImageDrmFormatModifierPropertiesEXT"); -#endif /* defined(VK_EXT_image_drm_format_modifier) */ -#if defined(VK_EXT_line_rasterization) - vkCmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT)load(context, "vkCmdSetLineStippleEXT"); -#endif /* defined(VK_EXT_line_rasterization) */ -#if defined(VK_EXT_mesh_shader) - vkCmdDrawMeshTasksEXT = (PFN_vkCmdDrawMeshTasksEXT)load(context, "vkCmdDrawMeshTasksEXT"); - vkCmdDrawMeshTasksIndirectCountEXT = (PFN_vkCmdDrawMeshTasksIndirectCountEXT)load(context, "vkCmdDrawMeshTasksIndirectCountEXT"); - vkCmdDrawMeshTasksIndirectEXT = (PFN_vkCmdDrawMeshTasksIndirectEXT)load(context, "vkCmdDrawMeshTasksIndirectEXT"); -#endif /* defined(VK_EXT_mesh_shader) */ -#if defined(VK_EXT_metal_objects) - vkExportMetalObjectsEXT = (PFN_vkExportMetalObjectsEXT)load(context, "vkExportMetalObjectsEXT"); -#endif /* defined(VK_EXT_metal_objects) */ -#if defined(VK_EXT_multi_draw) - vkCmdDrawMultiEXT = (PFN_vkCmdDrawMultiEXT)load(context, "vkCmdDrawMultiEXT"); - vkCmdDrawMultiIndexedEXT = (PFN_vkCmdDrawMultiIndexedEXT)load(context, "vkCmdDrawMultiIndexedEXT"); -#endif /* defined(VK_EXT_multi_draw) */ 
-#if defined(VK_EXT_opacity_micromap) - vkBuildMicromapsEXT = (PFN_vkBuildMicromapsEXT)load(context, "vkBuildMicromapsEXT"); - vkCmdBuildMicromapsEXT = (PFN_vkCmdBuildMicromapsEXT)load(context, "vkCmdBuildMicromapsEXT"); - vkCmdCopyMemoryToMicromapEXT = (PFN_vkCmdCopyMemoryToMicromapEXT)load(context, "vkCmdCopyMemoryToMicromapEXT"); - vkCmdCopyMicromapEXT = (PFN_vkCmdCopyMicromapEXT)load(context, "vkCmdCopyMicromapEXT"); - vkCmdCopyMicromapToMemoryEXT = (PFN_vkCmdCopyMicromapToMemoryEXT)load(context, "vkCmdCopyMicromapToMemoryEXT"); - vkCmdWriteMicromapsPropertiesEXT = (PFN_vkCmdWriteMicromapsPropertiesEXT)load(context, "vkCmdWriteMicromapsPropertiesEXT"); - vkCopyMemoryToMicromapEXT = (PFN_vkCopyMemoryToMicromapEXT)load(context, "vkCopyMemoryToMicromapEXT"); - vkCopyMicromapEXT = (PFN_vkCopyMicromapEXT)load(context, "vkCopyMicromapEXT"); - vkCopyMicromapToMemoryEXT = (PFN_vkCopyMicromapToMemoryEXT)load(context, "vkCopyMicromapToMemoryEXT"); - vkCreateMicromapEXT = (PFN_vkCreateMicromapEXT)load(context, "vkCreateMicromapEXT"); - vkDestroyMicromapEXT = (PFN_vkDestroyMicromapEXT)load(context, "vkDestroyMicromapEXT"); - vkGetDeviceMicromapCompatibilityEXT = (PFN_vkGetDeviceMicromapCompatibilityEXT)load(context, "vkGetDeviceMicromapCompatibilityEXT"); - vkGetMicromapBuildSizesEXT = (PFN_vkGetMicromapBuildSizesEXT)load(context, "vkGetMicromapBuildSizesEXT"); - vkWriteMicromapsPropertiesEXT = (PFN_vkWriteMicromapsPropertiesEXT)load(context, "vkWriteMicromapsPropertiesEXT"); -#endif /* defined(VK_EXT_opacity_micromap) */ -#if defined(VK_EXT_pageable_device_local_memory) - vkSetDeviceMemoryPriorityEXT = (PFN_vkSetDeviceMemoryPriorityEXT)load(context, "vkSetDeviceMemoryPriorityEXT"); -#endif /* defined(VK_EXT_pageable_device_local_memory) */ -#if defined(VK_EXT_pipeline_properties) - vkGetPipelinePropertiesEXT = (PFN_vkGetPipelinePropertiesEXT)load(context, "vkGetPipelinePropertiesEXT"); -#endif /* defined(VK_EXT_pipeline_properties) */ -#if defined(VK_EXT_private_data) - vkCreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)load(context, "vkCreatePrivateDataSlotEXT"); - vkDestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)load(context, "vkDestroyPrivateDataSlotEXT"); - vkGetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)load(context, "vkGetPrivateDataEXT"); - vkSetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)load(context, "vkSetPrivateDataEXT"); -#endif /* defined(VK_EXT_private_data) */ -#if defined(VK_EXT_sample_locations) - vkCmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT)load(context, "vkCmdSetSampleLocationsEXT"); -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_shader_module_identifier) - vkGetShaderModuleCreateInfoIdentifierEXT = (PFN_vkGetShaderModuleCreateInfoIdentifierEXT)load(context, "vkGetShaderModuleCreateInfoIdentifierEXT"); - vkGetShaderModuleIdentifierEXT = (PFN_vkGetShaderModuleIdentifierEXT)load(context, "vkGetShaderModuleIdentifierEXT"); -#endif /* defined(VK_EXT_shader_module_identifier) */ -#if defined(VK_EXT_shader_object) - vkCmdBindShadersEXT = (PFN_vkCmdBindShadersEXT)load(context, "vkCmdBindShadersEXT"); - vkCreateShadersEXT = (PFN_vkCreateShadersEXT)load(context, "vkCreateShadersEXT"); - vkDestroyShaderEXT = (PFN_vkDestroyShaderEXT)load(context, "vkDestroyShaderEXT"); - vkGetShaderBinaryDataEXT = (PFN_vkGetShaderBinaryDataEXT)load(context, "vkGetShaderBinaryDataEXT"); -#endif /* defined(VK_EXT_shader_object) */ -#if defined(VK_EXT_swapchain_maintenance1) - vkReleaseSwapchainImagesEXT = (PFN_vkReleaseSwapchainImagesEXT)load(context, 
"vkReleaseSwapchainImagesEXT"); -#endif /* defined(VK_EXT_swapchain_maintenance1) */ -#if defined(VK_EXT_transform_feedback) - vkCmdBeginQueryIndexedEXT = (PFN_vkCmdBeginQueryIndexedEXT)load(context, "vkCmdBeginQueryIndexedEXT"); - vkCmdBeginTransformFeedbackEXT = (PFN_vkCmdBeginTransformFeedbackEXT)load(context, "vkCmdBeginTransformFeedbackEXT"); - vkCmdBindTransformFeedbackBuffersEXT = (PFN_vkCmdBindTransformFeedbackBuffersEXT)load(context, "vkCmdBindTransformFeedbackBuffersEXT"); - vkCmdDrawIndirectByteCountEXT = (PFN_vkCmdDrawIndirectByteCountEXT)load(context, "vkCmdDrawIndirectByteCountEXT"); - vkCmdEndQueryIndexedEXT = (PFN_vkCmdEndQueryIndexedEXT)load(context, "vkCmdEndQueryIndexedEXT"); - vkCmdEndTransformFeedbackEXT = (PFN_vkCmdEndTransformFeedbackEXT)load(context, "vkCmdEndTransformFeedbackEXT"); -#endif /* defined(VK_EXT_transform_feedback) */ -#if defined(VK_EXT_validation_cache) - vkCreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT)load(context, "vkCreateValidationCacheEXT"); - vkDestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT)load(context, "vkDestroyValidationCacheEXT"); - vkGetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT)load(context, "vkGetValidationCacheDataEXT"); - vkMergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT)load(context, "vkMergeValidationCachesEXT"); -#endif /* defined(VK_EXT_validation_cache) */ -#if defined(VK_FUCHSIA_buffer_collection) - vkCreateBufferCollectionFUCHSIA = (PFN_vkCreateBufferCollectionFUCHSIA)load(context, "vkCreateBufferCollectionFUCHSIA"); - vkDestroyBufferCollectionFUCHSIA = (PFN_vkDestroyBufferCollectionFUCHSIA)load(context, "vkDestroyBufferCollectionFUCHSIA"); - vkGetBufferCollectionPropertiesFUCHSIA = (PFN_vkGetBufferCollectionPropertiesFUCHSIA)load(context, "vkGetBufferCollectionPropertiesFUCHSIA"); - vkSetBufferCollectionBufferConstraintsFUCHSIA = (PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)load(context, "vkSetBufferCollectionBufferConstraintsFUCHSIA"); - vkSetBufferCollectionImageConstraintsFUCHSIA = (PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)load(context, "vkSetBufferCollectionImageConstraintsFUCHSIA"); -#endif /* defined(VK_FUCHSIA_buffer_collection) */ -#if defined(VK_FUCHSIA_external_memory) - vkGetMemoryZirconHandleFUCHSIA = (PFN_vkGetMemoryZirconHandleFUCHSIA)load(context, "vkGetMemoryZirconHandleFUCHSIA"); - vkGetMemoryZirconHandlePropertiesFUCHSIA = (PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA)load(context, "vkGetMemoryZirconHandlePropertiesFUCHSIA"); -#endif /* defined(VK_FUCHSIA_external_memory) */ -#if defined(VK_FUCHSIA_external_semaphore) - vkGetSemaphoreZirconHandleFUCHSIA = (PFN_vkGetSemaphoreZirconHandleFUCHSIA)load(context, "vkGetSemaphoreZirconHandleFUCHSIA"); - vkImportSemaphoreZirconHandleFUCHSIA = (PFN_vkImportSemaphoreZirconHandleFUCHSIA)load(context, "vkImportSemaphoreZirconHandleFUCHSIA"); -#endif /* defined(VK_FUCHSIA_external_semaphore) */ -#if defined(VK_GOOGLE_display_timing) - vkGetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE)load(context, "vkGetPastPresentationTimingGOOGLE"); - vkGetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE)load(context, "vkGetRefreshCycleDurationGOOGLE"); -#endif /* defined(VK_GOOGLE_display_timing) */ -#if defined(VK_HUAWEI_cluster_culling_shader) - vkCmdDrawClusterHUAWEI = (PFN_vkCmdDrawClusterHUAWEI)load(context, "vkCmdDrawClusterHUAWEI"); - vkCmdDrawClusterIndirectHUAWEI = (PFN_vkCmdDrawClusterIndirectHUAWEI)load(context, "vkCmdDrawClusterIndirectHUAWEI"); -#endif /* 
defined(VK_HUAWEI_cluster_culling_shader) */ -#if defined(VK_HUAWEI_invocation_mask) - vkCmdBindInvocationMaskHUAWEI = (PFN_vkCmdBindInvocationMaskHUAWEI)load(context, "vkCmdBindInvocationMaskHUAWEI"); -#endif /* defined(VK_HUAWEI_invocation_mask) */ -#if defined(VK_HUAWEI_subpass_shading) - vkCmdSubpassShadingHUAWEI = (PFN_vkCmdSubpassShadingHUAWEI)load(context, "vkCmdSubpassShadingHUAWEI"); - vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI = (PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)load(context, "vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI"); -#endif /* defined(VK_HUAWEI_subpass_shading) */ -#if defined(VK_INTEL_performance_query) - vkAcquirePerformanceConfigurationINTEL = (PFN_vkAcquirePerformanceConfigurationINTEL)load(context, "vkAcquirePerformanceConfigurationINTEL"); - vkCmdSetPerformanceMarkerINTEL = (PFN_vkCmdSetPerformanceMarkerINTEL)load(context, "vkCmdSetPerformanceMarkerINTEL"); - vkCmdSetPerformanceOverrideINTEL = (PFN_vkCmdSetPerformanceOverrideINTEL)load(context, "vkCmdSetPerformanceOverrideINTEL"); - vkCmdSetPerformanceStreamMarkerINTEL = (PFN_vkCmdSetPerformanceStreamMarkerINTEL)load(context, "vkCmdSetPerformanceStreamMarkerINTEL"); - vkGetPerformanceParameterINTEL = (PFN_vkGetPerformanceParameterINTEL)load(context, "vkGetPerformanceParameterINTEL"); - vkInitializePerformanceApiINTEL = (PFN_vkInitializePerformanceApiINTEL)load(context, "vkInitializePerformanceApiINTEL"); - vkQueueSetPerformanceConfigurationINTEL = (PFN_vkQueueSetPerformanceConfigurationINTEL)load(context, "vkQueueSetPerformanceConfigurationINTEL"); - vkReleasePerformanceConfigurationINTEL = (PFN_vkReleasePerformanceConfigurationINTEL)load(context, "vkReleasePerformanceConfigurationINTEL"); - vkUninitializePerformanceApiINTEL = (PFN_vkUninitializePerformanceApiINTEL)load(context, "vkUninitializePerformanceApiINTEL"); -#endif /* defined(VK_INTEL_performance_query) */ -#if defined(VK_KHR_acceleration_structure) - vkBuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)load(context, "vkBuildAccelerationStructuresKHR"); - vkCmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)load(context, "vkCmdBuildAccelerationStructuresIndirectKHR"); - vkCmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)load(context, "vkCmdBuildAccelerationStructuresKHR"); - vkCmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)load(context, "vkCmdCopyAccelerationStructureKHR"); - vkCmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)load(context, "vkCmdCopyAccelerationStructureToMemoryKHR"); - vkCmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)load(context, "vkCmdCopyMemoryToAccelerationStructureKHR"); - vkCmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)load(context, "vkCmdWriteAccelerationStructuresPropertiesKHR"); - vkCopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)load(context, "vkCopyAccelerationStructureKHR"); - vkCopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)load(context, "vkCopyAccelerationStructureToMemoryKHR"); - vkCopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)load(context, "vkCopyMemoryToAccelerationStructureKHR"); - vkCreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)load(context, "vkCreateAccelerationStructureKHR"); - vkDestroyAccelerationStructureKHR = 
(PFN_vkDestroyAccelerationStructureKHR)load(context, "vkDestroyAccelerationStructureKHR"); - vkGetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)load(context, "vkGetAccelerationStructureBuildSizesKHR"); - vkGetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)load(context, "vkGetAccelerationStructureDeviceAddressKHR"); - vkGetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)load(context, "vkGetDeviceAccelerationStructureCompatibilityKHR"); - vkWriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)load(context, "vkWriteAccelerationStructuresPropertiesKHR"); -#endif /* defined(VK_KHR_acceleration_structure) */ -#if defined(VK_KHR_bind_memory2) - vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)load(context, "vkBindBufferMemory2KHR"); - vkBindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)load(context, "vkBindImageMemory2KHR"); -#endif /* defined(VK_KHR_bind_memory2) */ -#if defined(VK_KHR_buffer_device_address) - vkGetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)load(context, "vkGetBufferDeviceAddressKHR"); - vkGetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)load(context, "vkGetBufferOpaqueCaptureAddressKHR"); - vkGetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)load(context, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); -#endif /* defined(VK_KHR_buffer_device_address) */ -#if defined(VK_KHR_calibrated_timestamps) - vkGetCalibratedTimestampsKHR = (PFN_vkGetCalibratedTimestampsKHR)load(context, "vkGetCalibratedTimestampsKHR"); -#endif /* defined(VK_KHR_calibrated_timestamps) */ -#if defined(VK_KHR_copy_commands2) - vkCmdBlitImage2KHR = (PFN_vkCmdBlitImage2KHR)load(context, "vkCmdBlitImage2KHR"); - vkCmdCopyBuffer2KHR = (PFN_vkCmdCopyBuffer2KHR)load(context, "vkCmdCopyBuffer2KHR"); - vkCmdCopyBufferToImage2KHR = (PFN_vkCmdCopyBufferToImage2KHR)load(context, "vkCmdCopyBufferToImage2KHR"); - vkCmdCopyImage2KHR = (PFN_vkCmdCopyImage2KHR)load(context, "vkCmdCopyImage2KHR"); - vkCmdCopyImageToBuffer2KHR = (PFN_vkCmdCopyImageToBuffer2KHR)load(context, "vkCmdCopyImageToBuffer2KHR"); - vkCmdResolveImage2KHR = (PFN_vkCmdResolveImage2KHR)load(context, "vkCmdResolveImage2KHR"); -#endif /* defined(VK_KHR_copy_commands2) */ -#if defined(VK_KHR_create_renderpass2) - vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)load(context, "vkCmdBeginRenderPass2KHR"); - vkCmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)load(context, "vkCmdEndRenderPass2KHR"); - vkCmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)load(context, "vkCmdNextSubpass2KHR"); - vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)load(context, "vkCreateRenderPass2KHR"); -#endif /* defined(VK_KHR_create_renderpass2) */ -#if defined(VK_KHR_deferred_host_operations) - vkCreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)load(context, "vkCreateDeferredOperationKHR"); - vkDeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)load(context, "vkDeferredOperationJoinKHR"); - vkDestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)load(context, "vkDestroyDeferredOperationKHR"); - vkGetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)load(context, "vkGetDeferredOperationMaxConcurrencyKHR"); - vkGetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)load(context, "vkGetDeferredOperationResultKHR"); -#endif /* 
defined(VK_KHR_deferred_host_operations) */ -#if defined(VK_KHR_descriptor_update_template) - vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)load(context, "vkCreateDescriptorUpdateTemplateKHR"); - vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)load(context, "vkDestroyDescriptorUpdateTemplateKHR"); - vkUpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR)load(context, "vkUpdateDescriptorSetWithTemplateKHR"); -#endif /* defined(VK_KHR_descriptor_update_template) */ -#if defined(VK_KHR_device_group) - vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)load(context, "vkCmdDispatchBaseKHR"); - vkCmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR)load(context, "vkCmdSetDeviceMaskKHR"); - vkGetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)load(context, "vkGetDeviceGroupPeerMemoryFeaturesKHR"); -#endif /* defined(VK_KHR_device_group) */ -#if defined(VK_KHR_display_swapchain) - vkCreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR)load(context, "vkCreateSharedSwapchainsKHR"); -#endif /* defined(VK_KHR_display_swapchain) */ -#if defined(VK_KHR_draw_indirect_count) - vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)load(context, "vkCmdDrawIndexedIndirectCountKHR"); - vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)load(context, "vkCmdDrawIndirectCountKHR"); -#endif /* defined(VK_KHR_draw_indirect_count) */ -#if defined(VK_KHR_dynamic_rendering) - vkCmdBeginRenderingKHR = (PFN_vkCmdBeginRenderingKHR)load(context, "vkCmdBeginRenderingKHR"); - vkCmdEndRenderingKHR = (PFN_vkCmdEndRenderingKHR)load(context, "vkCmdEndRenderingKHR"); -#endif /* defined(VK_KHR_dynamic_rendering) */ -#if defined(VK_KHR_dynamic_rendering_local_read) - vkCmdSetRenderingAttachmentLocationsKHR = (PFN_vkCmdSetRenderingAttachmentLocationsKHR)load(context, "vkCmdSetRenderingAttachmentLocationsKHR"); - vkCmdSetRenderingInputAttachmentIndicesKHR = (PFN_vkCmdSetRenderingInputAttachmentIndicesKHR)load(context, "vkCmdSetRenderingInputAttachmentIndicesKHR"); -#endif /* defined(VK_KHR_dynamic_rendering_local_read) */ -#if defined(VK_KHR_external_fence_fd) - vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)load(context, "vkGetFenceFdKHR"); - vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)load(context, "vkImportFenceFdKHR"); -#endif /* defined(VK_KHR_external_fence_fd) */ -#if defined(VK_KHR_external_fence_win32) - vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)load(context, "vkGetFenceWin32HandleKHR"); - vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)load(context, "vkImportFenceWin32HandleKHR"); -#endif /* defined(VK_KHR_external_fence_win32) */ -#if defined(VK_KHR_external_memory_fd) - vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)load(context, "vkGetMemoryFdKHR"); - vkGetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR)load(context, "vkGetMemoryFdPropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_fd) */ -#if defined(VK_KHR_external_memory_win32) - vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)load(context, "vkGetMemoryWin32HandleKHR"); - vkGetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR)load(context, "vkGetMemoryWin32HandlePropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_win32) */ -#if defined(VK_KHR_external_semaphore_fd) - vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)load(context, "vkGetSemaphoreFdKHR"); - vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)load(context, "vkImportSemaphoreFdKHR"); 
-#endif /* defined(VK_KHR_external_semaphore_fd) */ -#if defined(VK_KHR_external_semaphore_win32) - vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)load(context, "vkGetSemaphoreWin32HandleKHR"); - vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)load(context, "vkImportSemaphoreWin32HandleKHR"); -#endif /* defined(VK_KHR_external_semaphore_win32) */ -#if defined(VK_KHR_fragment_shading_rate) - vkCmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)load(context, "vkCmdSetFragmentShadingRateKHR"); -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_memory_requirements2) - vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)load(context, "vkGetBufferMemoryRequirements2KHR"); - vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)load(context, "vkGetImageMemoryRequirements2KHR"); - vkGetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR)load(context, "vkGetImageSparseMemoryRequirements2KHR"); -#endif /* defined(VK_KHR_get_memory_requirements2) */ -#if defined(VK_KHR_line_rasterization) - vkCmdSetLineStippleKHR = (PFN_vkCmdSetLineStippleKHR)load(context, "vkCmdSetLineStippleKHR"); -#endif /* defined(VK_KHR_line_rasterization) */ -#if defined(VK_KHR_maintenance1) - vkTrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR)load(context, "vkTrimCommandPoolKHR"); -#endif /* defined(VK_KHR_maintenance1) */ -#if defined(VK_KHR_maintenance3) - vkGetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)load(context, "vkGetDescriptorSetLayoutSupportKHR"); -#endif /* defined(VK_KHR_maintenance3) */ -#if defined(VK_KHR_maintenance4) - vkGetDeviceBufferMemoryRequirementsKHR = (PFN_vkGetDeviceBufferMemoryRequirementsKHR)load(context, "vkGetDeviceBufferMemoryRequirementsKHR"); - vkGetDeviceImageMemoryRequirementsKHR = (PFN_vkGetDeviceImageMemoryRequirementsKHR)load(context, "vkGetDeviceImageMemoryRequirementsKHR"); - vkGetDeviceImageSparseMemoryRequirementsKHR = (PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)load(context, "vkGetDeviceImageSparseMemoryRequirementsKHR"); -#endif /* defined(VK_KHR_maintenance4) */ -#if defined(VK_KHR_maintenance5) - vkCmdBindIndexBuffer2KHR = (PFN_vkCmdBindIndexBuffer2KHR)load(context, "vkCmdBindIndexBuffer2KHR"); - vkGetDeviceImageSubresourceLayoutKHR = (PFN_vkGetDeviceImageSubresourceLayoutKHR)load(context, "vkGetDeviceImageSubresourceLayoutKHR"); - vkGetImageSubresourceLayout2KHR = (PFN_vkGetImageSubresourceLayout2KHR)load(context, "vkGetImageSubresourceLayout2KHR"); - vkGetRenderingAreaGranularityKHR = (PFN_vkGetRenderingAreaGranularityKHR)load(context, "vkGetRenderingAreaGranularityKHR"); -#endif /* defined(VK_KHR_maintenance5) */ -#if defined(VK_KHR_maintenance6) - vkCmdBindDescriptorSets2KHR = (PFN_vkCmdBindDescriptorSets2KHR)load(context, "vkCmdBindDescriptorSets2KHR"); - vkCmdPushConstants2KHR = (PFN_vkCmdPushConstants2KHR)load(context, "vkCmdPushConstants2KHR"); -#endif /* defined(VK_KHR_maintenance6) */ -#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) - vkCmdPushDescriptorSet2KHR = (PFN_vkCmdPushDescriptorSet2KHR)load(context, "vkCmdPushDescriptorSet2KHR"); - vkCmdPushDescriptorSetWithTemplate2KHR = (PFN_vkCmdPushDescriptorSetWithTemplate2KHR)load(context, "vkCmdPushDescriptorSetWithTemplate2KHR"); -#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) - vkCmdBindDescriptorBufferEmbeddedSamplers2EXT 
= (PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT)load(context, "vkCmdBindDescriptorBufferEmbeddedSamplers2EXT"); - vkCmdSetDescriptorBufferOffsets2EXT = (PFN_vkCmdSetDescriptorBufferOffsets2EXT)load(context, "vkCmdSetDescriptorBufferOffsets2EXT"); -#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_KHR_map_memory2) - vkMapMemory2KHR = (PFN_vkMapMemory2KHR)load(context, "vkMapMemory2KHR"); - vkUnmapMemory2KHR = (PFN_vkUnmapMemory2KHR)load(context, "vkUnmapMemory2KHR"); -#endif /* defined(VK_KHR_map_memory2) */ -#if defined(VK_KHR_performance_query) - vkAcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)load(context, "vkAcquireProfilingLockKHR"); - vkReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)load(context, "vkReleaseProfilingLockKHR"); -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_pipeline_binary) - vkCreatePipelineBinariesKHR = (PFN_vkCreatePipelineBinariesKHR)load(context, "vkCreatePipelineBinariesKHR"); - vkDestroyPipelineBinaryKHR = (PFN_vkDestroyPipelineBinaryKHR)load(context, "vkDestroyPipelineBinaryKHR"); - vkGetPipelineBinaryDataKHR = (PFN_vkGetPipelineBinaryDataKHR)load(context, "vkGetPipelineBinaryDataKHR"); - vkGetPipelineKeyKHR = (PFN_vkGetPipelineKeyKHR)load(context, "vkGetPipelineKeyKHR"); - vkReleaseCapturedPipelineDataKHR = (PFN_vkReleaseCapturedPipelineDataKHR)load(context, "vkReleaseCapturedPipelineDataKHR"); -#endif /* defined(VK_KHR_pipeline_binary) */ -#if defined(VK_KHR_pipeline_executable_properties) - vkGetPipelineExecutableInternalRepresentationsKHR = (PFN_vkGetPipelineExecutableInternalRepresentationsKHR)load(context, "vkGetPipelineExecutableInternalRepresentationsKHR"); - vkGetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)load(context, "vkGetPipelineExecutablePropertiesKHR"); - vkGetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR)load(context, "vkGetPipelineExecutableStatisticsKHR"); -#endif /* defined(VK_KHR_pipeline_executable_properties) */ -#if defined(VK_KHR_present_wait) - vkWaitForPresentKHR = (PFN_vkWaitForPresentKHR)load(context, "vkWaitForPresentKHR"); -#endif /* defined(VK_KHR_present_wait) */ -#if defined(VK_KHR_push_descriptor) - vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)load(context, "vkCmdPushDescriptorSetKHR"); -#endif /* defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) - vkCmdTraceRaysIndirect2KHR = (PFN_vkCmdTraceRaysIndirect2KHR)load(context, "vkCmdTraceRaysIndirect2KHR"); -#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_ray_tracing_pipeline) - vkCmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)load(context, "vkCmdSetRayTracingPipelineStackSizeKHR"); - vkCmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)load(context, "vkCmdTraceRaysIndirectKHR"); - vkCmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)load(context, "vkCmdTraceRaysKHR"); - vkCreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)load(context, "vkCreateRayTracingPipelinesKHR"); - vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)load(context, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"); - vkGetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR)load(context, "vkGetRayTracingShaderGroupHandlesKHR"); - vkGetRayTracingShaderGroupStackSizeKHR = 
(PFN_vkGetRayTracingShaderGroupStackSizeKHR)load(context, "vkGetRayTracingShaderGroupStackSizeKHR"); -#endif /* defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_sampler_ycbcr_conversion) - vkCreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)load(context, "vkCreateSamplerYcbcrConversionKHR"); - vkDestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR)load(context, "vkDestroySamplerYcbcrConversionKHR"); -#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */ -#if defined(VK_KHR_shared_presentable_image) - vkGetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR)load(context, "vkGetSwapchainStatusKHR"); -#endif /* defined(VK_KHR_shared_presentable_image) */ -#if defined(VK_KHR_swapchain) - vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)load(context, "vkAcquireNextImageKHR"); - vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)load(context, "vkCreateSwapchainKHR"); - vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)load(context, "vkDestroySwapchainKHR"); - vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)load(context, "vkGetSwapchainImagesKHR"); - vkQueuePresentKHR = (PFN_vkQueuePresentKHR)load(context, "vkQueuePresentKHR"); -#endif /* defined(VK_KHR_swapchain) */ -#if defined(VK_KHR_synchronization2) - vkCmdPipelineBarrier2KHR = (PFN_vkCmdPipelineBarrier2KHR)load(context, "vkCmdPipelineBarrier2KHR"); - vkCmdResetEvent2KHR = (PFN_vkCmdResetEvent2KHR)load(context, "vkCmdResetEvent2KHR"); - vkCmdSetEvent2KHR = (PFN_vkCmdSetEvent2KHR)load(context, "vkCmdSetEvent2KHR"); - vkCmdWaitEvents2KHR = (PFN_vkCmdWaitEvents2KHR)load(context, "vkCmdWaitEvents2KHR"); - vkCmdWriteTimestamp2KHR = (PFN_vkCmdWriteTimestamp2KHR)load(context, "vkCmdWriteTimestamp2KHR"); - vkQueueSubmit2KHR = (PFN_vkQueueSubmit2KHR)load(context, "vkQueueSubmit2KHR"); -#endif /* defined(VK_KHR_synchronization2) */ -#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) - vkCmdWriteBufferMarker2AMD = (PFN_vkCmdWriteBufferMarker2AMD)load(context, "vkCmdWriteBufferMarker2AMD"); -#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */ -#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) - vkGetQueueCheckpointData2NV = (PFN_vkGetQueueCheckpointData2NV)load(context, "vkGetQueueCheckpointData2NV"); -#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_KHR_timeline_semaphore) - vkGetSemaphoreCounterValueKHR = (PFN_vkGetSemaphoreCounterValueKHR)load(context, "vkGetSemaphoreCounterValueKHR"); - vkSignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)load(context, "vkSignalSemaphoreKHR"); - vkWaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)load(context, "vkWaitSemaphoresKHR"); -#endif /* defined(VK_KHR_timeline_semaphore) */ -#if defined(VK_KHR_video_decode_queue) - vkCmdDecodeVideoKHR = (PFN_vkCmdDecodeVideoKHR)load(context, "vkCmdDecodeVideoKHR"); -#endif /* defined(VK_KHR_video_decode_queue) */ -#if defined(VK_KHR_video_encode_queue) - vkCmdEncodeVideoKHR = (PFN_vkCmdEncodeVideoKHR)load(context, "vkCmdEncodeVideoKHR"); - vkGetEncodedVideoSessionParametersKHR = (PFN_vkGetEncodedVideoSessionParametersKHR)load(context, "vkGetEncodedVideoSessionParametersKHR"); -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) - vkBindVideoSessionMemoryKHR = (PFN_vkBindVideoSessionMemoryKHR)load(context, "vkBindVideoSessionMemoryKHR"); - vkCmdBeginVideoCodingKHR = (PFN_vkCmdBeginVideoCodingKHR)load(context, "vkCmdBeginVideoCodingKHR"); - 
vkCmdControlVideoCodingKHR = (PFN_vkCmdControlVideoCodingKHR)load(context, "vkCmdControlVideoCodingKHR"); - vkCmdEndVideoCodingKHR = (PFN_vkCmdEndVideoCodingKHR)load(context, "vkCmdEndVideoCodingKHR"); - vkCreateVideoSessionKHR = (PFN_vkCreateVideoSessionKHR)load(context, "vkCreateVideoSessionKHR"); - vkCreateVideoSessionParametersKHR = (PFN_vkCreateVideoSessionParametersKHR)load(context, "vkCreateVideoSessionParametersKHR"); - vkDestroyVideoSessionKHR = (PFN_vkDestroyVideoSessionKHR)load(context, "vkDestroyVideoSessionKHR"); - vkDestroyVideoSessionParametersKHR = (PFN_vkDestroyVideoSessionParametersKHR)load(context, "vkDestroyVideoSessionParametersKHR"); - vkGetVideoSessionMemoryRequirementsKHR = (PFN_vkGetVideoSessionMemoryRequirementsKHR)load(context, "vkGetVideoSessionMemoryRequirementsKHR"); - vkUpdateVideoSessionParametersKHR = (PFN_vkUpdateVideoSessionParametersKHR)load(context, "vkUpdateVideoSessionParametersKHR"); -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_NVX_binary_import) - vkCmdCuLaunchKernelNVX = (PFN_vkCmdCuLaunchKernelNVX)load(context, "vkCmdCuLaunchKernelNVX"); - vkCreateCuFunctionNVX = (PFN_vkCreateCuFunctionNVX)load(context, "vkCreateCuFunctionNVX"); - vkCreateCuModuleNVX = (PFN_vkCreateCuModuleNVX)load(context, "vkCreateCuModuleNVX"); - vkDestroyCuFunctionNVX = (PFN_vkDestroyCuFunctionNVX)load(context, "vkDestroyCuFunctionNVX"); - vkDestroyCuModuleNVX = (PFN_vkDestroyCuModuleNVX)load(context, "vkDestroyCuModuleNVX"); -#endif /* defined(VK_NVX_binary_import) */ -#if defined(VK_NVX_image_view_handle) - vkGetImageViewAddressNVX = (PFN_vkGetImageViewAddressNVX)load(context, "vkGetImageViewAddressNVX"); - vkGetImageViewHandleNVX = (PFN_vkGetImageViewHandleNVX)load(context, "vkGetImageViewHandleNVX"); -#endif /* defined(VK_NVX_image_view_handle) */ -#if defined(VK_NV_clip_space_w_scaling) - vkCmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV)load(context, "vkCmdSetViewportWScalingNV"); -#endif /* defined(VK_NV_clip_space_w_scaling) */ -#if defined(VK_NV_copy_memory_indirect) - vkCmdCopyMemoryIndirectNV = (PFN_vkCmdCopyMemoryIndirectNV)load(context, "vkCmdCopyMemoryIndirectNV"); - vkCmdCopyMemoryToImageIndirectNV = (PFN_vkCmdCopyMemoryToImageIndirectNV)load(context, "vkCmdCopyMemoryToImageIndirectNV"); -#endif /* defined(VK_NV_copy_memory_indirect) */ -#if defined(VK_NV_cuda_kernel_launch) - vkCmdCudaLaunchKernelNV = (PFN_vkCmdCudaLaunchKernelNV)load(context, "vkCmdCudaLaunchKernelNV"); - vkCreateCudaFunctionNV = (PFN_vkCreateCudaFunctionNV)load(context, "vkCreateCudaFunctionNV"); - vkCreateCudaModuleNV = (PFN_vkCreateCudaModuleNV)load(context, "vkCreateCudaModuleNV"); - vkDestroyCudaFunctionNV = (PFN_vkDestroyCudaFunctionNV)load(context, "vkDestroyCudaFunctionNV"); - vkDestroyCudaModuleNV = (PFN_vkDestroyCudaModuleNV)load(context, "vkDestroyCudaModuleNV"); - vkGetCudaModuleCacheNV = (PFN_vkGetCudaModuleCacheNV)load(context, "vkGetCudaModuleCacheNV"); -#endif /* defined(VK_NV_cuda_kernel_launch) */ -#if defined(VK_NV_device_diagnostic_checkpoints) - vkCmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV)load(context, "vkCmdSetCheckpointNV"); - vkGetQueueCheckpointDataNV = (PFN_vkGetQueueCheckpointDataNV)load(context, "vkGetQueueCheckpointDataNV"); -#endif /* defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_NV_device_generated_commands) - vkCmdBindPipelineShaderGroupNV = (PFN_vkCmdBindPipelineShaderGroupNV)load(context, "vkCmdBindPipelineShaderGroupNV"); - vkCmdExecuteGeneratedCommandsNV = (PFN_vkCmdExecuteGeneratedCommandsNV)load(context, 
"vkCmdExecuteGeneratedCommandsNV"); - vkCmdPreprocessGeneratedCommandsNV = (PFN_vkCmdPreprocessGeneratedCommandsNV)load(context, "vkCmdPreprocessGeneratedCommandsNV"); - vkCreateIndirectCommandsLayoutNV = (PFN_vkCreateIndirectCommandsLayoutNV)load(context, "vkCreateIndirectCommandsLayoutNV"); - vkDestroyIndirectCommandsLayoutNV = (PFN_vkDestroyIndirectCommandsLayoutNV)load(context, "vkDestroyIndirectCommandsLayoutNV"); - vkGetGeneratedCommandsMemoryRequirementsNV = (PFN_vkGetGeneratedCommandsMemoryRequirementsNV)load(context, "vkGetGeneratedCommandsMemoryRequirementsNV"); -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) - vkCmdUpdatePipelineIndirectBufferNV = (PFN_vkCmdUpdatePipelineIndirectBufferNV)load(context, "vkCmdUpdatePipelineIndirectBufferNV"); - vkGetPipelineIndirectDeviceAddressNV = (PFN_vkGetPipelineIndirectDeviceAddressNV)load(context, "vkGetPipelineIndirectDeviceAddressNV"); - vkGetPipelineIndirectMemoryRequirementsNV = (PFN_vkGetPipelineIndirectMemoryRequirementsNV)load(context, "vkGetPipelineIndirectMemoryRequirementsNV"); -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_rdma) - vkGetMemoryRemoteAddressNV = (PFN_vkGetMemoryRemoteAddressNV)load(context, "vkGetMemoryRemoteAddressNV"); -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) - vkGetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV)load(context, "vkGetMemoryWin32HandleNV"); -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) - vkCmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)load(context, "vkCmdSetFragmentShadingRateEnumNV"); -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) - vkGetLatencyTimingsNV = (PFN_vkGetLatencyTimingsNV)load(context, "vkGetLatencyTimingsNV"); - vkLatencySleepNV = (PFN_vkLatencySleepNV)load(context, "vkLatencySleepNV"); - vkQueueNotifyOutOfBandNV = (PFN_vkQueueNotifyOutOfBandNV)load(context, "vkQueueNotifyOutOfBandNV"); - vkSetLatencyMarkerNV = (PFN_vkSetLatencyMarkerNV)load(context, "vkSetLatencyMarkerNV"); - vkSetLatencySleepModeNV = (PFN_vkSetLatencySleepModeNV)load(context, "vkSetLatencySleepModeNV"); -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) - vkCmdDecompressMemoryIndirectCountNV = (PFN_vkCmdDecompressMemoryIndirectCountNV)load(context, "vkCmdDecompressMemoryIndirectCountNV"); - vkCmdDecompressMemoryNV = (PFN_vkCmdDecompressMemoryNV)load(context, "vkCmdDecompressMemoryNV"); -#endif /* defined(VK_NV_memory_decompression) */ -#if defined(VK_NV_mesh_shader) - vkCmdDrawMeshTasksIndirectCountNV = (PFN_vkCmdDrawMeshTasksIndirectCountNV)load(context, "vkCmdDrawMeshTasksIndirectCountNV"); - vkCmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)load(context, "vkCmdDrawMeshTasksIndirectNV"); - vkCmdDrawMeshTasksNV = (PFN_vkCmdDrawMeshTasksNV)load(context, "vkCmdDrawMeshTasksNV"); -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) - vkBindOpticalFlowSessionImageNV = (PFN_vkBindOpticalFlowSessionImageNV)load(context, "vkBindOpticalFlowSessionImageNV"); - vkCmdOpticalFlowExecuteNV = (PFN_vkCmdOpticalFlowExecuteNV)load(context, "vkCmdOpticalFlowExecuteNV"); - vkCreateOpticalFlowSessionNV = (PFN_vkCreateOpticalFlowSessionNV)load(context, "vkCreateOpticalFlowSessionNV"); - vkDestroyOpticalFlowSessionNV = (PFN_vkDestroyOpticalFlowSessionNV)load(context, 
"vkDestroyOpticalFlowSessionNV"); -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) - vkBindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)load(context, "vkBindAccelerationStructureMemoryNV"); - vkCmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)load(context, "vkCmdBuildAccelerationStructureNV"); - vkCmdCopyAccelerationStructureNV = (PFN_vkCmdCopyAccelerationStructureNV)load(context, "vkCmdCopyAccelerationStructureNV"); - vkCmdTraceRaysNV = (PFN_vkCmdTraceRaysNV)load(context, "vkCmdTraceRaysNV"); - vkCmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)load(context, "vkCmdWriteAccelerationStructuresPropertiesNV"); - vkCompileDeferredNV = (PFN_vkCompileDeferredNV)load(context, "vkCompileDeferredNV"); - vkCreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)load(context, "vkCreateAccelerationStructureNV"); - vkCreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)load(context, "vkCreateRayTracingPipelinesNV"); - vkDestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)load(context, "vkDestroyAccelerationStructureNV"); - vkGetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)load(context, "vkGetAccelerationStructureHandleNV"); - vkGetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)load(context, "vkGetAccelerationStructureMemoryRequirementsNV"); - vkGetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)load(context, "vkGetRayTracingShaderGroupHandlesNV"); -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 - vkCmdSetExclusiveScissorEnableNV = (PFN_vkCmdSetExclusiveScissorEnableNV)load(context, "vkCmdSetExclusiveScissorEnableNV"); -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) - vkCmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)load(context, "vkCmdSetExclusiveScissorNV"); -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) - vkCmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)load(context, "vkCmdBindShadingRateImageNV"); - vkCmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)load(context, "vkCmdSetCoarseSampleOrderNV"); - vkCmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)load(context, "vkCmdSetViewportShadingRatePaletteNV"); -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) - vkGetDynamicRenderingTilePropertiesQCOM = (PFN_vkGetDynamicRenderingTilePropertiesQCOM)load(context, "vkGetDynamicRenderingTilePropertiesQCOM"); - vkGetFramebufferTilePropertiesQCOM = (PFN_vkGetFramebufferTilePropertiesQCOM)load(context, "vkGetFramebufferTilePropertiesQCOM"); -#endif /* defined(VK_QCOM_tile_properties) */ -#if defined(VK_QNX_external_memory_screen_buffer) - vkGetScreenBufferPropertiesQNX = (PFN_vkGetScreenBufferPropertiesQNX)load(context, "vkGetScreenBufferPropertiesQNX"); -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) - vkGetDescriptorSetHostMappingVALVE = (PFN_vkGetDescriptorSetHostMappingVALVE)load(context, "vkGetDescriptorSetHostMappingVALVE"); - vkGetDescriptorSetLayoutHostMappingInfoVALVE = (PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)load(context, "vkGetDescriptorSetLayoutHostMappingInfoVALVE"); 
-#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) - vkCmdBindVertexBuffers2EXT = (PFN_vkCmdBindVertexBuffers2EXT)load(context, "vkCmdBindVertexBuffers2EXT"); - vkCmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT)load(context, "vkCmdSetCullModeEXT"); - vkCmdSetDepthBoundsTestEnableEXT = (PFN_vkCmdSetDepthBoundsTestEnableEXT)load(context, "vkCmdSetDepthBoundsTestEnableEXT"); - vkCmdSetDepthCompareOpEXT = (PFN_vkCmdSetDepthCompareOpEXT)load(context, "vkCmdSetDepthCompareOpEXT"); - vkCmdSetDepthTestEnableEXT = (PFN_vkCmdSetDepthTestEnableEXT)load(context, "vkCmdSetDepthTestEnableEXT"); - vkCmdSetDepthWriteEnableEXT = (PFN_vkCmdSetDepthWriteEnableEXT)load(context, "vkCmdSetDepthWriteEnableEXT"); - vkCmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT)load(context, "vkCmdSetFrontFaceEXT"); - vkCmdSetPrimitiveTopologyEXT = (PFN_vkCmdSetPrimitiveTopologyEXT)load(context, "vkCmdSetPrimitiveTopologyEXT"); - vkCmdSetScissorWithCountEXT = (PFN_vkCmdSetScissorWithCountEXT)load(context, "vkCmdSetScissorWithCountEXT"); - vkCmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT)load(context, "vkCmdSetStencilOpEXT"); - vkCmdSetStencilTestEnableEXT = (PFN_vkCmdSetStencilTestEnableEXT)load(context, "vkCmdSetStencilTestEnableEXT"); - vkCmdSetViewportWithCountEXT = (PFN_vkCmdSetViewportWithCountEXT)load(context, "vkCmdSetViewportWithCountEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) - vkCmdSetDepthBiasEnableEXT = (PFN_vkCmdSetDepthBiasEnableEXT)load(context, "vkCmdSetDepthBiasEnableEXT"); - vkCmdSetLogicOpEXT = (PFN_vkCmdSetLogicOpEXT)load(context, "vkCmdSetLogicOpEXT"); - vkCmdSetPatchControlPointsEXT = (PFN_vkCmdSetPatchControlPointsEXT)load(context, "vkCmdSetPatchControlPointsEXT"); - vkCmdSetPrimitiveRestartEnableEXT = (PFN_vkCmdSetPrimitiveRestartEnableEXT)load(context, "vkCmdSetPrimitiveRestartEnableEXT"); - vkCmdSetRasterizerDiscardEnableEXT = (PFN_vkCmdSetRasterizerDiscardEnableEXT)load(context, "vkCmdSetRasterizerDiscardEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) - vkCmdSetAlphaToCoverageEnableEXT = (PFN_vkCmdSetAlphaToCoverageEnableEXT)load(context, "vkCmdSetAlphaToCoverageEnableEXT"); - vkCmdSetAlphaToOneEnableEXT = (PFN_vkCmdSetAlphaToOneEnableEXT)load(context, "vkCmdSetAlphaToOneEnableEXT"); - vkCmdSetColorBlendEnableEXT = (PFN_vkCmdSetColorBlendEnableEXT)load(context, "vkCmdSetColorBlendEnableEXT"); - vkCmdSetColorBlendEquationEXT = (PFN_vkCmdSetColorBlendEquationEXT)load(context, "vkCmdSetColorBlendEquationEXT"); - vkCmdSetColorWriteMaskEXT = (PFN_vkCmdSetColorWriteMaskEXT)load(context, "vkCmdSetColorWriteMaskEXT"); - vkCmdSetDepthClampEnableEXT = (PFN_vkCmdSetDepthClampEnableEXT)load(context, "vkCmdSetDepthClampEnableEXT"); - vkCmdSetLogicOpEnableEXT = (PFN_vkCmdSetLogicOpEnableEXT)load(context, "vkCmdSetLogicOpEnableEXT"); - vkCmdSetPolygonModeEXT = (PFN_vkCmdSetPolygonModeEXT)load(context, "vkCmdSetPolygonModeEXT"); - vkCmdSetRasterizationSamplesEXT = (PFN_vkCmdSetRasterizationSamplesEXT)load(context, "vkCmdSetRasterizationSamplesEXT"); - vkCmdSetSampleMaskEXT = (PFN_vkCmdSetSampleMaskEXT)load(context, "vkCmdSetSampleMaskEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if 
(defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) - vkCmdSetTessellationDomainOriginEXT = (PFN_vkCmdSetTessellationDomainOriginEXT)load(context, "vkCmdSetTessellationDomainOriginEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) - vkCmdSetRasterizationStreamEXT = (PFN_vkCmdSetRasterizationStreamEXT)load(context, "vkCmdSetRasterizationStreamEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) - vkCmdSetConservativeRasterizationModeEXT = (PFN_vkCmdSetConservativeRasterizationModeEXT)load(context, "vkCmdSetConservativeRasterizationModeEXT"); - vkCmdSetExtraPrimitiveOverestimationSizeEXT = (PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT)load(context, "vkCmdSetExtraPrimitiveOverestimationSizeEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) - vkCmdSetDepthClipEnableEXT = (PFN_vkCmdSetDepthClipEnableEXT)load(context, "vkCmdSetDepthClipEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) - vkCmdSetSampleLocationsEnableEXT = (PFN_vkCmdSetSampleLocationsEnableEXT)load(context, "vkCmdSetSampleLocationsEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) - vkCmdSetColorBlendAdvancedEXT = (PFN_vkCmdSetColorBlendAdvancedEXT)load(context, "vkCmdSetColorBlendAdvancedEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) - vkCmdSetProvokingVertexModeEXT = (PFN_vkCmdSetProvokingVertexModeEXT)load(context, "vkCmdSetProvokingVertexModeEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) - vkCmdSetLineRasterizationModeEXT = 
(PFN_vkCmdSetLineRasterizationModeEXT)load(context, "vkCmdSetLineRasterizationModeEXT"); - vkCmdSetLineStippleEnableEXT = (PFN_vkCmdSetLineStippleEnableEXT)load(context, "vkCmdSetLineStippleEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) - vkCmdSetDepthClipNegativeOneToOneEXT = (PFN_vkCmdSetDepthClipNegativeOneToOneEXT)load(context, "vkCmdSetDepthClipNegativeOneToOneEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) - vkCmdSetViewportWScalingEnableNV = (PFN_vkCmdSetViewportWScalingEnableNV)load(context, "vkCmdSetViewportWScalingEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) - vkCmdSetViewportSwizzleNV = (PFN_vkCmdSetViewportSwizzleNV)load(context, "vkCmdSetViewportSwizzleNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) - vkCmdSetCoverageToColorEnableNV = (PFN_vkCmdSetCoverageToColorEnableNV)load(context, "vkCmdSetCoverageToColorEnableNV"); - vkCmdSetCoverageToColorLocationNV = (PFN_vkCmdSetCoverageToColorLocationNV)load(context, "vkCmdSetCoverageToColorLocationNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) - vkCmdSetCoverageModulationModeNV = (PFN_vkCmdSetCoverageModulationModeNV)load(context, "vkCmdSetCoverageModulationModeNV"); - vkCmdSetCoverageModulationTableEnableNV = (PFN_vkCmdSetCoverageModulationTableEnableNV)load(context, "vkCmdSetCoverageModulationTableEnableNV"); - vkCmdSetCoverageModulationTableNV = (PFN_vkCmdSetCoverageModulationTableNV)load(context, "vkCmdSetCoverageModulationTableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) - vkCmdSetShadingRateImageEnableNV = (PFN_vkCmdSetShadingRateImageEnableNV)load(context, "vkCmdSetShadingRateImageEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) 
&& defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test))
- vkCmdSetRepresentativeFragmentTestEnableNV = (PFN_vkCmdSetRepresentativeFragmentTestEnableNV)load(context, "vkCmdSetRepresentativeFragmentTestEnableNV");
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode))
- vkCmdSetCoverageReductionModeNV = (PFN_vkCmdSetCoverageReductionModeNV)load(context, "vkCmdSetCoverageReductionModeNV");
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */
-#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control))
- vkGetImageSubresourceLayout2EXT = (PFN_vkGetImageSubresourceLayout2EXT)load(context, "vkGetImageSubresourceLayout2EXT");
-#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */
-#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state))
- vkCmdSetVertexInputEXT = (PFN_vkCmdSetVertexInputEXT)load(context, "vkCmdSetVertexInputEXT");
-#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */
-#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template)))
- vkCmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR)load(context, "vkCmdPushDescriptorSetWithTemplateKHR");
-#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
- vkGetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR)load(context, "vkGetDeviceGroupPresentCapabilitiesKHR");
- vkGetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR)load(context, "vkGetDeviceGroupSurfacePresentModesKHR");
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
- vkAcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR)load(context, "vkAcquireNextImage2KHR");
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
- /* VOLK_GENERATE_LOAD_DEVICE */
-}
-
-static void volkGenLoadDeviceTable(struct VolkDeviceTable* table, void* context, PFN_vkVoidFunction (*load)(void*, const char*))
-{
- /* VOLK_GENERATE_LOAD_DEVICE_TABLE */
-#if defined(VK_VERSION_1_0)
- table->vkAllocateCommandBuffers = (PFN_vkAllocateCommandBuffers)load(context, "vkAllocateCommandBuffers");
- table->vkAllocateDescriptorSets = (PFN_vkAllocateDescriptorSets)load(context, "vkAllocateDescriptorSets");
- table->vkAllocateMemory = (PFN_vkAllocateMemory)load(context, "vkAllocateMemory");
- table->vkBeginCommandBuffer = 
(PFN_vkBeginCommandBuffer)load(context, "vkBeginCommandBuffer"); - table->vkBindBufferMemory = (PFN_vkBindBufferMemory)load(context, "vkBindBufferMemory"); - table->vkBindImageMemory = (PFN_vkBindImageMemory)load(context, "vkBindImageMemory"); - table->vkCmdBeginQuery = (PFN_vkCmdBeginQuery)load(context, "vkCmdBeginQuery"); - table->vkCmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)load(context, "vkCmdBeginRenderPass"); - table->vkCmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets)load(context, "vkCmdBindDescriptorSets"); - table->vkCmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)load(context, "vkCmdBindIndexBuffer"); - table->vkCmdBindPipeline = (PFN_vkCmdBindPipeline)load(context, "vkCmdBindPipeline"); - table->vkCmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers)load(context, "vkCmdBindVertexBuffers"); - table->vkCmdBlitImage = (PFN_vkCmdBlitImage)load(context, "vkCmdBlitImage"); - table->vkCmdClearAttachments = (PFN_vkCmdClearAttachments)load(context, "vkCmdClearAttachments"); - table->vkCmdClearColorImage = (PFN_vkCmdClearColorImage)load(context, "vkCmdClearColorImage"); - table->vkCmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage)load(context, "vkCmdClearDepthStencilImage"); - table->vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)load(context, "vkCmdCopyBuffer"); - table->vkCmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage)load(context, "vkCmdCopyBufferToImage"); - table->vkCmdCopyImage = (PFN_vkCmdCopyImage)load(context, "vkCmdCopyImage"); - table->vkCmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer)load(context, "vkCmdCopyImageToBuffer"); - table->vkCmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults)load(context, "vkCmdCopyQueryPoolResults"); - table->vkCmdDispatch = (PFN_vkCmdDispatch)load(context, "vkCmdDispatch"); - table->vkCmdDispatchIndirect = (PFN_vkCmdDispatchIndirect)load(context, "vkCmdDispatchIndirect"); - table->vkCmdDraw = (PFN_vkCmdDraw)load(context, "vkCmdDraw"); - table->vkCmdDrawIndexed = (PFN_vkCmdDrawIndexed)load(context, "vkCmdDrawIndexed"); - table->vkCmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect)load(context, "vkCmdDrawIndexedIndirect"); - table->vkCmdDrawIndirect = (PFN_vkCmdDrawIndirect)load(context, "vkCmdDrawIndirect"); - table->vkCmdEndQuery = (PFN_vkCmdEndQuery)load(context, "vkCmdEndQuery"); - table->vkCmdEndRenderPass = (PFN_vkCmdEndRenderPass)load(context, "vkCmdEndRenderPass"); - table->vkCmdExecuteCommands = (PFN_vkCmdExecuteCommands)load(context, "vkCmdExecuteCommands"); - table->vkCmdFillBuffer = (PFN_vkCmdFillBuffer)load(context, "vkCmdFillBuffer"); - table->vkCmdNextSubpass = (PFN_vkCmdNextSubpass)load(context, "vkCmdNextSubpass"); - table->vkCmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)load(context, "vkCmdPipelineBarrier"); - table->vkCmdPushConstants = (PFN_vkCmdPushConstants)load(context, "vkCmdPushConstants"); - table->vkCmdResetEvent = (PFN_vkCmdResetEvent)load(context, "vkCmdResetEvent"); - table->vkCmdResetQueryPool = (PFN_vkCmdResetQueryPool)load(context, "vkCmdResetQueryPool"); - table->vkCmdResolveImage = (PFN_vkCmdResolveImage)load(context, "vkCmdResolveImage"); - table->vkCmdSetBlendConstants = (PFN_vkCmdSetBlendConstants)load(context, "vkCmdSetBlendConstants"); - table->vkCmdSetDepthBias = (PFN_vkCmdSetDepthBias)load(context, "vkCmdSetDepthBias"); - table->vkCmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)load(context, "vkCmdSetDepthBounds"); - table->vkCmdSetEvent = (PFN_vkCmdSetEvent)load(context, "vkCmdSetEvent"); - table->vkCmdSetLineWidth = (PFN_vkCmdSetLineWidth)load(context, "vkCmdSetLineWidth"); - 
table->vkCmdSetScissor = (PFN_vkCmdSetScissor)load(context, "vkCmdSetScissor"); - table->vkCmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask)load(context, "vkCmdSetStencilCompareMask"); - table->vkCmdSetStencilReference = (PFN_vkCmdSetStencilReference)load(context, "vkCmdSetStencilReference"); - table->vkCmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask)load(context, "vkCmdSetStencilWriteMask"); - table->vkCmdSetViewport = (PFN_vkCmdSetViewport)load(context, "vkCmdSetViewport"); - table->vkCmdUpdateBuffer = (PFN_vkCmdUpdateBuffer)load(context, "vkCmdUpdateBuffer"); - table->vkCmdWaitEvents = (PFN_vkCmdWaitEvents)load(context, "vkCmdWaitEvents"); - table->vkCmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)load(context, "vkCmdWriteTimestamp"); - table->vkCreateBuffer = (PFN_vkCreateBuffer)load(context, "vkCreateBuffer"); - table->vkCreateBufferView = (PFN_vkCreateBufferView)load(context, "vkCreateBufferView"); - table->vkCreateCommandPool = (PFN_vkCreateCommandPool)load(context, "vkCreateCommandPool"); - table->vkCreateComputePipelines = (PFN_vkCreateComputePipelines)load(context, "vkCreateComputePipelines"); - table->vkCreateDescriptorPool = (PFN_vkCreateDescriptorPool)load(context, "vkCreateDescriptorPool"); - table->vkCreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout)load(context, "vkCreateDescriptorSetLayout"); - table->vkCreateEvent = (PFN_vkCreateEvent)load(context, "vkCreateEvent"); - table->vkCreateFence = (PFN_vkCreateFence)load(context, "vkCreateFence"); - table->vkCreateFramebuffer = (PFN_vkCreateFramebuffer)load(context, "vkCreateFramebuffer"); - table->vkCreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines)load(context, "vkCreateGraphicsPipelines"); - table->vkCreateImage = (PFN_vkCreateImage)load(context, "vkCreateImage"); - table->vkCreateImageView = (PFN_vkCreateImageView)load(context, "vkCreateImageView"); - table->vkCreatePipelineCache = (PFN_vkCreatePipelineCache)load(context, "vkCreatePipelineCache"); - table->vkCreatePipelineLayout = (PFN_vkCreatePipelineLayout)load(context, "vkCreatePipelineLayout"); - table->vkCreateQueryPool = (PFN_vkCreateQueryPool)load(context, "vkCreateQueryPool"); - table->vkCreateRenderPass = (PFN_vkCreateRenderPass)load(context, "vkCreateRenderPass"); - table->vkCreateSampler = (PFN_vkCreateSampler)load(context, "vkCreateSampler"); - table->vkCreateSemaphore = (PFN_vkCreateSemaphore)load(context, "vkCreateSemaphore"); - table->vkCreateShaderModule = (PFN_vkCreateShaderModule)load(context, "vkCreateShaderModule"); - table->vkDestroyBuffer = (PFN_vkDestroyBuffer)load(context, "vkDestroyBuffer"); - table->vkDestroyBufferView = (PFN_vkDestroyBufferView)load(context, "vkDestroyBufferView"); - table->vkDestroyCommandPool = (PFN_vkDestroyCommandPool)load(context, "vkDestroyCommandPool"); - table->vkDestroyDescriptorPool = (PFN_vkDestroyDescriptorPool)load(context, "vkDestroyDescriptorPool"); - table->vkDestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout)load(context, "vkDestroyDescriptorSetLayout"); - table->vkDestroyDevice = (PFN_vkDestroyDevice)load(context, "vkDestroyDevice"); - table->vkDestroyEvent = (PFN_vkDestroyEvent)load(context, "vkDestroyEvent"); - table->vkDestroyFence = (PFN_vkDestroyFence)load(context, "vkDestroyFence"); - table->vkDestroyFramebuffer = (PFN_vkDestroyFramebuffer)load(context, "vkDestroyFramebuffer"); - table->vkDestroyImage = (PFN_vkDestroyImage)load(context, "vkDestroyImage"); - table->vkDestroyImageView = (PFN_vkDestroyImageView)load(context, "vkDestroyImageView"); - 
table->vkDestroyPipeline = (PFN_vkDestroyPipeline)load(context, "vkDestroyPipeline"); - table->vkDestroyPipelineCache = (PFN_vkDestroyPipelineCache)load(context, "vkDestroyPipelineCache"); - table->vkDestroyPipelineLayout = (PFN_vkDestroyPipelineLayout)load(context, "vkDestroyPipelineLayout"); - table->vkDestroyQueryPool = (PFN_vkDestroyQueryPool)load(context, "vkDestroyQueryPool"); - table->vkDestroyRenderPass = (PFN_vkDestroyRenderPass)load(context, "vkDestroyRenderPass"); - table->vkDestroySampler = (PFN_vkDestroySampler)load(context, "vkDestroySampler"); - table->vkDestroySemaphore = (PFN_vkDestroySemaphore)load(context, "vkDestroySemaphore"); - table->vkDestroyShaderModule = (PFN_vkDestroyShaderModule)load(context, "vkDestroyShaderModule"); - table->vkDeviceWaitIdle = (PFN_vkDeviceWaitIdle)load(context, "vkDeviceWaitIdle"); - table->vkEndCommandBuffer = (PFN_vkEndCommandBuffer)load(context, "vkEndCommandBuffer"); - table->vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)load(context, "vkFlushMappedMemoryRanges"); - table->vkFreeCommandBuffers = (PFN_vkFreeCommandBuffers)load(context, "vkFreeCommandBuffers"); - table->vkFreeDescriptorSets = (PFN_vkFreeDescriptorSets)load(context, "vkFreeDescriptorSets"); - table->vkFreeMemory = (PFN_vkFreeMemory)load(context, "vkFreeMemory"); - table->vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)load(context, "vkGetBufferMemoryRequirements"); - table->vkGetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment)load(context, "vkGetDeviceMemoryCommitment"); - table->vkGetDeviceQueue = (PFN_vkGetDeviceQueue)load(context, "vkGetDeviceQueue"); - table->vkGetEventStatus = (PFN_vkGetEventStatus)load(context, "vkGetEventStatus"); - table->vkGetFenceStatus = (PFN_vkGetFenceStatus)load(context, "vkGetFenceStatus"); - table->vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)load(context, "vkGetImageMemoryRequirements"); - table->vkGetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)load(context, "vkGetImageSparseMemoryRequirements"); - table->vkGetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout)load(context, "vkGetImageSubresourceLayout"); - table->vkGetPipelineCacheData = (PFN_vkGetPipelineCacheData)load(context, "vkGetPipelineCacheData"); - table->vkGetQueryPoolResults = (PFN_vkGetQueryPoolResults)load(context, "vkGetQueryPoolResults"); - table->vkGetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity)load(context, "vkGetRenderAreaGranularity"); - table->vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)load(context, "vkInvalidateMappedMemoryRanges"); - table->vkMapMemory = (PFN_vkMapMemory)load(context, "vkMapMemory"); - table->vkMergePipelineCaches = (PFN_vkMergePipelineCaches)load(context, "vkMergePipelineCaches"); - table->vkQueueBindSparse = (PFN_vkQueueBindSparse)load(context, "vkQueueBindSparse"); - table->vkQueueSubmit = (PFN_vkQueueSubmit)load(context, "vkQueueSubmit"); - table->vkQueueWaitIdle = (PFN_vkQueueWaitIdle)load(context, "vkQueueWaitIdle"); - table->vkResetCommandBuffer = (PFN_vkResetCommandBuffer)load(context, "vkResetCommandBuffer"); - table->vkResetCommandPool = (PFN_vkResetCommandPool)load(context, "vkResetCommandPool"); - table->vkResetDescriptorPool = (PFN_vkResetDescriptorPool)load(context, "vkResetDescriptorPool"); - table->vkResetEvent = (PFN_vkResetEvent)load(context, "vkResetEvent"); - table->vkResetFences = (PFN_vkResetFences)load(context, "vkResetFences"); - table->vkSetEvent = (PFN_vkSetEvent)load(context, "vkSetEvent"); - 
table->vkUnmapMemory = (PFN_vkUnmapMemory)load(context, "vkUnmapMemory"); - table->vkUpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)load(context, "vkUpdateDescriptorSets"); - table->vkWaitForFences = (PFN_vkWaitForFences)load(context, "vkWaitForFences"); -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - table->vkBindBufferMemory2 = (PFN_vkBindBufferMemory2)load(context, "vkBindBufferMemory2"); - table->vkBindImageMemory2 = (PFN_vkBindImageMemory2)load(context, "vkBindImageMemory2"); - table->vkCmdDispatchBase = (PFN_vkCmdDispatchBase)load(context, "vkCmdDispatchBase"); - table->vkCmdSetDeviceMask = (PFN_vkCmdSetDeviceMask)load(context, "vkCmdSetDeviceMask"); - table->vkCreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate)load(context, "vkCreateDescriptorUpdateTemplate"); - table->vkCreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion)load(context, "vkCreateSamplerYcbcrConversion"); - table->vkDestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate)load(context, "vkDestroyDescriptorUpdateTemplate"); - table->vkDestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion)load(context, "vkDestroySamplerYcbcrConversion"); - table->vkGetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2)load(context, "vkGetBufferMemoryRequirements2"); - table->vkGetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport)load(context, "vkGetDescriptorSetLayoutSupport"); - table->vkGetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures)load(context, "vkGetDeviceGroupPeerMemoryFeatures"); - table->vkGetDeviceQueue2 = (PFN_vkGetDeviceQueue2)load(context, "vkGetDeviceQueue2"); - table->vkGetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2)load(context, "vkGetImageMemoryRequirements2"); - table->vkGetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2)load(context, "vkGetImageSparseMemoryRequirements2"); - table->vkTrimCommandPool = (PFN_vkTrimCommandPool)load(context, "vkTrimCommandPool"); - table->vkUpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate)load(context, "vkUpdateDescriptorSetWithTemplate"); -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_2) - table->vkCmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2)load(context, "vkCmdBeginRenderPass2"); - table->vkCmdDrawIndexedIndirectCount = (PFN_vkCmdDrawIndexedIndirectCount)load(context, "vkCmdDrawIndexedIndirectCount"); - table->vkCmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)load(context, "vkCmdDrawIndirectCount"); - table->vkCmdEndRenderPass2 = (PFN_vkCmdEndRenderPass2)load(context, "vkCmdEndRenderPass2"); - table->vkCmdNextSubpass2 = (PFN_vkCmdNextSubpass2)load(context, "vkCmdNextSubpass2"); - table->vkCreateRenderPass2 = (PFN_vkCreateRenderPass2)load(context, "vkCreateRenderPass2"); - table->vkGetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)load(context, "vkGetBufferDeviceAddress"); - table->vkGetBufferOpaqueCaptureAddress = (PFN_vkGetBufferOpaqueCaptureAddress)load(context, "vkGetBufferOpaqueCaptureAddress"); - table->vkGetDeviceMemoryOpaqueCaptureAddress = (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)load(context, "vkGetDeviceMemoryOpaqueCaptureAddress"); - table->vkGetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)load(context, "vkGetSemaphoreCounterValue"); - table->vkResetQueryPool = (PFN_vkResetQueryPool)load(context, "vkResetQueryPool"); - table->vkSignalSemaphore = (PFN_vkSignalSemaphore)load(context, "vkSignalSemaphore"); - 
table->vkWaitSemaphores = (PFN_vkWaitSemaphores)load(context, "vkWaitSemaphores"); -#endif /* defined(VK_VERSION_1_2) */ -#if defined(VK_VERSION_1_3) - table->vkCmdBeginRendering = (PFN_vkCmdBeginRendering)load(context, "vkCmdBeginRendering"); - table->vkCmdBindVertexBuffers2 = (PFN_vkCmdBindVertexBuffers2)load(context, "vkCmdBindVertexBuffers2"); - table->vkCmdBlitImage2 = (PFN_vkCmdBlitImage2)load(context, "vkCmdBlitImage2"); - table->vkCmdCopyBuffer2 = (PFN_vkCmdCopyBuffer2)load(context, "vkCmdCopyBuffer2"); - table->vkCmdCopyBufferToImage2 = (PFN_vkCmdCopyBufferToImage2)load(context, "vkCmdCopyBufferToImage2"); - table->vkCmdCopyImage2 = (PFN_vkCmdCopyImage2)load(context, "vkCmdCopyImage2"); - table->vkCmdCopyImageToBuffer2 = (PFN_vkCmdCopyImageToBuffer2)load(context, "vkCmdCopyImageToBuffer2"); - table->vkCmdEndRendering = (PFN_vkCmdEndRendering)load(context, "vkCmdEndRendering"); - table->vkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)load(context, "vkCmdPipelineBarrier2"); - table->vkCmdResetEvent2 = (PFN_vkCmdResetEvent2)load(context, "vkCmdResetEvent2"); - table->vkCmdResolveImage2 = (PFN_vkCmdResolveImage2)load(context, "vkCmdResolveImage2"); - table->vkCmdSetCullMode = (PFN_vkCmdSetCullMode)load(context, "vkCmdSetCullMode"); - table->vkCmdSetDepthBiasEnable = (PFN_vkCmdSetDepthBiasEnable)load(context, "vkCmdSetDepthBiasEnable"); - table->vkCmdSetDepthBoundsTestEnable = (PFN_vkCmdSetDepthBoundsTestEnable)load(context, "vkCmdSetDepthBoundsTestEnable"); - table->vkCmdSetDepthCompareOp = (PFN_vkCmdSetDepthCompareOp)load(context, "vkCmdSetDepthCompareOp"); - table->vkCmdSetDepthTestEnable = (PFN_vkCmdSetDepthTestEnable)load(context, "vkCmdSetDepthTestEnable"); - table->vkCmdSetDepthWriteEnable = (PFN_vkCmdSetDepthWriteEnable)load(context, "vkCmdSetDepthWriteEnable"); - table->vkCmdSetEvent2 = (PFN_vkCmdSetEvent2)load(context, "vkCmdSetEvent2"); - table->vkCmdSetFrontFace = (PFN_vkCmdSetFrontFace)load(context, "vkCmdSetFrontFace"); - table->vkCmdSetPrimitiveRestartEnable = (PFN_vkCmdSetPrimitiveRestartEnable)load(context, "vkCmdSetPrimitiveRestartEnable"); - table->vkCmdSetPrimitiveTopology = (PFN_vkCmdSetPrimitiveTopology)load(context, "vkCmdSetPrimitiveTopology"); - table->vkCmdSetRasterizerDiscardEnable = (PFN_vkCmdSetRasterizerDiscardEnable)load(context, "vkCmdSetRasterizerDiscardEnable"); - table->vkCmdSetScissorWithCount = (PFN_vkCmdSetScissorWithCount)load(context, "vkCmdSetScissorWithCount"); - table->vkCmdSetStencilOp = (PFN_vkCmdSetStencilOp)load(context, "vkCmdSetStencilOp"); - table->vkCmdSetStencilTestEnable = (PFN_vkCmdSetStencilTestEnable)load(context, "vkCmdSetStencilTestEnable"); - table->vkCmdSetViewportWithCount = (PFN_vkCmdSetViewportWithCount)load(context, "vkCmdSetViewportWithCount"); - table->vkCmdWaitEvents2 = (PFN_vkCmdWaitEvents2)load(context, "vkCmdWaitEvents2"); - table->vkCmdWriteTimestamp2 = (PFN_vkCmdWriteTimestamp2)load(context, "vkCmdWriteTimestamp2"); - table->vkCreatePrivateDataSlot = (PFN_vkCreatePrivateDataSlot)load(context, "vkCreatePrivateDataSlot"); - table->vkDestroyPrivateDataSlot = (PFN_vkDestroyPrivateDataSlot)load(context, "vkDestroyPrivateDataSlot"); - table->vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)load(context, "vkGetDeviceBufferMemoryRequirements"); - table->vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)load(context, "vkGetDeviceImageMemoryRequirements"); - table->vkGetDeviceImageSparseMemoryRequirements = (PFN_vkGetDeviceImageSparseMemoryRequirements)load(context, 
"vkGetDeviceImageSparseMemoryRequirements"); - table->vkGetPrivateData = (PFN_vkGetPrivateData)load(context, "vkGetPrivateData"); - table->vkQueueSubmit2 = (PFN_vkQueueSubmit2)load(context, "vkQueueSubmit2"); - table->vkSetPrivateData = (PFN_vkSetPrivateData)load(context, "vkSetPrivateData"); -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_AMDX_shader_enqueue) - table->vkCmdDispatchGraphAMDX = (PFN_vkCmdDispatchGraphAMDX)load(context, "vkCmdDispatchGraphAMDX"); - table->vkCmdDispatchGraphIndirectAMDX = (PFN_vkCmdDispatchGraphIndirectAMDX)load(context, "vkCmdDispatchGraphIndirectAMDX"); - table->vkCmdDispatchGraphIndirectCountAMDX = (PFN_vkCmdDispatchGraphIndirectCountAMDX)load(context, "vkCmdDispatchGraphIndirectCountAMDX"); - table->vkCmdInitializeGraphScratchMemoryAMDX = (PFN_vkCmdInitializeGraphScratchMemoryAMDX)load(context, "vkCmdInitializeGraphScratchMemoryAMDX"); - table->vkCreateExecutionGraphPipelinesAMDX = (PFN_vkCreateExecutionGraphPipelinesAMDX)load(context, "vkCreateExecutionGraphPipelinesAMDX"); - table->vkGetExecutionGraphPipelineNodeIndexAMDX = (PFN_vkGetExecutionGraphPipelineNodeIndexAMDX)load(context, "vkGetExecutionGraphPipelineNodeIndexAMDX"); - table->vkGetExecutionGraphPipelineScratchSizeAMDX = (PFN_vkGetExecutionGraphPipelineScratchSizeAMDX)load(context, "vkGetExecutionGraphPipelineScratchSizeAMDX"); -#endif /* defined(VK_AMDX_shader_enqueue) */ -#if defined(VK_AMD_anti_lag) - table->vkAntiLagUpdateAMD = (PFN_vkAntiLagUpdateAMD)load(context, "vkAntiLagUpdateAMD"); -#endif /* defined(VK_AMD_anti_lag) */ -#if defined(VK_AMD_buffer_marker) - table->vkCmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)load(context, "vkCmdWriteBufferMarkerAMD"); -#endif /* defined(VK_AMD_buffer_marker) */ -#if defined(VK_AMD_display_native_hdr) - table->vkSetLocalDimmingAMD = (PFN_vkSetLocalDimmingAMD)load(context, "vkSetLocalDimmingAMD"); -#endif /* defined(VK_AMD_display_native_hdr) */ -#if defined(VK_AMD_draw_indirect_count) - table->vkCmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD)load(context, "vkCmdDrawIndexedIndirectCountAMD"); - table->vkCmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD)load(context, "vkCmdDrawIndirectCountAMD"); -#endif /* defined(VK_AMD_draw_indirect_count) */ -#if defined(VK_AMD_shader_info) - table->vkGetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)load(context, "vkGetShaderInfoAMD"); -#endif /* defined(VK_AMD_shader_info) */ -#if defined(VK_ANDROID_external_memory_android_hardware_buffer) - table->vkGetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)load(context, "vkGetAndroidHardwareBufferPropertiesANDROID"); - table->vkGetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID)load(context, "vkGetMemoryAndroidHardwareBufferANDROID"); -#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */ -#if defined(VK_EXT_attachment_feedback_loop_dynamic_state) - table->vkCmdSetAttachmentFeedbackLoopEnableEXT = (PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT)load(context, "vkCmdSetAttachmentFeedbackLoopEnableEXT"); -#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */ -#if defined(VK_EXT_buffer_device_address) - table->vkGetBufferDeviceAddressEXT = (PFN_vkGetBufferDeviceAddressEXT)load(context, "vkGetBufferDeviceAddressEXT"); -#endif /* defined(VK_EXT_buffer_device_address) */ -#if defined(VK_EXT_calibrated_timestamps) - table->vkGetCalibratedTimestampsEXT = (PFN_vkGetCalibratedTimestampsEXT)load(context, "vkGetCalibratedTimestampsEXT"); 
-#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_color_write_enable) - table->vkCmdSetColorWriteEnableEXT = (PFN_vkCmdSetColorWriteEnableEXT)load(context, "vkCmdSetColorWriteEnableEXT"); -#endif /* defined(VK_EXT_color_write_enable) */ -#if defined(VK_EXT_conditional_rendering) - table->vkCmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT)load(context, "vkCmdBeginConditionalRenderingEXT"); - table->vkCmdEndConditionalRenderingEXT = (PFN_vkCmdEndConditionalRenderingEXT)load(context, "vkCmdEndConditionalRenderingEXT"); -#endif /* defined(VK_EXT_conditional_rendering) */ -#if defined(VK_EXT_debug_marker) - table->vkCmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT)load(context, "vkCmdDebugMarkerBeginEXT"); - table->vkCmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT)load(context, "vkCmdDebugMarkerEndEXT"); - table->vkCmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT)load(context, "vkCmdDebugMarkerInsertEXT"); - table->vkDebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)load(context, "vkDebugMarkerSetObjectNameEXT"); - table->vkDebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT)load(context, "vkDebugMarkerSetObjectTagEXT"); -#endif /* defined(VK_EXT_debug_marker) */ -#if defined(VK_EXT_depth_bias_control) - table->vkCmdSetDepthBias2EXT = (PFN_vkCmdSetDepthBias2EXT)load(context, "vkCmdSetDepthBias2EXT"); -#endif /* defined(VK_EXT_depth_bias_control) */ -#if defined(VK_EXT_descriptor_buffer) - table->vkCmdBindDescriptorBufferEmbeddedSamplersEXT = (PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT)load(context, "vkCmdBindDescriptorBufferEmbeddedSamplersEXT"); - table->vkCmdBindDescriptorBuffersEXT = (PFN_vkCmdBindDescriptorBuffersEXT)load(context, "vkCmdBindDescriptorBuffersEXT"); - table->vkCmdSetDescriptorBufferOffsetsEXT = (PFN_vkCmdSetDescriptorBufferOffsetsEXT)load(context, "vkCmdSetDescriptorBufferOffsetsEXT"); - table->vkGetBufferOpaqueCaptureDescriptorDataEXT = (PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT)load(context, "vkGetBufferOpaqueCaptureDescriptorDataEXT"); - table->vkGetDescriptorEXT = (PFN_vkGetDescriptorEXT)load(context, "vkGetDescriptorEXT"); - table->vkGetDescriptorSetLayoutBindingOffsetEXT = (PFN_vkGetDescriptorSetLayoutBindingOffsetEXT)load(context, "vkGetDescriptorSetLayoutBindingOffsetEXT"); - table->vkGetDescriptorSetLayoutSizeEXT = (PFN_vkGetDescriptorSetLayoutSizeEXT)load(context, "vkGetDescriptorSetLayoutSizeEXT"); - table->vkGetImageOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageOpaqueCaptureDescriptorDataEXT"); - table->vkGetImageViewOpaqueCaptureDescriptorDataEXT = (PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT)load(context, "vkGetImageViewOpaqueCaptureDescriptorDataEXT"); - table->vkGetSamplerOpaqueCaptureDescriptorDataEXT = (PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT)load(context, "vkGetSamplerOpaqueCaptureDescriptorDataEXT"); -#endif /* defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) - table->vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT = (PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT)load(context, "vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT"); -#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */ -#if defined(VK_EXT_device_fault) - table->vkGetDeviceFaultInfoEXT = (PFN_vkGetDeviceFaultInfoEXT)load(context, 
"vkGetDeviceFaultInfoEXT"); -#endif /* defined(VK_EXT_device_fault) */ -#if defined(VK_EXT_discard_rectangles) - table->vkCmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT)load(context, "vkCmdSetDiscardRectangleEXT"); -#endif /* defined(VK_EXT_discard_rectangles) */ -#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 - table->vkCmdSetDiscardRectangleEnableEXT = (PFN_vkCmdSetDiscardRectangleEnableEXT)load(context, "vkCmdSetDiscardRectangleEnableEXT"); - table->vkCmdSetDiscardRectangleModeEXT = (PFN_vkCmdSetDiscardRectangleModeEXT)load(context, "vkCmdSetDiscardRectangleModeEXT"); -#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */ -#if defined(VK_EXT_display_control) - table->vkDisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT)load(context, "vkDisplayPowerControlEXT"); - table->vkGetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT)load(context, "vkGetSwapchainCounterEXT"); - table->vkRegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT)load(context, "vkRegisterDeviceEventEXT"); - table->vkRegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT)load(context, "vkRegisterDisplayEventEXT"); -#endif /* defined(VK_EXT_display_control) */ -#if defined(VK_EXT_external_memory_host) - table->vkGetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT)load(context, "vkGetMemoryHostPointerPropertiesEXT"); -#endif /* defined(VK_EXT_external_memory_host) */ -#if defined(VK_EXT_full_screen_exclusive) - table->vkAcquireFullScreenExclusiveModeEXT = (PFN_vkAcquireFullScreenExclusiveModeEXT)load(context, "vkAcquireFullScreenExclusiveModeEXT"); - table->vkReleaseFullScreenExclusiveModeEXT = (PFN_vkReleaseFullScreenExclusiveModeEXT)load(context, "vkReleaseFullScreenExclusiveModeEXT"); -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) - table->vkGetDeviceGroupSurfacePresentModes2EXT = (PFN_vkGetDeviceGroupSurfacePresentModes2EXT)load(context, "vkGetDeviceGroupSurfacePresentModes2EXT"); -#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */ -#if defined(VK_EXT_hdr_metadata) - table->vkSetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT)load(context, "vkSetHdrMetadataEXT"); -#endif /* defined(VK_EXT_hdr_metadata) */ -#if defined(VK_EXT_host_image_copy) - table->vkCopyImageToImageEXT = (PFN_vkCopyImageToImageEXT)load(context, "vkCopyImageToImageEXT"); - table->vkCopyImageToMemoryEXT = (PFN_vkCopyImageToMemoryEXT)load(context, "vkCopyImageToMemoryEXT"); - table->vkCopyMemoryToImageEXT = (PFN_vkCopyMemoryToImageEXT)load(context, "vkCopyMemoryToImageEXT"); - table->vkTransitionImageLayoutEXT = (PFN_vkTransitionImageLayoutEXT)load(context, "vkTransitionImageLayoutEXT"); -#endif /* defined(VK_EXT_host_image_copy) */ -#if defined(VK_EXT_host_query_reset) - table->vkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)load(context, "vkResetQueryPoolEXT"); -#endif /* defined(VK_EXT_host_query_reset) */ -#if defined(VK_EXT_image_drm_format_modifier) - table->vkGetImageDrmFormatModifierPropertiesEXT = (PFN_vkGetImageDrmFormatModifierPropertiesEXT)load(context, "vkGetImageDrmFormatModifierPropertiesEXT"); -#endif /* defined(VK_EXT_image_drm_format_modifier) */ -#if defined(VK_EXT_line_rasterization) - table->vkCmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT)load(context, "vkCmdSetLineStippleEXT"); -#endif /* defined(VK_EXT_line_rasterization) */ -#if 
defined(VK_EXT_mesh_shader) - table->vkCmdDrawMeshTasksEXT = (PFN_vkCmdDrawMeshTasksEXT)load(context, "vkCmdDrawMeshTasksEXT"); - table->vkCmdDrawMeshTasksIndirectCountEXT = (PFN_vkCmdDrawMeshTasksIndirectCountEXT)load(context, "vkCmdDrawMeshTasksIndirectCountEXT"); - table->vkCmdDrawMeshTasksIndirectEXT = (PFN_vkCmdDrawMeshTasksIndirectEXT)load(context, "vkCmdDrawMeshTasksIndirectEXT"); -#endif /* defined(VK_EXT_mesh_shader) */ -#if defined(VK_EXT_metal_objects) - table->vkExportMetalObjectsEXT = (PFN_vkExportMetalObjectsEXT)load(context, "vkExportMetalObjectsEXT"); -#endif /* defined(VK_EXT_metal_objects) */ -#if defined(VK_EXT_multi_draw) - table->vkCmdDrawMultiEXT = (PFN_vkCmdDrawMultiEXT)load(context, "vkCmdDrawMultiEXT"); - table->vkCmdDrawMultiIndexedEXT = (PFN_vkCmdDrawMultiIndexedEXT)load(context, "vkCmdDrawMultiIndexedEXT"); -#endif /* defined(VK_EXT_multi_draw) */ -#if defined(VK_EXT_opacity_micromap) - table->vkBuildMicromapsEXT = (PFN_vkBuildMicromapsEXT)load(context, "vkBuildMicromapsEXT"); - table->vkCmdBuildMicromapsEXT = (PFN_vkCmdBuildMicromapsEXT)load(context, "vkCmdBuildMicromapsEXT"); - table->vkCmdCopyMemoryToMicromapEXT = (PFN_vkCmdCopyMemoryToMicromapEXT)load(context, "vkCmdCopyMemoryToMicromapEXT"); - table->vkCmdCopyMicromapEXT = (PFN_vkCmdCopyMicromapEXT)load(context, "vkCmdCopyMicromapEXT"); - table->vkCmdCopyMicromapToMemoryEXT = (PFN_vkCmdCopyMicromapToMemoryEXT)load(context, "vkCmdCopyMicromapToMemoryEXT"); - table->vkCmdWriteMicromapsPropertiesEXT = (PFN_vkCmdWriteMicromapsPropertiesEXT)load(context, "vkCmdWriteMicromapsPropertiesEXT"); - table->vkCopyMemoryToMicromapEXT = (PFN_vkCopyMemoryToMicromapEXT)load(context, "vkCopyMemoryToMicromapEXT"); - table->vkCopyMicromapEXT = (PFN_vkCopyMicromapEXT)load(context, "vkCopyMicromapEXT"); - table->vkCopyMicromapToMemoryEXT = (PFN_vkCopyMicromapToMemoryEXT)load(context, "vkCopyMicromapToMemoryEXT"); - table->vkCreateMicromapEXT = (PFN_vkCreateMicromapEXT)load(context, "vkCreateMicromapEXT"); - table->vkDestroyMicromapEXT = (PFN_vkDestroyMicromapEXT)load(context, "vkDestroyMicromapEXT"); - table->vkGetDeviceMicromapCompatibilityEXT = (PFN_vkGetDeviceMicromapCompatibilityEXT)load(context, "vkGetDeviceMicromapCompatibilityEXT"); - table->vkGetMicromapBuildSizesEXT = (PFN_vkGetMicromapBuildSizesEXT)load(context, "vkGetMicromapBuildSizesEXT"); - table->vkWriteMicromapsPropertiesEXT = (PFN_vkWriteMicromapsPropertiesEXT)load(context, "vkWriteMicromapsPropertiesEXT"); -#endif /* defined(VK_EXT_opacity_micromap) */ -#if defined(VK_EXT_pageable_device_local_memory) - table->vkSetDeviceMemoryPriorityEXT = (PFN_vkSetDeviceMemoryPriorityEXT)load(context, "vkSetDeviceMemoryPriorityEXT"); -#endif /* defined(VK_EXT_pageable_device_local_memory) */ -#if defined(VK_EXT_pipeline_properties) - table->vkGetPipelinePropertiesEXT = (PFN_vkGetPipelinePropertiesEXT)load(context, "vkGetPipelinePropertiesEXT"); -#endif /* defined(VK_EXT_pipeline_properties) */ -#if defined(VK_EXT_private_data) - table->vkCreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)load(context, "vkCreatePrivateDataSlotEXT"); - table->vkDestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)load(context, "vkDestroyPrivateDataSlotEXT"); - table->vkGetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)load(context, "vkGetPrivateDataEXT"); - table->vkSetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)load(context, "vkSetPrivateDataEXT"); -#endif /* defined(VK_EXT_private_data) */ -#if defined(VK_EXT_sample_locations) - table->vkCmdSetSampleLocationsEXT = 
(PFN_vkCmdSetSampleLocationsEXT)load(context, "vkCmdSetSampleLocationsEXT"); -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_shader_module_identifier) - table->vkGetShaderModuleCreateInfoIdentifierEXT = (PFN_vkGetShaderModuleCreateInfoIdentifierEXT)load(context, "vkGetShaderModuleCreateInfoIdentifierEXT"); - table->vkGetShaderModuleIdentifierEXT = (PFN_vkGetShaderModuleIdentifierEXT)load(context, "vkGetShaderModuleIdentifierEXT"); -#endif /* defined(VK_EXT_shader_module_identifier) */ -#if defined(VK_EXT_shader_object) - table->vkCmdBindShadersEXT = (PFN_vkCmdBindShadersEXT)load(context, "vkCmdBindShadersEXT"); - table->vkCreateShadersEXT = (PFN_vkCreateShadersEXT)load(context, "vkCreateShadersEXT"); - table->vkDestroyShaderEXT = (PFN_vkDestroyShaderEXT)load(context, "vkDestroyShaderEXT"); - table->vkGetShaderBinaryDataEXT = (PFN_vkGetShaderBinaryDataEXT)load(context, "vkGetShaderBinaryDataEXT"); -#endif /* defined(VK_EXT_shader_object) */ -#if defined(VK_EXT_swapchain_maintenance1) - table->vkReleaseSwapchainImagesEXT = (PFN_vkReleaseSwapchainImagesEXT)load(context, "vkReleaseSwapchainImagesEXT"); -#endif /* defined(VK_EXT_swapchain_maintenance1) */ -#if defined(VK_EXT_transform_feedback) - table->vkCmdBeginQueryIndexedEXT = (PFN_vkCmdBeginQueryIndexedEXT)load(context, "vkCmdBeginQueryIndexedEXT"); - table->vkCmdBeginTransformFeedbackEXT = (PFN_vkCmdBeginTransformFeedbackEXT)load(context, "vkCmdBeginTransformFeedbackEXT"); - table->vkCmdBindTransformFeedbackBuffersEXT = (PFN_vkCmdBindTransformFeedbackBuffersEXT)load(context, "vkCmdBindTransformFeedbackBuffersEXT"); - table->vkCmdDrawIndirectByteCountEXT = (PFN_vkCmdDrawIndirectByteCountEXT)load(context, "vkCmdDrawIndirectByteCountEXT"); - table->vkCmdEndQueryIndexedEXT = (PFN_vkCmdEndQueryIndexedEXT)load(context, "vkCmdEndQueryIndexedEXT"); - table->vkCmdEndTransformFeedbackEXT = (PFN_vkCmdEndTransformFeedbackEXT)load(context, "vkCmdEndTransformFeedbackEXT"); -#endif /* defined(VK_EXT_transform_feedback) */ -#if defined(VK_EXT_validation_cache) - table->vkCreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT)load(context, "vkCreateValidationCacheEXT"); - table->vkDestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT)load(context, "vkDestroyValidationCacheEXT"); - table->vkGetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT)load(context, "vkGetValidationCacheDataEXT"); - table->vkMergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT)load(context, "vkMergeValidationCachesEXT"); -#endif /* defined(VK_EXT_validation_cache) */ -#if defined(VK_FUCHSIA_buffer_collection) - table->vkCreateBufferCollectionFUCHSIA = (PFN_vkCreateBufferCollectionFUCHSIA)load(context, "vkCreateBufferCollectionFUCHSIA"); - table->vkDestroyBufferCollectionFUCHSIA = (PFN_vkDestroyBufferCollectionFUCHSIA)load(context, "vkDestroyBufferCollectionFUCHSIA"); - table->vkGetBufferCollectionPropertiesFUCHSIA = (PFN_vkGetBufferCollectionPropertiesFUCHSIA)load(context, "vkGetBufferCollectionPropertiesFUCHSIA"); - table->vkSetBufferCollectionBufferConstraintsFUCHSIA = (PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA)load(context, "vkSetBufferCollectionBufferConstraintsFUCHSIA"); - table->vkSetBufferCollectionImageConstraintsFUCHSIA = (PFN_vkSetBufferCollectionImageConstraintsFUCHSIA)load(context, "vkSetBufferCollectionImageConstraintsFUCHSIA"); -#endif /* defined(VK_FUCHSIA_buffer_collection) */ -#if defined(VK_FUCHSIA_external_memory) - table->vkGetMemoryZirconHandleFUCHSIA = (PFN_vkGetMemoryZirconHandleFUCHSIA)load(context, 
"vkGetMemoryZirconHandleFUCHSIA"); - table->vkGetMemoryZirconHandlePropertiesFUCHSIA = (PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA)load(context, "vkGetMemoryZirconHandlePropertiesFUCHSIA"); -#endif /* defined(VK_FUCHSIA_external_memory) */ -#if defined(VK_FUCHSIA_external_semaphore) - table->vkGetSemaphoreZirconHandleFUCHSIA = (PFN_vkGetSemaphoreZirconHandleFUCHSIA)load(context, "vkGetSemaphoreZirconHandleFUCHSIA"); - table->vkImportSemaphoreZirconHandleFUCHSIA = (PFN_vkImportSemaphoreZirconHandleFUCHSIA)load(context, "vkImportSemaphoreZirconHandleFUCHSIA"); -#endif /* defined(VK_FUCHSIA_external_semaphore) */ -#if defined(VK_GOOGLE_display_timing) - table->vkGetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE)load(context, "vkGetPastPresentationTimingGOOGLE"); - table->vkGetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE)load(context, "vkGetRefreshCycleDurationGOOGLE"); -#endif /* defined(VK_GOOGLE_display_timing) */ -#if defined(VK_HUAWEI_cluster_culling_shader) - table->vkCmdDrawClusterHUAWEI = (PFN_vkCmdDrawClusterHUAWEI)load(context, "vkCmdDrawClusterHUAWEI"); - table->vkCmdDrawClusterIndirectHUAWEI = (PFN_vkCmdDrawClusterIndirectHUAWEI)load(context, "vkCmdDrawClusterIndirectHUAWEI"); -#endif /* defined(VK_HUAWEI_cluster_culling_shader) */ -#if defined(VK_HUAWEI_invocation_mask) - table->vkCmdBindInvocationMaskHUAWEI = (PFN_vkCmdBindInvocationMaskHUAWEI)load(context, "vkCmdBindInvocationMaskHUAWEI"); -#endif /* defined(VK_HUAWEI_invocation_mask) */ -#if defined(VK_HUAWEI_subpass_shading) - table->vkCmdSubpassShadingHUAWEI = (PFN_vkCmdSubpassShadingHUAWEI)load(context, "vkCmdSubpassShadingHUAWEI"); - table->vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI = (PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)load(context, "vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI"); -#endif /* defined(VK_HUAWEI_subpass_shading) */ -#if defined(VK_INTEL_performance_query) - table->vkAcquirePerformanceConfigurationINTEL = (PFN_vkAcquirePerformanceConfigurationINTEL)load(context, "vkAcquirePerformanceConfigurationINTEL"); - table->vkCmdSetPerformanceMarkerINTEL = (PFN_vkCmdSetPerformanceMarkerINTEL)load(context, "vkCmdSetPerformanceMarkerINTEL"); - table->vkCmdSetPerformanceOverrideINTEL = (PFN_vkCmdSetPerformanceOverrideINTEL)load(context, "vkCmdSetPerformanceOverrideINTEL"); - table->vkCmdSetPerformanceStreamMarkerINTEL = (PFN_vkCmdSetPerformanceStreamMarkerINTEL)load(context, "vkCmdSetPerformanceStreamMarkerINTEL"); - table->vkGetPerformanceParameterINTEL = (PFN_vkGetPerformanceParameterINTEL)load(context, "vkGetPerformanceParameterINTEL"); - table->vkInitializePerformanceApiINTEL = (PFN_vkInitializePerformanceApiINTEL)load(context, "vkInitializePerformanceApiINTEL"); - table->vkQueueSetPerformanceConfigurationINTEL = (PFN_vkQueueSetPerformanceConfigurationINTEL)load(context, "vkQueueSetPerformanceConfigurationINTEL"); - table->vkReleasePerformanceConfigurationINTEL = (PFN_vkReleasePerformanceConfigurationINTEL)load(context, "vkReleasePerformanceConfigurationINTEL"); - table->vkUninitializePerformanceApiINTEL = (PFN_vkUninitializePerformanceApiINTEL)load(context, "vkUninitializePerformanceApiINTEL"); -#endif /* defined(VK_INTEL_performance_query) */ -#if defined(VK_KHR_acceleration_structure) - table->vkBuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)load(context, "vkBuildAccelerationStructuresKHR"); - table->vkCmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)load(context, 
"vkCmdBuildAccelerationStructuresIndirectKHR"); - table->vkCmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)load(context, "vkCmdBuildAccelerationStructuresKHR"); - table->vkCmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)load(context, "vkCmdCopyAccelerationStructureKHR"); - table->vkCmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)load(context, "vkCmdCopyAccelerationStructureToMemoryKHR"); - table->vkCmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)load(context, "vkCmdCopyMemoryToAccelerationStructureKHR"); - table->vkCmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)load(context, "vkCmdWriteAccelerationStructuresPropertiesKHR"); - table->vkCopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)load(context, "vkCopyAccelerationStructureKHR"); - table->vkCopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)load(context, "vkCopyAccelerationStructureToMemoryKHR"); - table->vkCopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)load(context, "vkCopyMemoryToAccelerationStructureKHR"); - table->vkCreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)load(context, "vkCreateAccelerationStructureKHR"); - table->vkDestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)load(context, "vkDestroyAccelerationStructureKHR"); - table->vkGetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)load(context, "vkGetAccelerationStructureBuildSizesKHR"); - table->vkGetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)load(context, "vkGetAccelerationStructureDeviceAddressKHR"); - table->vkGetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)load(context, "vkGetDeviceAccelerationStructureCompatibilityKHR"); - table->vkWriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)load(context, "vkWriteAccelerationStructuresPropertiesKHR"); -#endif /* defined(VK_KHR_acceleration_structure) */ -#if defined(VK_KHR_bind_memory2) - table->vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)load(context, "vkBindBufferMemory2KHR"); - table->vkBindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)load(context, "vkBindImageMemory2KHR"); -#endif /* defined(VK_KHR_bind_memory2) */ -#if defined(VK_KHR_buffer_device_address) - table->vkGetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)load(context, "vkGetBufferDeviceAddressKHR"); - table->vkGetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)load(context, "vkGetBufferOpaqueCaptureAddressKHR"); - table->vkGetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)load(context, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); -#endif /* defined(VK_KHR_buffer_device_address) */ -#if defined(VK_KHR_calibrated_timestamps) - table->vkGetCalibratedTimestampsKHR = (PFN_vkGetCalibratedTimestampsKHR)load(context, "vkGetCalibratedTimestampsKHR"); -#endif /* defined(VK_KHR_calibrated_timestamps) */ -#if defined(VK_KHR_copy_commands2) - table->vkCmdBlitImage2KHR = (PFN_vkCmdBlitImage2KHR)load(context, "vkCmdBlitImage2KHR"); - table->vkCmdCopyBuffer2KHR = (PFN_vkCmdCopyBuffer2KHR)load(context, "vkCmdCopyBuffer2KHR"); - table->vkCmdCopyBufferToImage2KHR = (PFN_vkCmdCopyBufferToImage2KHR)load(context, 
"vkCmdCopyBufferToImage2KHR"); - table->vkCmdCopyImage2KHR = (PFN_vkCmdCopyImage2KHR)load(context, "vkCmdCopyImage2KHR"); - table->vkCmdCopyImageToBuffer2KHR = (PFN_vkCmdCopyImageToBuffer2KHR)load(context, "vkCmdCopyImageToBuffer2KHR"); - table->vkCmdResolveImage2KHR = (PFN_vkCmdResolveImage2KHR)load(context, "vkCmdResolveImage2KHR"); -#endif /* defined(VK_KHR_copy_commands2) */ -#if defined(VK_KHR_create_renderpass2) - table->vkCmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)load(context, "vkCmdBeginRenderPass2KHR"); - table->vkCmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)load(context, "vkCmdEndRenderPass2KHR"); - table->vkCmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)load(context, "vkCmdNextSubpass2KHR"); - table->vkCreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)load(context, "vkCreateRenderPass2KHR"); -#endif /* defined(VK_KHR_create_renderpass2) */ -#if defined(VK_KHR_deferred_host_operations) - table->vkCreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)load(context, "vkCreateDeferredOperationKHR"); - table->vkDeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)load(context, "vkDeferredOperationJoinKHR"); - table->vkDestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)load(context, "vkDestroyDeferredOperationKHR"); - table->vkGetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)load(context, "vkGetDeferredOperationMaxConcurrencyKHR"); - table->vkGetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)load(context, "vkGetDeferredOperationResultKHR"); -#endif /* defined(VK_KHR_deferred_host_operations) */ -#if defined(VK_KHR_descriptor_update_template) - table->vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)load(context, "vkCreateDescriptorUpdateTemplateKHR"); - table->vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)load(context, "vkDestroyDescriptorUpdateTemplateKHR"); - table->vkUpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR)load(context, "vkUpdateDescriptorSetWithTemplateKHR"); -#endif /* defined(VK_KHR_descriptor_update_template) */ -#if defined(VK_KHR_device_group) - table->vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)load(context, "vkCmdDispatchBaseKHR"); - table->vkCmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR)load(context, "vkCmdSetDeviceMaskKHR"); - table->vkGetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)load(context, "vkGetDeviceGroupPeerMemoryFeaturesKHR"); -#endif /* defined(VK_KHR_device_group) */ -#if defined(VK_KHR_display_swapchain) - table->vkCreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR)load(context, "vkCreateSharedSwapchainsKHR"); -#endif /* defined(VK_KHR_display_swapchain) */ -#if defined(VK_KHR_draw_indirect_count) - table->vkCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)load(context, "vkCmdDrawIndexedIndirectCountKHR"); - table->vkCmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)load(context, "vkCmdDrawIndirectCountKHR"); -#endif /* defined(VK_KHR_draw_indirect_count) */ -#if defined(VK_KHR_dynamic_rendering) - table->vkCmdBeginRenderingKHR = (PFN_vkCmdBeginRenderingKHR)load(context, "vkCmdBeginRenderingKHR"); - table->vkCmdEndRenderingKHR = (PFN_vkCmdEndRenderingKHR)load(context, "vkCmdEndRenderingKHR"); -#endif /* defined(VK_KHR_dynamic_rendering) */ -#if defined(VK_KHR_dynamic_rendering_local_read) - table->vkCmdSetRenderingAttachmentLocationsKHR = 
(PFN_vkCmdSetRenderingAttachmentLocationsKHR)load(context, "vkCmdSetRenderingAttachmentLocationsKHR"); - table->vkCmdSetRenderingInputAttachmentIndicesKHR = (PFN_vkCmdSetRenderingInputAttachmentIndicesKHR)load(context, "vkCmdSetRenderingInputAttachmentIndicesKHR"); -#endif /* defined(VK_KHR_dynamic_rendering_local_read) */ -#if defined(VK_KHR_external_fence_fd) - table->vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)load(context, "vkGetFenceFdKHR"); - table->vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)load(context, "vkImportFenceFdKHR"); -#endif /* defined(VK_KHR_external_fence_fd) */ -#if defined(VK_KHR_external_fence_win32) - table->vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)load(context, "vkGetFenceWin32HandleKHR"); - table->vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)load(context, "vkImportFenceWin32HandleKHR"); -#endif /* defined(VK_KHR_external_fence_win32) */ -#if defined(VK_KHR_external_memory_fd) - table->vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)load(context, "vkGetMemoryFdKHR"); - table->vkGetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR)load(context, "vkGetMemoryFdPropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_fd) */ -#if defined(VK_KHR_external_memory_win32) - table->vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)load(context, "vkGetMemoryWin32HandleKHR"); - table->vkGetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR)load(context, "vkGetMemoryWin32HandlePropertiesKHR"); -#endif /* defined(VK_KHR_external_memory_win32) */ -#if defined(VK_KHR_external_semaphore_fd) - table->vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)load(context, "vkGetSemaphoreFdKHR"); - table->vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)load(context, "vkImportSemaphoreFdKHR"); -#endif /* defined(VK_KHR_external_semaphore_fd) */ -#if defined(VK_KHR_external_semaphore_win32) - table->vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)load(context, "vkGetSemaphoreWin32HandleKHR"); - table->vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)load(context, "vkImportSemaphoreWin32HandleKHR"); -#endif /* defined(VK_KHR_external_semaphore_win32) */ -#if defined(VK_KHR_fragment_shading_rate) - table->vkCmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)load(context, "vkCmdSetFragmentShadingRateKHR"); -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_memory_requirements2) - table->vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)load(context, "vkGetBufferMemoryRequirements2KHR"); - table->vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)load(context, "vkGetImageMemoryRequirements2KHR"); - table->vkGetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR)load(context, "vkGetImageSparseMemoryRequirements2KHR"); -#endif /* defined(VK_KHR_get_memory_requirements2) */ -#if defined(VK_KHR_line_rasterization) - table->vkCmdSetLineStippleKHR = (PFN_vkCmdSetLineStippleKHR)load(context, "vkCmdSetLineStippleKHR"); -#endif /* defined(VK_KHR_line_rasterization) */ -#if defined(VK_KHR_maintenance1) - table->vkTrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR)load(context, "vkTrimCommandPoolKHR"); -#endif /* defined(VK_KHR_maintenance1) */ -#if defined(VK_KHR_maintenance3) - table->vkGetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)load(context, "vkGetDescriptorSetLayoutSupportKHR"); -#endif /* defined(VK_KHR_maintenance3) */ -#if defined(VK_KHR_maintenance4) - 
table->vkGetDeviceBufferMemoryRequirementsKHR = (PFN_vkGetDeviceBufferMemoryRequirementsKHR)load(context, "vkGetDeviceBufferMemoryRequirementsKHR"); - table->vkGetDeviceImageMemoryRequirementsKHR = (PFN_vkGetDeviceImageMemoryRequirementsKHR)load(context, "vkGetDeviceImageMemoryRequirementsKHR"); - table->vkGetDeviceImageSparseMemoryRequirementsKHR = (PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)load(context, "vkGetDeviceImageSparseMemoryRequirementsKHR"); -#endif /* defined(VK_KHR_maintenance4) */ -#if defined(VK_KHR_maintenance5) - table->vkCmdBindIndexBuffer2KHR = (PFN_vkCmdBindIndexBuffer2KHR)load(context, "vkCmdBindIndexBuffer2KHR"); - table->vkGetDeviceImageSubresourceLayoutKHR = (PFN_vkGetDeviceImageSubresourceLayoutKHR)load(context, "vkGetDeviceImageSubresourceLayoutKHR"); - table->vkGetImageSubresourceLayout2KHR = (PFN_vkGetImageSubresourceLayout2KHR)load(context, "vkGetImageSubresourceLayout2KHR"); - table->vkGetRenderingAreaGranularityKHR = (PFN_vkGetRenderingAreaGranularityKHR)load(context, "vkGetRenderingAreaGranularityKHR"); -#endif /* defined(VK_KHR_maintenance5) */ -#if defined(VK_KHR_maintenance6) - table->vkCmdBindDescriptorSets2KHR = (PFN_vkCmdBindDescriptorSets2KHR)load(context, "vkCmdBindDescriptorSets2KHR"); - table->vkCmdPushConstants2KHR = (PFN_vkCmdPushConstants2KHR)load(context, "vkCmdPushConstants2KHR"); -#endif /* defined(VK_KHR_maintenance6) */ -#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) - table->vkCmdPushDescriptorSet2KHR = (PFN_vkCmdPushDescriptorSet2KHR)load(context, "vkCmdPushDescriptorSet2KHR"); - table->vkCmdPushDescriptorSetWithTemplate2KHR = (PFN_vkCmdPushDescriptorSetWithTemplate2KHR)load(context, "vkCmdPushDescriptorSetWithTemplate2KHR"); -#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) - table->vkCmdBindDescriptorBufferEmbeddedSamplers2EXT = (PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT)load(context, "vkCmdBindDescriptorBufferEmbeddedSamplers2EXT"); - table->vkCmdSetDescriptorBufferOffsets2EXT = (PFN_vkCmdSetDescriptorBufferOffsets2EXT)load(context, "vkCmdSetDescriptorBufferOffsets2EXT"); -#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_KHR_map_memory2) - table->vkMapMemory2KHR = (PFN_vkMapMemory2KHR)load(context, "vkMapMemory2KHR"); - table->vkUnmapMemory2KHR = (PFN_vkUnmapMemory2KHR)load(context, "vkUnmapMemory2KHR"); -#endif /* defined(VK_KHR_map_memory2) */ -#if defined(VK_KHR_performance_query) - table->vkAcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)load(context, "vkAcquireProfilingLockKHR"); - table->vkReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)load(context, "vkReleaseProfilingLockKHR"); -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_pipeline_binary) - table->vkCreatePipelineBinariesKHR = (PFN_vkCreatePipelineBinariesKHR)load(context, "vkCreatePipelineBinariesKHR"); - table->vkDestroyPipelineBinaryKHR = (PFN_vkDestroyPipelineBinaryKHR)load(context, "vkDestroyPipelineBinaryKHR"); - table->vkGetPipelineBinaryDataKHR = (PFN_vkGetPipelineBinaryDataKHR)load(context, "vkGetPipelineBinaryDataKHR"); - table->vkGetPipelineKeyKHR = (PFN_vkGetPipelineKeyKHR)load(context, "vkGetPipelineKeyKHR"); - table->vkReleaseCapturedPipelineDataKHR = (PFN_vkReleaseCapturedPipelineDataKHR)load(context, "vkReleaseCapturedPipelineDataKHR"); -#endif /* defined(VK_KHR_pipeline_binary) */ -#if defined(VK_KHR_pipeline_executable_properties) - 
table->vkGetPipelineExecutableInternalRepresentationsKHR = (PFN_vkGetPipelineExecutableInternalRepresentationsKHR)load(context, "vkGetPipelineExecutableInternalRepresentationsKHR"); - table->vkGetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)load(context, "vkGetPipelineExecutablePropertiesKHR"); - table->vkGetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR)load(context, "vkGetPipelineExecutableStatisticsKHR"); -#endif /* defined(VK_KHR_pipeline_executable_properties) */ -#if defined(VK_KHR_present_wait) - table->vkWaitForPresentKHR = (PFN_vkWaitForPresentKHR)load(context, "vkWaitForPresentKHR"); -#endif /* defined(VK_KHR_present_wait) */ -#if defined(VK_KHR_push_descriptor) - table->vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)load(context, "vkCmdPushDescriptorSetKHR"); -#endif /* defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) - table->vkCmdTraceRaysIndirect2KHR = (PFN_vkCmdTraceRaysIndirect2KHR)load(context, "vkCmdTraceRaysIndirect2KHR"); -#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_ray_tracing_pipeline) - table->vkCmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)load(context, "vkCmdSetRayTracingPipelineStackSizeKHR"); - table->vkCmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)load(context, "vkCmdTraceRaysIndirectKHR"); - table->vkCmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)load(context, "vkCmdTraceRaysKHR"); - table->vkCreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)load(context, "vkCreateRayTracingPipelinesKHR"); - table->vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)load(context, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"); - table->vkGetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR)load(context, "vkGetRayTracingShaderGroupHandlesKHR"); - table->vkGetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)load(context, "vkGetRayTracingShaderGroupStackSizeKHR"); -#endif /* defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_sampler_ycbcr_conversion) - table->vkCreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)load(context, "vkCreateSamplerYcbcrConversionKHR"); - table->vkDestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR)load(context, "vkDestroySamplerYcbcrConversionKHR"); -#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */ -#if defined(VK_KHR_shared_presentable_image) - table->vkGetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR)load(context, "vkGetSwapchainStatusKHR"); -#endif /* defined(VK_KHR_shared_presentable_image) */ -#if defined(VK_KHR_swapchain) - table->vkAcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)load(context, "vkAcquireNextImageKHR"); - table->vkCreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)load(context, "vkCreateSwapchainKHR"); - table->vkDestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)load(context, "vkDestroySwapchainKHR"); - table->vkGetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)load(context, "vkGetSwapchainImagesKHR"); - table->vkQueuePresentKHR = (PFN_vkQueuePresentKHR)load(context, "vkQueuePresentKHR"); -#endif /* defined(VK_KHR_swapchain) */ -#if defined(VK_KHR_synchronization2) - table->vkCmdPipelineBarrier2KHR = (PFN_vkCmdPipelineBarrier2KHR)load(context, "vkCmdPipelineBarrier2KHR"); - 
table->vkCmdResetEvent2KHR = (PFN_vkCmdResetEvent2KHR)load(context, "vkCmdResetEvent2KHR"); - table->vkCmdSetEvent2KHR = (PFN_vkCmdSetEvent2KHR)load(context, "vkCmdSetEvent2KHR"); - table->vkCmdWaitEvents2KHR = (PFN_vkCmdWaitEvents2KHR)load(context, "vkCmdWaitEvents2KHR"); - table->vkCmdWriteTimestamp2KHR = (PFN_vkCmdWriteTimestamp2KHR)load(context, "vkCmdWriteTimestamp2KHR"); - table->vkQueueSubmit2KHR = (PFN_vkQueueSubmit2KHR)load(context, "vkQueueSubmit2KHR"); -#endif /* defined(VK_KHR_synchronization2) */ -#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) - table->vkCmdWriteBufferMarker2AMD = (PFN_vkCmdWriteBufferMarker2AMD)load(context, "vkCmdWriteBufferMarker2AMD"); -#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */ -#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) - table->vkGetQueueCheckpointData2NV = (PFN_vkGetQueueCheckpointData2NV)load(context, "vkGetQueueCheckpointData2NV"); -#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_KHR_timeline_semaphore) - table->vkGetSemaphoreCounterValueKHR = (PFN_vkGetSemaphoreCounterValueKHR)load(context, "vkGetSemaphoreCounterValueKHR"); - table->vkSignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)load(context, "vkSignalSemaphoreKHR"); - table->vkWaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)load(context, "vkWaitSemaphoresKHR"); -#endif /* defined(VK_KHR_timeline_semaphore) */ -#if defined(VK_KHR_video_decode_queue) - table->vkCmdDecodeVideoKHR = (PFN_vkCmdDecodeVideoKHR)load(context, "vkCmdDecodeVideoKHR"); -#endif /* defined(VK_KHR_video_decode_queue) */ -#if defined(VK_KHR_video_encode_queue) - table->vkCmdEncodeVideoKHR = (PFN_vkCmdEncodeVideoKHR)load(context, "vkCmdEncodeVideoKHR"); - table->vkGetEncodedVideoSessionParametersKHR = (PFN_vkGetEncodedVideoSessionParametersKHR)load(context, "vkGetEncodedVideoSessionParametersKHR"); -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) - table->vkBindVideoSessionMemoryKHR = (PFN_vkBindVideoSessionMemoryKHR)load(context, "vkBindVideoSessionMemoryKHR"); - table->vkCmdBeginVideoCodingKHR = (PFN_vkCmdBeginVideoCodingKHR)load(context, "vkCmdBeginVideoCodingKHR"); - table->vkCmdControlVideoCodingKHR = (PFN_vkCmdControlVideoCodingKHR)load(context, "vkCmdControlVideoCodingKHR"); - table->vkCmdEndVideoCodingKHR = (PFN_vkCmdEndVideoCodingKHR)load(context, "vkCmdEndVideoCodingKHR"); - table->vkCreateVideoSessionKHR = (PFN_vkCreateVideoSessionKHR)load(context, "vkCreateVideoSessionKHR"); - table->vkCreateVideoSessionParametersKHR = (PFN_vkCreateVideoSessionParametersKHR)load(context, "vkCreateVideoSessionParametersKHR"); - table->vkDestroyVideoSessionKHR = (PFN_vkDestroyVideoSessionKHR)load(context, "vkDestroyVideoSessionKHR"); - table->vkDestroyVideoSessionParametersKHR = (PFN_vkDestroyVideoSessionParametersKHR)load(context, "vkDestroyVideoSessionParametersKHR"); - table->vkGetVideoSessionMemoryRequirementsKHR = (PFN_vkGetVideoSessionMemoryRequirementsKHR)load(context, "vkGetVideoSessionMemoryRequirementsKHR"); - table->vkUpdateVideoSessionParametersKHR = (PFN_vkUpdateVideoSessionParametersKHR)load(context, "vkUpdateVideoSessionParametersKHR"); -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_NVX_binary_import) - table->vkCmdCuLaunchKernelNVX = (PFN_vkCmdCuLaunchKernelNVX)load(context, "vkCmdCuLaunchKernelNVX"); - table->vkCreateCuFunctionNVX = (PFN_vkCreateCuFunctionNVX)load(context, "vkCreateCuFunctionNVX"); - 
table->vkCreateCuModuleNVX = (PFN_vkCreateCuModuleNVX)load(context, "vkCreateCuModuleNVX"); - table->vkDestroyCuFunctionNVX = (PFN_vkDestroyCuFunctionNVX)load(context, "vkDestroyCuFunctionNVX"); - table->vkDestroyCuModuleNVX = (PFN_vkDestroyCuModuleNVX)load(context, "vkDestroyCuModuleNVX"); -#endif /* defined(VK_NVX_binary_import) */ -#if defined(VK_NVX_image_view_handle) - table->vkGetImageViewAddressNVX = (PFN_vkGetImageViewAddressNVX)load(context, "vkGetImageViewAddressNVX"); - table->vkGetImageViewHandleNVX = (PFN_vkGetImageViewHandleNVX)load(context, "vkGetImageViewHandleNVX"); -#endif /* defined(VK_NVX_image_view_handle) */ -#if defined(VK_NV_clip_space_w_scaling) - table->vkCmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV)load(context, "vkCmdSetViewportWScalingNV"); -#endif /* defined(VK_NV_clip_space_w_scaling) */ -#if defined(VK_NV_copy_memory_indirect) - table->vkCmdCopyMemoryIndirectNV = (PFN_vkCmdCopyMemoryIndirectNV)load(context, "vkCmdCopyMemoryIndirectNV"); - table->vkCmdCopyMemoryToImageIndirectNV = (PFN_vkCmdCopyMemoryToImageIndirectNV)load(context, "vkCmdCopyMemoryToImageIndirectNV"); -#endif /* defined(VK_NV_copy_memory_indirect) */ -#if defined(VK_NV_cuda_kernel_launch) - table->vkCmdCudaLaunchKernelNV = (PFN_vkCmdCudaLaunchKernelNV)load(context, "vkCmdCudaLaunchKernelNV"); - table->vkCreateCudaFunctionNV = (PFN_vkCreateCudaFunctionNV)load(context, "vkCreateCudaFunctionNV"); - table->vkCreateCudaModuleNV = (PFN_vkCreateCudaModuleNV)load(context, "vkCreateCudaModuleNV"); - table->vkDestroyCudaFunctionNV = (PFN_vkDestroyCudaFunctionNV)load(context, "vkDestroyCudaFunctionNV"); - table->vkDestroyCudaModuleNV = (PFN_vkDestroyCudaModuleNV)load(context, "vkDestroyCudaModuleNV"); - table->vkGetCudaModuleCacheNV = (PFN_vkGetCudaModuleCacheNV)load(context, "vkGetCudaModuleCacheNV"); -#endif /* defined(VK_NV_cuda_kernel_launch) */ -#if defined(VK_NV_device_diagnostic_checkpoints) - table->vkCmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV)load(context, "vkCmdSetCheckpointNV"); - table->vkGetQueueCheckpointDataNV = (PFN_vkGetQueueCheckpointDataNV)load(context, "vkGetQueueCheckpointDataNV"); -#endif /* defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_NV_device_generated_commands) - table->vkCmdBindPipelineShaderGroupNV = (PFN_vkCmdBindPipelineShaderGroupNV)load(context, "vkCmdBindPipelineShaderGroupNV"); - table->vkCmdExecuteGeneratedCommandsNV = (PFN_vkCmdExecuteGeneratedCommandsNV)load(context, "vkCmdExecuteGeneratedCommandsNV"); - table->vkCmdPreprocessGeneratedCommandsNV = (PFN_vkCmdPreprocessGeneratedCommandsNV)load(context, "vkCmdPreprocessGeneratedCommandsNV"); - table->vkCreateIndirectCommandsLayoutNV = (PFN_vkCreateIndirectCommandsLayoutNV)load(context, "vkCreateIndirectCommandsLayoutNV"); - table->vkDestroyIndirectCommandsLayoutNV = (PFN_vkDestroyIndirectCommandsLayoutNV)load(context, "vkDestroyIndirectCommandsLayoutNV"); - table->vkGetGeneratedCommandsMemoryRequirementsNV = (PFN_vkGetGeneratedCommandsMemoryRequirementsNV)load(context, "vkGetGeneratedCommandsMemoryRequirementsNV"); -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) - table->vkCmdUpdatePipelineIndirectBufferNV = (PFN_vkCmdUpdatePipelineIndirectBufferNV)load(context, "vkCmdUpdatePipelineIndirectBufferNV"); - table->vkGetPipelineIndirectDeviceAddressNV = (PFN_vkGetPipelineIndirectDeviceAddressNV)load(context, "vkGetPipelineIndirectDeviceAddressNV"); - table->vkGetPipelineIndirectMemoryRequirementsNV = 
(PFN_vkGetPipelineIndirectMemoryRequirementsNV)load(context, "vkGetPipelineIndirectMemoryRequirementsNV"); -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_rdma) - table->vkGetMemoryRemoteAddressNV = (PFN_vkGetMemoryRemoteAddressNV)load(context, "vkGetMemoryRemoteAddressNV"); -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) - table->vkGetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV)load(context, "vkGetMemoryWin32HandleNV"); -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) - table->vkCmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)load(context, "vkCmdSetFragmentShadingRateEnumNV"); -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) - table->vkGetLatencyTimingsNV = (PFN_vkGetLatencyTimingsNV)load(context, "vkGetLatencyTimingsNV"); - table->vkLatencySleepNV = (PFN_vkLatencySleepNV)load(context, "vkLatencySleepNV"); - table->vkQueueNotifyOutOfBandNV = (PFN_vkQueueNotifyOutOfBandNV)load(context, "vkQueueNotifyOutOfBandNV"); - table->vkSetLatencyMarkerNV = (PFN_vkSetLatencyMarkerNV)load(context, "vkSetLatencyMarkerNV"); - table->vkSetLatencySleepModeNV = (PFN_vkSetLatencySleepModeNV)load(context, "vkSetLatencySleepModeNV"); -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) - table->vkCmdDecompressMemoryIndirectCountNV = (PFN_vkCmdDecompressMemoryIndirectCountNV)load(context, "vkCmdDecompressMemoryIndirectCountNV"); - table->vkCmdDecompressMemoryNV = (PFN_vkCmdDecompressMemoryNV)load(context, "vkCmdDecompressMemoryNV"); -#endif /* defined(VK_NV_memory_decompression) */ -#if defined(VK_NV_mesh_shader) - table->vkCmdDrawMeshTasksIndirectCountNV = (PFN_vkCmdDrawMeshTasksIndirectCountNV)load(context, "vkCmdDrawMeshTasksIndirectCountNV"); - table->vkCmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)load(context, "vkCmdDrawMeshTasksIndirectNV"); - table->vkCmdDrawMeshTasksNV = (PFN_vkCmdDrawMeshTasksNV)load(context, "vkCmdDrawMeshTasksNV"); -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) - table->vkBindOpticalFlowSessionImageNV = (PFN_vkBindOpticalFlowSessionImageNV)load(context, "vkBindOpticalFlowSessionImageNV"); - table->vkCmdOpticalFlowExecuteNV = (PFN_vkCmdOpticalFlowExecuteNV)load(context, "vkCmdOpticalFlowExecuteNV"); - table->vkCreateOpticalFlowSessionNV = (PFN_vkCreateOpticalFlowSessionNV)load(context, "vkCreateOpticalFlowSessionNV"); - table->vkDestroyOpticalFlowSessionNV = (PFN_vkDestroyOpticalFlowSessionNV)load(context, "vkDestroyOpticalFlowSessionNV"); -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) - table->vkBindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)load(context, "vkBindAccelerationStructureMemoryNV"); - table->vkCmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)load(context, "vkCmdBuildAccelerationStructureNV"); - table->vkCmdCopyAccelerationStructureNV = (PFN_vkCmdCopyAccelerationStructureNV)load(context, "vkCmdCopyAccelerationStructureNV"); - table->vkCmdTraceRaysNV = (PFN_vkCmdTraceRaysNV)load(context, "vkCmdTraceRaysNV"); - table->vkCmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)load(context, "vkCmdWriteAccelerationStructuresPropertiesNV"); - table->vkCompileDeferredNV = (PFN_vkCompileDeferredNV)load(context, "vkCompileDeferredNV"); - table->vkCreateAccelerationStructureNV 
= (PFN_vkCreateAccelerationStructureNV)load(context, "vkCreateAccelerationStructureNV"); - table->vkCreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)load(context, "vkCreateRayTracingPipelinesNV"); - table->vkDestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)load(context, "vkDestroyAccelerationStructureNV"); - table->vkGetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)load(context, "vkGetAccelerationStructureHandleNV"); - table->vkGetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)load(context, "vkGetAccelerationStructureMemoryRequirementsNV"); - table->vkGetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)load(context, "vkGetRayTracingShaderGroupHandlesNV"); -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 - table->vkCmdSetExclusiveScissorEnableNV = (PFN_vkCmdSetExclusiveScissorEnableNV)load(context, "vkCmdSetExclusiveScissorEnableNV"); -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) - table->vkCmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)load(context, "vkCmdSetExclusiveScissorNV"); -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) - table->vkCmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)load(context, "vkCmdBindShadingRateImageNV"); - table->vkCmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)load(context, "vkCmdSetCoarseSampleOrderNV"); - table->vkCmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)load(context, "vkCmdSetViewportShadingRatePaletteNV"); -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) - table->vkGetDynamicRenderingTilePropertiesQCOM = (PFN_vkGetDynamicRenderingTilePropertiesQCOM)load(context, "vkGetDynamicRenderingTilePropertiesQCOM"); - table->vkGetFramebufferTilePropertiesQCOM = (PFN_vkGetFramebufferTilePropertiesQCOM)load(context, "vkGetFramebufferTilePropertiesQCOM"); -#endif /* defined(VK_QCOM_tile_properties) */ -#if defined(VK_QNX_external_memory_screen_buffer) - table->vkGetScreenBufferPropertiesQNX = (PFN_vkGetScreenBufferPropertiesQNX)load(context, "vkGetScreenBufferPropertiesQNX"); -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) - table->vkGetDescriptorSetHostMappingVALVE = (PFN_vkGetDescriptorSetHostMappingVALVE)load(context, "vkGetDescriptorSetHostMappingVALVE"); - table->vkGetDescriptorSetLayoutHostMappingInfoVALVE = (PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)load(context, "vkGetDescriptorSetLayoutHostMappingInfoVALVE"); -#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) - table->vkCmdBindVertexBuffers2EXT = (PFN_vkCmdBindVertexBuffers2EXT)load(context, "vkCmdBindVertexBuffers2EXT"); - table->vkCmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT)load(context, "vkCmdSetCullModeEXT"); - table->vkCmdSetDepthBoundsTestEnableEXT = (PFN_vkCmdSetDepthBoundsTestEnableEXT)load(context, "vkCmdSetDepthBoundsTestEnableEXT"); - table->vkCmdSetDepthCompareOpEXT = (PFN_vkCmdSetDepthCompareOpEXT)load(context, "vkCmdSetDepthCompareOpEXT"); - table->vkCmdSetDepthTestEnableEXT = (PFN_vkCmdSetDepthTestEnableEXT)load(context, "vkCmdSetDepthTestEnableEXT"); - table->vkCmdSetDepthWriteEnableEXT = 
(PFN_vkCmdSetDepthWriteEnableEXT)load(context, "vkCmdSetDepthWriteEnableEXT"); - table->vkCmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT)load(context, "vkCmdSetFrontFaceEXT"); - table->vkCmdSetPrimitiveTopologyEXT = (PFN_vkCmdSetPrimitiveTopologyEXT)load(context, "vkCmdSetPrimitiveTopologyEXT"); - table->vkCmdSetScissorWithCountEXT = (PFN_vkCmdSetScissorWithCountEXT)load(context, "vkCmdSetScissorWithCountEXT"); - table->vkCmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT)load(context, "vkCmdSetStencilOpEXT"); - table->vkCmdSetStencilTestEnableEXT = (PFN_vkCmdSetStencilTestEnableEXT)load(context, "vkCmdSetStencilTestEnableEXT"); - table->vkCmdSetViewportWithCountEXT = (PFN_vkCmdSetViewportWithCountEXT)load(context, "vkCmdSetViewportWithCountEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) - table->vkCmdSetDepthBiasEnableEXT = (PFN_vkCmdSetDepthBiasEnableEXT)load(context, "vkCmdSetDepthBiasEnableEXT"); - table->vkCmdSetLogicOpEXT = (PFN_vkCmdSetLogicOpEXT)load(context, "vkCmdSetLogicOpEXT"); - table->vkCmdSetPatchControlPointsEXT = (PFN_vkCmdSetPatchControlPointsEXT)load(context, "vkCmdSetPatchControlPointsEXT"); - table->vkCmdSetPrimitiveRestartEnableEXT = (PFN_vkCmdSetPrimitiveRestartEnableEXT)load(context, "vkCmdSetPrimitiveRestartEnableEXT"); - table->vkCmdSetRasterizerDiscardEnableEXT = (PFN_vkCmdSetRasterizerDiscardEnableEXT)load(context, "vkCmdSetRasterizerDiscardEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) - table->vkCmdSetAlphaToCoverageEnableEXT = (PFN_vkCmdSetAlphaToCoverageEnableEXT)load(context, "vkCmdSetAlphaToCoverageEnableEXT"); - table->vkCmdSetAlphaToOneEnableEXT = (PFN_vkCmdSetAlphaToOneEnableEXT)load(context, "vkCmdSetAlphaToOneEnableEXT"); - table->vkCmdSetColorBlendEnableEXT = (PFN_vkCmdSetColorBlendEnableEXT)load(context, "vkCmdSetColorBlendEnableEXT"); - table->vkCmdSetColorBlendEquationEXT = (PFN_vkCmdSetColorBlendEquationEXT)load(context, "vkCmdSetColorBlendEquationEXT"); - table->vkCmdSetColorWriteMaskEXT = (PFN_vkCmdSetColorWriteMaskEXT)load(context, "vkCmdSetColorWriteMaskEXT"); - table->vkCmdSetDepthClampEnableEXT = (PFN_vkCmdSetDepthClampEnableEXT)load(context, "vkCmdSetDepthClampEnableEXT"); - table->vkCmdSetLogicOpEnableEXT = (PFN_vkCmdSetLogicOpEnableEXT)load(context, "vkCmdSetLogicOpEnableEXT"); - table->vkCmdSetPolygonModeEXT = (PFN_vkCmdSetPolygonModeEXT)load(context, "vkCmdSetPolygonModeEXT"); - table->vkCmdSetRasterizationSamplesEXT = (PFN_vkCmdSetRasterizationSamplesEXT)load(context, "vkCmdSetRasterizationSamplesEXT"); - table->vkCmdSetSampleMaskEXT = (PFN_vkCmdSetSampleMaskEXT)load(context, "vkCmdSetSampleMaskEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) - table->vkCmdSetTessellationDomainOriginEXT = (PFN_vkCmdSetTessellationDomainOriginEXT)load(context, "vkCmdSetTessellationDomainOriginEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) 
- table->vkCmdSetRasterizationStreamEXT = (PFN_vkCmdSetRasterizationStreamEXT)load(context, "vkCmdSetRasterizationStreamEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) - table->vkCmdSetConservativeRasterizationModeEXT = (PFN_vkCmdSetConservativeRasterizationModeEXT)load(context, "vkCmdSetConservativeRasterizationModeEXT"); - table->vkCmdSetExtraPrimitiveOverestimationSizeEXT = (PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT)load(context, "vkCmdSetExtraPrimitiveOverestimationSizeEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) - table->vkCmdSetDepthClipEnableEXT = (PFN_vkCmdSetDepthClipEnableEXT)load(context, "vkCmdSetDepthClipEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) - table->vkCmdSetSampleLocationsEnableEXT = (PFN_vkCmdSetSampleLocationsEnableEXT)load(context, "vkCmdSetSampleLocationsEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) - table->vkCmdSetColorBlendAdvancedEXT = (PFN_vkCmdSetColorBlendAdvancedEXT)load(context, "vkCmdSetColorBlendAdvancedEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) - table->vkCmdSetProvokingVertexModeEXT = (PFN_vkCmdSetProvokingVertexModeEXT)load(context, "vkCmdSetProvokingVertexModeEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) - table->vkCmdSetLineRasterizationModeEXT = (PFN_vkCmdSetLineRasterizationModeEXT)load(context, "vkCmdSetLineRasterizationModeEXT"); - table->vkCmdSetLineStippleEnableEXT = (PFN_vkCmdSetLineStippleEnableEXT)load(context, "vkCmdSetLineStippleEnableEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) - 
table->vkCmdSetDepthClipNegativeOneToOneEXT = (PFN_vkCmdSetDepthClipNegativeOneToOneEXT)load(context, "vkCmdSetDepthClipNegativeOneToOneEXT"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) - table->vkCmdSetViewportWScalingEnableNV = (PFN_vkCmdSetViewportWScalingEnableNV)load(context, "vkCmdSetViewportWScalingEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) - table->vkCmdSetViewportSwizzleNV = (PFN_vkCmdSetViewportSwizzleNV)load(context, "vkCmdSetViewportSwizzleNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) - table->vkCmdSetCoverageToColorEnableNV = (PFN_vkCmdSetCoverageToColorEnableNV)load(context, "vkCmdSetCoverageToColorEnableNV"); - table->vkCmdSetCoverageToColorLocationNV = (PFN_vkCmdSetCoverageToColorLocationNV)load(context, "vkCmdSetCoverageToColorLocationNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) - table->vkCmdSetCoverageModulationModeNV = (PFN_vkCmdSetCoverageModulationModeNV)load(context, "vkCmdSetCoverageModulationModeNV"); - table->vkCmdSetCoverageModulationTableEnableNV = (PFN_vkCmdSetCoverageModulationTableEnableNV)load(context, "vkCmdSetCoverageModulationTableEnableNV"); - table->vkCmdSetCoverageModulationTableNV = (PFN_vkCmdSetCoverageModulationTableNV)load(context, "vkCmdSetCoverageModulationTableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) - table->vkCmdSetShadingRateImageEnableNV = (PFN_vkCmdSetShadingRateImageEnableNV)load(context, "vkCmdSetShadingRateImageEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) - table->vkCmdSetRepresentativeFragmentTestEnableNV = (PFN_vkCmdSetRepresentativeFragmentTestEnableNV)load(context, "vkCmdSetRepresentativeFragmentTestEnableNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) - table->vkCmdSetCoverageReductionModeNV = (PFN_vkCmdSetCoverageReductionModeNV)load(context, "vkCmdSetCoverageReductionModeNV"); -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) - table->vkGetImageSubresourceLayout2EXT = (PFN_vkGetImageSubresourceLayout2EXT)load(context, "vkGetImageSubresourceLayout2EXT"); -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) - table->vkCmdSetVertexInputEXT = (PFN_vkCmdSetVertexInputEXT)load(context, "vkCmdSetVertexInputEXT"); -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) - table->vkCmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR)load(context, "vkCmdPushDescriptorSetWithTemplateKHR"); -#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - table->vkGetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR)load(context, "vkGetDeviceGroupPresentCapabilitiesKHR"); - table->vkGetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR)load(context, "vkGetDeviceGroupSurfacePresentModesKHR"); -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - table->vkAcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR)load(context, "vkAcquireNextImage2KHR"); -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ - /* VOLK_GENERATE_LOAD_DEVICE_TABLE */ -} - -#ifdef __GNUC__ -#ifdef VOLK_DEFAULT_VISIBILITY -# pragma GCC visibility push(default) -#else -# pragma GCC visibility push(hidden) -#endif -#endif - -/* VOLK_GENERATE_PROTOTYPES_C */ -#if defined(VK_VERSION_1_0) -PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; -PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; -PFN_vkAllocateMemory vkAllocateMemory; -PFN_vkBeginCommandBuffer vkBeginCommandBuffer; -PFN_vkBindBufferMemory vkBindBufferMemory; -PFN_vkBindImageMemory vkBindImageMemory; -PFN_vkCmdBeginQuery vkCmdBeginQuery; -PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; -PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; -PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; -PFN_vkCmdBindPipeline vkCmdBindPipeline; -PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; -PFN_vkCmdBlitImage vkCmdBlitImage; -PFN_vkCmdClearAttachments vkCmdClearAttachments; -PFN_vkCmdClearColorImage vkCmdClearColorImage; 
-PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; -PFN_vkCmdCopyBuffer vkCmdCopyBuffer; -PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; -PFN_vkCmdCopyImage vkCmdCopyImage; -PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; -PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; -PFN_vkCmdDispatch vkCmdDispatch; -PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; -PFN_vkCmdDraw vkCmdDraw; -PFN_vkCmdDrawIndexed vkCmdDrawIndexed; -PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; -PFN_vkCmdDrawIndirect vkCmdDrawIndirect; -PFN_vkCmdEndQuery vkCmdEndQuery; -PFN_vkCmdEndRenderPass vkCmdEndRenderPass; -PFN_vkCmdExecuteCommands vkCmdExecuteCommands; -PFN_vkCmdFillBuffer vkCmdFillBuffer; -PFN_vkCmdNextSubpass vkCmdNextSubpass; -PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; -PFN_vkCmdPushConstants vkCmdPushConstants; -PFN_vkCmdResetEvent vkCmdResetEvent; -PFN_vkCmdResetQueryPool vkCmdResetQueryPool; -PFN_vkCmdResolveImage vkCmdResolveImage; -PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; -PFN_vkCmdSetDepthBias vkCmdSetDepthBias; -PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; -PFN_vkCmdSetEvent vkCmdSetEvent; -PFN_vkCmdSetLineWidth vkCmdSetLineWidth; -PFN_vkCmdSetScissor vkCmdSetScissor; -PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; -PFN_vkCmdSetStencilReference vkCmdSetStencilReference; -PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; -PFN_vkCmdSetViewport vkCmdSetViewport; -PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; -PFN_vkCmdWaitEvents vkCmdWaitEvents; -PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; -PFN_vkCreateBuffer vkCreateBuffer; -PFN_vkCreateBufferView vkCreateBufferView; -PFN_vkCreateCommandPool vkCreateCommandPool; -PFN_vkCreateComputePipelines vkCreateComputePipelines; -PFN_vkCreateDescriptorPool vkCreateDescriptorPool; -PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; -PFN_vkCreateDevice vkCreateDevice; -PFN_vkCreateEvent vkCreateEvent; -PFN_vkCreateFence vkCreateFence; -PFN_vkCreateFramebuffer vkCreateFramebuffer; -PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; -PFN_vkCreateImage vkCreateImage; -PFN_vkCreateImageView vkCreateImageView; -PFN_vkCreateInstance vkCreateInstance; -PFN_vkCreatePipelineCache vkCreatePipelineCache; -PFN_vkCreatePipelineLayout vkCreatePipelineLayout; -PFN_vkCreateQueryPool vkCreateQueryPool; -PFN_vkCreateRenderPass vkCreateRenderPass; -PFN_vkCreateSampler vkCreateSampler; -PFN_vkCreateSemaphore vkCreateSemaphore; -PFN_vkCreateShaderModule vkCreateShaderModule; -PFN_vkDestroyBuffer vkDestroyBuffer; -PFN_vkDestroyBufferView vkDestroyBufferView; -PFN_vkDestroyCommandPool vkDestroyCommandPool; -PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; -PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; -PFN_vkDestroyDevice vkDestroyDevice; -PFN_vkDestroyEvent vkDestroyEvent; -PFN_vkDestroyFence vkDestroyFence; -PFN_vkDestroyFramebuffer vkDestroyFramebuffer; -PFN_vkDestroyImage vkDestroyImage; -PFN_vkDestroyImageView vkDestroyImageView; -PFN_vkDestroyInstance vkDestroyInstance; -PFN_vkDestroyPipeline vkDestroyPipeline; -PFN_vkDestroyPipelineCache vkDestroyPipelineCache; -PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; -PFN_vkDestroyQueryPool vkDestroyQueryPool; -PFN_vkDestroyRenderPass vkDestroyRenderPass; -PFN_vkDestroySampler vkDestroySampler; -PFN_vkDestroySemaphore vkDestroySemaphore; -PFN_vkDestroyShaderModule vkDestroyShaderModule; -PFN_vkDeviceWaitIdle vkDeviceWaitIdle; -PFN_vkEndCommandBuffer vkEndCommandBuffer; -PFN_vkEnumerateDeviceExtensionProperties 
vkEnumerateDeviceExtensionProperties; -PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; -PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; -PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; -PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; -PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; -PFN_vkFreeCommandBuffers vkFreeCommandBuffers; -PFN_vkFreeDescriptorSets vkFreeDescriptorSets; -PFN_vkFreeMemory vkFreeMemory; -PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; -PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; -PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; -PFN_vkGetDeviceQueue vkGetDeviceQueue; -PFN_vkGetEventStatus vkGetEventStatus; -PFN_vkGetFenceStatus vkGetFenceStatus; -PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; -PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; -PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; -PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; -PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; -PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; -PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; -PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; -PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; -PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; -PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; -PFN_vkGetPipelineCacheData vkGetPipelineCacheData; -PFN_vkGetQueryPoolResults vkGetQueryPoolResults; -PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; -PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; -PFN_vkMapMemory vkMapMemory; -PFN_vkMergePipelineCaches vkMergePipelineCaches; -PFN_vkQueueBindSparse vkQueueBindSparse; -PFN_vkQueueSubmit vkQueueSubmit; -PFN_vkQueueWaitIdle vkQueueWaitIdle; -PFN_vkResetCommandBuffer vkResetCommandBuffer; -PFN_vkResetCommandPool vkResetCommandPool; -PFN_vkResetDescriptorPool vkResetDescriptorPool; -PFN_vkResetEvent vkResetEvent; -PFN_vkResetFences vkResetFences; -PFN_vkSetEvent vkSetEvent; -PFN_vkUnmapMemory vkUnmapMemory; -PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; -PFN_vkWaitForFences vkWaitForFences; -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) -PFN_vkBindBufferMemory2 vkBindBufferMemory2; -PFN_vkBindImageMemory2 vkBindImageMemory2; -PFN_vkCmdDispatchBase vkCmdDispatchBase; -PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask; -PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate; -PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion; -PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate; -PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion; -PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; -PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups; -PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; -PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport; -PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures; -PFN_vkGetDeviceQueue2 vkGetDeviceQueue2; -PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; -PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2; -PFN_vkGetPhysicalDeviceExternalBufferProperties 
vkGetPhysicalDeviceExternalBufferProperties; -PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties; -PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties; -PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2; -PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2; -PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2; -PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; -PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2; -PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2; -PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2; -PFN_vkTrimCommandPool vkTrimCommandPool; -PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate; -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_2) -PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2; -PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount; -PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount; -PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2; -PFN_vkCmdNextSubpass2 vkCmdNextSubpass2; -PFN_vkCreateRenderPass2 vkCreateRenderPass2; -PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress; -PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress; -PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress; -PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue; -PFN_vkResetQueryPool vkResetQueryPool; -PFN_vkSignalSemaphore vkSignalSemaphore; -PFN_vkWaitSemaphores vkWaitSemaphores; -#endif /* defined(VK_VERSION_1_2) */ -#if defined(VK_VERSION_1_3) -PFN_vkCmdBeginRendering vkCmdBeginRendering; -PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2; -PFN_vkCmdBlitImage2 vkCmdBlitImage2; -PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2; -PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2; -PFN_vkCmdCopyImage2 vkCmdCopyImage2; -PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2; -PFN_vkCmdEndRendering vkCmdEndRendering; -PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; -PFN_vkCmdResetEvent2 vkCmdResetEvent2; -PFN_vkCmdResolveImage2 vkCmdResolveImage2; -PFN_vkCmdSetCullMode vkCmdSetCullMode; -PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable; -PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable; -PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp; -PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable; -PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable; -PFN_vkCmdSetEvent2 vkCmdSetEvent2; -PFN_vkCmdSetFrontFace vkCmdSetFrontFace; -PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable; -PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology; -PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable; -PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount; -PFN_vkCmdSetStencilOp vkCmdSetStencilOp; -PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable; -PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount; -PFN_vkCmdWaitEvents2 vkCmdWaitEvents2; -PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2; -PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot; -PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot; -PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements; -PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements; -PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements; -PFN_vkGetPhysicalDeviceToolProperties 
vkGetPhysicalDeviceToolProperties; -PFN_vkGetPrivateData vkGetPrivateData; -PFN_vkQueueSubmit2 vkQueueSubmit2; -PFN_vkSetPrivateData vkSetPrivateData; -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_AMDX_shader_enqueue) -PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX; -PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX; -PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX; -PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX; -PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX; -PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX; -PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX; -#endif /* defined(VK_AMDX_shader_enqueue) */ -#if defined(VK_AMD_anti_lag) -PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD; -#endif /* defined(VK_AMD_anti_lag) */ -#if defined(VK_AMD_buffer_marker) -PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; -#endif /* defined(VK_AMD_buffer_marker) */ -#if defined(VK_AMD_display_native_hdr) -PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD; -#endif /* defined(VK_AMD_display_native_hdr) */ -#if defined(VK_AMD_draw_indirect_count) -PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD; -PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD; -#endif /* defined(VK_AMD_draw_indirect_count) */ -#if defined(VK_AMD_shader_info) -PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD; -#endif /* defined(VK_AMD_shader_info) */ -#if defined(VK_ANDROID_external_memory_android_hardware_buffer) -PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID; -PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID; -#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */ -#if defined(VK_EXT_acquire_drm_display) -PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT; -PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT; -#endif /* defined(VK_EXT_acquire_drm_display) */ -#if defined(VK_EXT_acquire_xlib_display) -PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT; -PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT; -#endif /* defined(VK_EXT_acquire_xlib_display) */ -#if defined(VK_EXT_attachment_feedback_loop_dynamic_state) -PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT; -#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */ -#if defined(VK_EXT_buffer_device_address) -PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT; -#endif /* defined(VK_EXT_buffer_device_address) */ -#if defined(VK_EXT_calibrated_timestamps) -PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT; -PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT; -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_color_write_enable) -PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT; -#endif /* defined(VK_EXT_color_write_enable) */ -#if defined(VK_EXT_conditional_rendering) -PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT; -PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT; -#endif /* defined(VK_EXT_conditional_rendering) */ -#if defined(VK_EXT_debug_marker) -PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT; -PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT; -PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT; -PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT; 
-PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT; -#endif /* defined(VK_EXT_debug_marker) */ -#if defined(VK_EXT_debug_report) -PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT; -PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT; -PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT; -#endif /* defined(VK_EXT_debug_report) */ -#if defined(VK_EXT_debug_utils) -PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT; -PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT; -PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT; -PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT; -PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT; -PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT; -PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT; -PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT; -PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT; -PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT; -PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT; -#endif /* defined(VK_EXT_debug_utils) */ -#if defined(VK_EXT_depth_bias_control) -PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT; -#endif /* defined(VK_EXT_depth_bias_control) */ -#if defined(VK_EXT_descriptor_buffer) -PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT; -PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT; -PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT; -PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT; -PFN_vkGetDescriptorEXT vkGetDescriptorEXT; -PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT; -PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT; -PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT; -PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT; -PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) -PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */ -#if defined(VK_EXT_device_fault) -PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT; -#endif /* defined(VK_EXT_device_fault) */ -#if defined(VK_EXT_direct_mode_display) -PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT; -#endif /* defined(VK_EXT_direct_mode_display) */ -#if defined(VK_EXT_directfb_surface) -PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT; -PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT; -#endif /* defined(VK_EXT_directfb_surface) */ -#if defined(VK_EXT_discard_rectangles) -PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT; -#endif /* defined(VK_EXT_discard_rectangles) */ -#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 -PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT; -PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT; -#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */ 
-#if defined(VK_EXT_display_control) -PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT; -PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT; -PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT; -PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT; -#endif /* defined(VK_EXT_display_control) */ -#if defined(VK_EXT_display_surface_counter) -PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT; -#endif /* defined(VK_EXT_display_surface_counter) */ -#if defined(VK_EXT_external_memory_host) -PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT; -#endif /* defined(VK_EXT_external_memory_host) */ -#if defined(VK_EXT_full_screen_exclusive) -PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT; -PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT; -PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT; -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) -PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT; -#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */ -#if defined(VK_EXT_hdr_metadata) -PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT; -#endif /* defined(VK_EXT_hdr_metadata) */ -#if defined(VK_EXT_headless_surface) -PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT; -#endif /* defined(VK_EXT_headless_surface) */ -#if defined(VK_EXT_host_image_copy) -PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT; -PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT; -PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT; -PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT; -#endif /* defined(VK_EXT_host_image_copy) */ -#if defined(VK_EXT_host_query_reset) -PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT; -#endif /* defined(VK_EXT_host_query_reset) */ -#if defined(VK_EXT_image_drm_format_modifier) -PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT; -#endif /* defined(VK_EXT_image_drm_format_modifier) */ -#if defined(VK_EXT_line_rasterization) -PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT; -#endif /* defined(VK_EXT_line_rasterization) */ -#if defined(VK_EXT_mesh_shader) -PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT; -PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT; -PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT; -#endif /* defined(VK_EXT_mesh_shader) */ -#if defined(VK_EXT_metal_objects) -PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT; -#endif /* defined(VK_EXT_metal_objects) */ -#if defined(VK_EXT_metal_surface) -PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT; -#endif /* defined(VK_EXT_metal_surface) */ -#if defined(VK_EXT_multi_draw) -PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT; -PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT; -#endif /* defined(VK_EXT_multi_draw) */ -#if defined(VK_EXT_opacity_micromap) -PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT; -PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT; -PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT; -PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT; -PFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT; -PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT; -PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT; -PFN_vkCopyMicromapEXT vkCopyMicromapEXT; 
-PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT; -PFN_vkCreateMicromapEXT vkCreateMicromapEXT; -PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT; -PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT; -PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT; -PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT; -#endif /* defined(VK_EXT_opacity_micromap) */ -#if defined(VK_EXT_pageable_device_local_memory) -PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT; -#endif /* defined(VK_EXT_pageable_device_local_memory) */ -#if defined(VK_EXT_pipeline_properties) -PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT; -#endif /* defined(VK_EXT_pipeline_properties) */ -#if defined(VK_EXT_private_data) -PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT; -PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT; -PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT; -PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT; -#endif /* defined(VK_EXT_private_data) */ -#if defined(VK_EXT_sample_locations) -PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT; -PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT; -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_shader_module_identifier) -PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT; -PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT; -#endif /* defined(VK_EXT_shader_module_identifier) */ -#if defined(VK_EXT_shader_object) -PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT; -PFN_vkCreateShadersEXT vkCreateShadersEXT; -PFN_vkDestroyShaderEXT vkDestroyShaderEXT; -PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT; -#endif /* defined(VK_EXT_shader_object) */ -#if defined(VK_EXT_swapchain_maintenance1) -PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT; -#endif /* defined(VK_EXT_swapchain_maintenance1) */ -#if defined(VK_EXT_tooling_info) -PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT; -#endif /* defined(VK_EXT_tooling_info) */ -#if defined(VK_EXT_transform_feedback) -PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT; -PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT; -PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT; -PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT; -PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT; -PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT; -#endif /* defined(VK_EXT_transform_feedback) */ -#if defined(VK_EXT_validation_cache) -PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT; -PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT; -PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT; -PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT; -#endif /* defined(VK_EXT_validation_cache) */ -#if defined(VK_FUCHSIA_buffer_collection) -PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA; -PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA; -PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA; -PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA; -PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA; -#endif /* defined(VK_FUCHSIA_buffer_collection) */ -#if defined(VK_FUCHSIA_external_memory) -PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA; 
-PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_memory) */ -#if defined(VK_FUCHSIA_external_semaphore) -PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA; -PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_semaphore) */ -#if defined(VK_FUCHSIA_imagepipe_surface) -PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA; -#endif /* defined(VK_FUCHSIA_imagepipe_surface) */ -#if defined(VK_GGP_stream_descriptor_surface) -PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP; -#endif /* defined(VK_GGP_stream_descriptor_surface) */ -#if defined(VK_GOOGLE_display_timing) -PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE; -PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE; -#endif /* defined(VK_GOOGLE_display_timing) */ -#if defined(VK_HUAWEI_cluster_culling_shader) -PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI; -PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI; -#endif /* defined(VK_HUAWEI_cluster_culling_shader) */ -#if defined(VK_HUAWEI_invocation_mask) -PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI; -#endif /* defined(VK_HUAWEI_invocation_mask) */ -#if defined(VK_HUAWEI_subpass_shading) -PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI; -PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI; -#endif /* defined(VK_HUAWEI_subpass_shading) */ -#if defined(VK_INTEL_performance_query) -PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL; -PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL; -PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL; -PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL; -PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL; -PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL; -PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL; -PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL; -PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL; -#endif /* defined(VK_INTEL_performance_query) */ -#if defined(VK_KHR_acceleration_structure) -PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR; -PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR; -PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR; -PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR; -PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR; -PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR; -PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR; -PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR; -PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR; -PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR; -PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR; -PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR; -PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR; -PFN_vkGetAccelerationStructureDeviceAddressKHR 
vkGetAccelerationStructureDeviceAddressKHR; -PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR; -PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR; -#endif /* defined(VK_KHR_acceleration_structure) */ -#if defined(VK_KHR_android_surface) -PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; -#endif /* defined(VK_KHR_android_surface) */ -#if defined(VK_KHR_bind_memory2) -PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR; -PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR; -#endif /* defined(VK_KHR_bind_memory2) */ -#if defined(VK_KHR_buffer_device_address) -PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR; -PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR; -PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR; -#endif /* defined(VK_KHR_buffer_device_address) */ -#if defined(VK_KHR_calibrated_timestamps) -PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR; -PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR vkGetPhysicalDeviceCalibrateableTimeDomainsKHR; -#endif /* defined(VK_KHR_calibrated_timestamps) */ -#if defined(VK_KHR_cooperative_matrix) -PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR; -#endif /* defined(VK_KHR_cooperative_matrix) */ -#if defined(VK_KHR_copy_commands2) -PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR; -PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR; -PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR; -PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR; -PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR; -PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR; -#endif /* defined(VK_KHR_copy_commands2) */ -#if defined(VK_KHR_create_renderpass2) -PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR; -PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR; -PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR; -PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR; -#endif /* defined(VK_KHR_create_renderpass2) */ -#if defined(VK_KHR_deferred_host_operations) -PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR; -PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR; -PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR; -PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR; -PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR; -#endif /* defined(VK_KHR_deferred_host_operations) */ -#if defined(VK_KHR_descriptor_update_template) -PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; -PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; -PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR; -#endif /* defined(VK_KHR_descriptor_update_template) */ -#if defined(VK_KHR_device_group) -PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR; -PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR; -PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR; -#endif /* defined(VK_KHR_device_group) */ -#if defined(VK_KHR_device_group_creation) -PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR; -#endif /* defined(VK_KHR_device_group_creation) */ -#if defined(VK_KHR_display) -PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR; -PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR; -PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR; -PFN_vkGetDisplayPlaneCapabilitiesKHR 
vkGetDisplayPlaneCapabilitiesKHR; -PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR; -PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR; -PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR; -#endif /* defined(VK_KHR_display) */ -#if defined(VK_KHR_display_swapchain) -PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; -#endif /* defined(VK_KHR_display_swapchain) */ -#if defined(VK_KHR_draw_indirect_count) -PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR; -PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR; -#endif /* defined(VK_KHR_draw_indirect_count) */ -#if defined(VK_KHR_dynamic_rendering) -PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR; -PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR; -#endif /* defined(VK_KHR_dynamic_rendering) */ -#if defined(VK_KHR_dynamic_rendering_local_read) -PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR; -PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR; -#endif /* defined(VK_KHR_dynamic_rendering_local_read) */ -#if defined(VK_KHR_external_fence_capabilities) -PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR; -#endif /* defined(VK_KHR_external_fence_capabilities) */ -#if defined(VK_KHR_external_fence_fd) -PFN_vkGetFenceFdKHR vkGetFenceFdKHR; -PFN_vkImportFenceFdKHR vkImportFenceFdKHR; -#endif /* defined(VK_KHR_external_fence_fd) */ -#if defined(VK_KHR_external_fence_win32) -PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR; -PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR; -#endif /* defined(VK_KHR_external_fence_win32) */ -#if defined(VK_KHR_external_memory_capabilities) -PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR; -#endif /* defined(VK_KHR_external_memory_capabilities) */ -#if defined(VK_KHR_external_memory_fd) -PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR; -PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR; -#endif /* defined(VK_KHR_external_memory_fd) */ -#if defined(VK_KHR_external_memory_win32) -PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR; -PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR; -#endif /* defined(VK_KHR_external_memory_win32) */ -#if defined(VK_KHR_external_semaphore_capabilities) -PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR; -#endif /* defined(VK_KHR_external_semaphore_capabilities) */ -#if defined(VK_KHR_external_semaphore_fd) -PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR; -PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR; -#endif /* defined(VK_KHR_external_semaphore_fd) */ -#if defined(VK_KHR_external_semaphore_win32) -PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR; -PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR; -#endif /* defined(VK_KHR_external_semaphore_win32) */ -#if defined(VK_KHR_fragment_shading_rate) -PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR; -PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR; -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_display_properties2) -PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR; -PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR; -PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR 
vkGetPhysicalDeviceDisplayPlaneProperties2KHR; -PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR; -#endif /* defined(VK_KHR_get_display_properties2) */ -#if defined(VK_KHR_get_memory_requirements2) -PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR; -PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR; -PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR; -#endif /* defined(VK_KHR_get_memory_requirements2) */ -#if defined(VK_KHR_get_physical_device_properties2) -PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR; -PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR; -PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR; -PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR; -PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR; -PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR; -PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR; -#endif /* defined(VK_KHR_get_physical_device_properties2) */ -#if defined(VK_KHR_get_surface_capabilities2) -PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR; -PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR; -#endif /* defined(VK_KHR_get_surface_capabilities2) */ -#if defined(VK_KHR_line_rasterization) -PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR; -#endif /* defined(VK_KHR_line_rasterization) */ -#if defined(VK_KHR_maintenance1) -PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; -#endif /* defined(VK_KHR_maintenance1) */ -#if defined(VK_KHR_maintenance3) -PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; -#endif /* defined(VK_KHR_maintenance3) */ -#if defined(VK_KHR_maintenance4) -PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR; -PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR; -PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR; -#endif /* defined(VK_KHR_maintenance4) */ -#if defined(VK_KHR_maintenance5) -PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR; -PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR; -PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR; -PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR; -#endif /* defined(VK_KHR_maintenance5) */ -#if defined(VK_KHR_maintenance6) -PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR; -PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR; -#endif /* defined(VK_KHR_maintenance6) */ -#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) -PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR; -PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR; -#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) -PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT; -PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT; -#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_KHR_map_memory2) -PFN_vkMapMemory2KHR vkMapMemory2KHR; -PFN_vkUnmapMemory2KHR 
vkUnmapMemory2KHR; -#endif /* defined(VK_KHR_map_memory2) */ -#if defined(VK_KHR_performance_query) -PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR; -PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; -PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR; -PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR; -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_pipeline_binary) -PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR; -PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR; -PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR; -PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR; -PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR; -#endif /* defined(VK_KHR_pipeline_binary) */ -#if defined(VK_KHR_pipeline_executable_properties) -PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR; -PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR; -PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR; -#endif /* defined(VK_KHR_pipeline_executable_properties) */ -#if defined(VK_KHR_present_wait) -PFN_vkWaitForPresentKHR vkWaitForPresentKHR; -#endif /* defined(VK_KHR_present_wait) */ -#if defined(VK_KHR_push_descriptor) -PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR; -#endif /* defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) -PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR; -#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_ray_tracing_pipeline) -PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR; -PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR; -PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR; -PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR; -PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR; -PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR; -PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR; -#endif /* defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_sampler_ycbcr_conversion) -PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR; -PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR; -#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */ -#if defined(VK_KHR_shared_presentable_image) -PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR; -#endif /* defined(VK_KHR_shared_presentable_image) */ -#if defined(VK_KHR_surface) -PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; -PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; -PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; -PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; -PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; -#endif /* defined(VK_KHR_surface) */ -#if defined(VK_KHR_swapchain) -PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; -PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; -PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; -PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; -PFN_vkQueuePresentKHR vkQueuePresentKHR; -#endif /* 
defined(VK_KHR_swapchain) */ -#if defined(VK_KHR_synchronization2) -PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR; -PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR; -PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR; -PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR; -PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR; -PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR; -#endif /* defined(VK_KHR_synchronization2) */ -#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) -PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */ -#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) -PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_KHR_timeline_semaphore) -PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR; -PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR; -PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR; -#endif /* defined(VK_KHR_timeline_semaphore) */ -#if defined(VK_KHR_video_decode_queue) -PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR; -#endif /* defined(VK_KHR_video_decode_queue) */ -#if defined(VK_KHR_video_encode_queue) -PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR; -PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR; -PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR; -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) -PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR; -PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR; -PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR; -PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR; -PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR; -PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR; -PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR; -PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR; -PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR; -PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR; -PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR; -PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR; -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_KHR_wayland_surface) -PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR; -PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR; -#endif /* defined(VK_KHR_wayland_surface) */ -#if defined(VK_KHR_win32_surface) -PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR; -PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR; -#endif /* defined(VK_KHR_win32_surface) */ -#if defined(VK_KHR_xcb_surface) -PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR; -PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR; -#endif /* defined(VK_KHR_xcb_surface) */ -#if defined(VK_KHR_xlib_surface) -PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR; -PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR; -#endif /* defined(VK_KHR_xlib_surface) */ -#if defined(VK_MVK_ios_surface) -PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK; -#endif /* defined(VK_MVK_ios_surface) */ -#if 
defined(VK_MVK_macos_surface) -PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK; -#endif /* defined(VK_MVK_macos_surface) */ -#if defined(VK_NN_vi_surface) -PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN; -#endif /* defined(VK_NN_vi_surface) */ -#if defined(VK_NVX_binary_import) -PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX; -PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX; -PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX; -PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX; -PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX; -#endif /* defined(VK_NVX_binary_import) */ -#if defined(VK_NVX_image_view_handle) -PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX; -PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX; -#endif /* defined(VK_NVX_image_view_handle) */ -#if defined(VK_NV_acquire_winrt_display) -PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV; -PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV; -#endif /* defined(VK_NV_acquire_winrt_display) */ -#if defined(VK_NV_clip_space_w_scaling) -PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV; -#endif /* defined(VK_NV_clip_space_w_scaling) */ -#if defined(VK_NV_cooperative_matrix) -PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV; -#endif /* defined(VK_NV_cooperative_matrix) */ -#if defined(VK_NV_copy_memory_indirect) -PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV; -PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV; -#endif /* defined(VK_NV_copy_memory_indirect) */ -#if defined(VK_NV_coverage_reduction_mode) -PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV; -#endif /* defined(VK_NV_coverage_reduction_mode) */ -#if defined(VK_NV_cuda_kernel_launch) -PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV; -PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV; -PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV; -PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV; -PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV; -PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV; -#endif /* defined(VK_NV_cuda_kernel_launch) */ -#if defined(VK_NV_device_diagnostic_checkpoints) -PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV; -PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV; -#endif /* defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_NV_device_generated_commands) -PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV; -PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV; -PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV; -PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV; -PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV; -PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) -PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV; -PFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV; -PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_capabilities) -PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV; -#endif /* defined(VK_NV_external_memory_capabilities) */ -#if defined(VK_NV_external_memory_rdma) 
-PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV; -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) -PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV; -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) -PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV; -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) -PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV; -PFN_vkLatencySleepNV vkLatencySleepNV; -PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV; -PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV; -PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV; -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) -PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV; -PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV; -#endif /* defined(VK_NV_memory_decompression) */ -#if defined(VK_NV_mesh_shader) -PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV; -PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV; -PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV; -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) -PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV; -PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV; -PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV; -PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV; -PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV; -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) -PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV; -PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV; -PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV; -PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV; -PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV; -PFN_vkCompileDeferredNV vkCompileDeferredNV; -PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV; -PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV; -PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV; -PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV; -PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV; -PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV; -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 -PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV; -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) -PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV; -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) -PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV; -PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV; -PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV; -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) -PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM; -PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM; -#endif /* defined(VK_QCOM_tile_properties) */ -#if 
defined(VK_QNX_external_memory_screen_buffer) -PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX; -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_QNX_screen_surface) -PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX; -PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX; -#endif /* defined(VK_QNX_screen_surface) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) -PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE; -PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE; -#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) -PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT; -PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT; -PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT; -PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT; -PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT; -PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT; -PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT; -PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT; -PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT; -PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT; -PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT; -PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) -PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT; -PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT; -PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT; -PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT; -PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) -PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT; -PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT; -PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT; -PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT; -PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT; -PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT; -PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT; -PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT; -PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT; -PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) -PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) -PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || 
(defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) -PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT; -PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) -PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) -PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) -PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) -PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) -PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT; -PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) -PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) -PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || 
(defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) -PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) -PFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV; -PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) -PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV; -PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV; -PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) -PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) -PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) -PFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) -PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT; -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) -PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) -PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR; -#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || 
defined(VK_KHR_descriptor_update_template))) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
-PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;
-PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;
-PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR;
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
-#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
-PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;
-#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */
-/* VOLK_GENERATE_PROTOTYPES_C */
-
-#ifdef __GNUC__
-# pragma GCC visibility pop
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-/* clang-format on */
diff --git a/third_party/volk.h b/third_party/volk.h
deleted file mode 100644
index 7e9edd4..0000000
--- a/third_party/volk.h
+++ /dev/null
@@ -1,2089 +0,0 @@
-/**
- * volk
- *
- * Copyright (C) 2018-2024, by Arseny Kapoulkine (arseny.kapoulkine@gmail.com)
- * Report bugs and download new versions at https://github.com/zeux/volk
- *
- * This library is distributed under the MIT License. See notice at the end of this file.
- */
-/* clang-format off */
-#ifndef VOLK_H_
-#define VOLK_H_
-
-#if defined(VULKAN_H_) && !defined(VK_NO_PROTOTYPES)
-# error To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h
-#endif
-
-/* VOLK_GENERATE_VERSION_DEFINE */
-#define VOLK_HEADER_VERSION 295
-/* VOLK_GENERATE_VERSION_DEFINE */
-
-#ifndef VK_NO_PROTOTYPES
-# define VK_NO_PROTOTYPES
-#endif
-
-#ifndef VULKAN_H_
-# ifdef VOLK_VULKAN_H_PATH
-# include VOLK_VULKAN_H_PATH
-# elif defined(VK_USE_PLATFORM_WIN32_KHR)
-# include <vulkan/vk_platform.h>
-# include <vulkan/vulkan_core.h>
-
- /* When VK_USE_PLATFORM_WIN32_KHR is defined, instead of including vulkan.h directly, we include individual parts of the SDK
- * This is necessary to avoid including <windows.h> which is very heavy - it takes 200ms to parse without WIN32_LEAN_AND_MEAN
- * and 100ms to parse with it. vulkan_win32.h only needs a few symbols that are easy to redefine ourselves.
- */
- typedef unsigned long DWORD;
- typedef const wchar_t* LPCWSTR;
- typedef void* HANDLE;
- typedef struct HINSTANCE__* HINSTANCE;
- typedef struct HWND__* HWND;
- typedef struct HMONITOR__* HMONITOR;
- typedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;
-
-# include <vulkan/vulkan_win32.h>
-
-# ifdef VK_ENABLE_BETA_EXTENSIONS
-# include <vulkan/vulkan_beta.h>
-# endif
-# else
-# include <vulkan/vulkan.h>
-# endif
-#endif
-
-/* Disable several extensions on earlier SDKs because later SDKs introduce a backwards incompatible change to function signatures */
-#if VK_HEADER_VERSION < 140
-# undef VK_NVX_image_view_handle
-#endif
-#if VK_HEADER_VERSION < 184
-# undef VK_HUAWEI_subpass_shading
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct VolkDeviceTable;
-
-/**
- * Initialize library by loading Vulkan loader; call this function before creating the Vulkan instance.
- *
- * Returns VK_SUCCESS on success and VK_ERROR_INITIALIZATION_FAILED otherwise.
- */
-VkResult volkInitialize(void);
-
-/**
- * Initialize library by providing a custom handler to load global symbols.
- *
- * This function can be used instead of volkInitialize.
- * The handler function pointer will be asked to load global Vulkan symbols which require no instance
- * (such as vkCreateInstance, vkEnumerateInstance* and vkEnumerateInstanceVersion if available).
- */
-void volkInitializeCustom(PFN_vkGetInstanceProcAddr handler);
-
-/**
- * Finalize library by unloading Vulkan loader and resetting global symbols to NULL.
- *
- * This function does not need to be called on process exit (as loader will be unloaded automatically) or if volkInitialize failed.
- * In general this function is optional to call but may be useful in rare cases eg if volk needs to be reinitialized multiple times.
- */
-void volkFinalize(void);
-
-/**
- * Get Vulkan instance version supported by the Vulkan loader, or 0 if Vulkan isn't supported
- *
- * Returns 0 if volkInitialize wasn't called or failed.
- */
-uint32_t volkGetInstanceVersion(void);
-
-/**
- * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.
- */
-void volkLoadInstance(VkInstance instance);
-
-/**
- * Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.
- * Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.
- */
-void volkLoadInstanceOnly(VkInstance instance);
-
-/**
- * Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.
- *
- * Note: this is not suitable for applications that want to use multiple VkDevice objects concurrently.
- */
-void volkLoadDevice(VkDevice device);
-
-/**
- * Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),
- * or VK_NULL_HANDLE if volkLoadInstance() has not been called.
- */
-VkInstance volkGetLoadedInstance(void);
-
-/**
- * Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),
- * or VK_NULL_HANDLE if volkLoadDevice() has not been called.
- */
-VkDevice volkGetLoadedDevice(void);
-
-/**
- * Load function pointers using application-created VkDevice into a table.
- * Application should use function pointers from that table instead of using global function pointers.
- */ -void volkLoadDeviceTable(struct VolkDeviceTable* table, VkDevice device); - -/** - * Device-specific function pointer table - */ -struct VolkDeviceTable -{ - /* VOLK_GENERATE_DEVICE_TABLE */ -#if defined(VK_VERSION_1_0) - PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; - PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; - PFN_vkAllocateMemory vkAllocateMemory; - PFN_vkBeginCommandBuffer vkBeginCommandBuffer; - PFN_vkBindBufferMemory vkBindBufferMemory; - PFN_vkBindImageMemory vkBindImageMemory; - PFN_vkCmdBeginQuery vkCmdBeginQuery; - PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; - PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; - PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; - PFN_vkCmdBindPipeline vkCmdBindPipeline; - PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; - PFN_vkCmdBlitImage vkCmdBlitImage; - PFN_vkCmdClearAttachments vkCmdClearAttachments; - PFN_vkCmdClearColorImage vkCmdClearColorImage; - PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; - PFN_vkCmdCopyBuffer vkCmdCopyBuffer; - PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; - PFN_vkCmdCopyImage vkCmdCopyImage; - PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; - PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; - PFN_vkCmdDispatch vkCmdDispatch; - PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; - PFN_vkCmdDraw vkCmdDraw; - PFN_vkCmdDrawIndexed vkCmdDrawIndexed; - PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; - PFN_vkCmdDrawIndirect vkCmdDrawIndirect; - PFN_vkCmdEndQuery vkCmdEndQuery; - PFN_vkCmdEndRenderPass vkCmdEndRenderPass; - PFN_vkCmdExecuteCommands vkCmdExecuteCommands; - PFN_vkCmdFillBuffer vkCmdFillBuffer; - PFN_vkCmdNextSubpass vkCmdNextSubpass; - PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; - PFN_vkCmdPushConstants vkCmdPushConstants; - PFN_vkCmdResetEvent vkCmdResetEvent; - PFN_vkCmdResetQueryPool vkCmdResetQueryPool; - PFN_vkCmdResolveImage vkCmdResolveImage; - PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; - PFN_vkCmdSetDepthBias vkCmdSetDepthBias; - PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; - PFN_vkCmdSetEvent vkCmdSetEvent; - PFN_vkCmdSetLineWidth vkCmdSetLineWidth; - PFN_vkCmdSetScissor vkCmdSetScissor; - PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; - PFN_vkCmdSetStencilReference vkCmdSetStencilReference; - PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; - PFN_vkCmdSetViewport vkCmdSetViewport; - PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; - PFN_vkCmdWaitEvents vkCmdWaitEvents; - PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; - PFN_vkCreateBuffer vkCreateBuffer; - PFN_vkCreateBufferView vkCreateBufferView; - PFN_vkCreateCommandPool vkCreateCommandPool; - PFN_vkCreateComputePipelines vkCreateComputePipelines; - PFN_vkCreateDescriptorPool vkCreateDescriptorPool; - PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; - PFN_vkCreateEvent vkCreateEvent; - PFN_vkCreateFence vkCreateFence; - PFN_vkCreateFramebuffer vkCreateFramebuffer; - PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; - PFN_vkCreateImage vkCreateImage; - PFN_vkCreateImageView vkCreateImageView; - PFN_vkCreatePipelineCache vkCreatePipelineCache; - PFN_vkCreatePipelineLayout vkCreatePipelineLayout; - PFN_vkCreateQueryPool vkCreateQueryPool; - PFN_vkCreateRenderPass vkCreateRenderPass; - PFN_vkCreateSampler vkCreateSampler; - PFN_vkCreateSemaphore vkCreateSemaphore; - PFN_vkCreateShaderModule vkCreateShaderModule; - PFN_vkDestroyBuffer vkDestroyBuffer; - PFN_vkDestroyBufferView vkDestroyBufferView; - PFN_vkDestroyCommandPool vkDestroyCommandPool; - 
PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; - PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; - PFN_vkDestroyDevice vkDestroyDevice; - PFN_vkDestroyEvent vkDestroyEvent; - PFN_vkDestroyFence vkDestroyFence; - PFN_vkDestroyFramebuffer vkDestroyFramebuffer; - PFN_vkDestroyImage vkDestroyImage; - PFN_vkDestroyImageView vkDestroyImageView; - PFN_vkDestroyPipeline vkDestroyPipeline; - PFN_vkDestroyPipelineCache vkDestroyPipelineCache; - PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; - PFN_vkDestroyQueryPool vkDestroyQueryPool; - PFN_vkDestroyRenderPass vkDestroyRenderPass; - PFN_vkDestroySampler vkDestroySampler; - PFN_vkDestroySemaphore vkDestroySemaphore; - PFN_vkDestroyShaderModule vkDestroyShaderModule; - PFN_vkDeviceWaitIdle vkDeviceWaitIdle; - PFN_vkEndCommandBuffer vkEndCommandBuffer; - PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; - PFN_vkFreeCommandBuffers vkFreeCommandBuffers; - PFN_vkFreeDescriptorSets vkFreeDescriptorSets; - PFN_vkFreeMemory vkFreeMemory; - PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; - PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; - PFN_vkGetDeviceQueue vkGetDeviceQueue; - PFN_vkGetEventStatus vkGetEventStatus; - PFN_vkGetFenceStatus vkGetFenceStatus; - PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; - PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; - PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; - PFN_vkGetPipelineCacheData vkGetPipelineCacheData; - PFN_vkGetQueryPoolResults vkGetQueryPoolResults; - PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; - PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; - PFN_vkMapMemory vkMapMemory; - PFN_vkMergePipelineCaches vkMergePipelineCaches; - PFN_vkQueueBindSparse vkQueueBindSparse; - PFN_vkQueueSubmit vkQueueSubmit; - PFN_vkQueueWaitIdle vkQueueWaitIdle; - PFN_vkResetCommandBuffer vkResetCommandBuffer; - PFN_vkResetCommandPool vkResetCommandPool; - PFN_vkResetDescriptorPool vkResetDescriptorPool; - PFN_vkResetEvent vkResetEvent; - PFN_vkResetFences vkResetFences; - PFN_vkSetEvent vkSetEvent; - PFN_vkUnmapMemory vkUnmapMemory; - PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; - PFN_vkWaitForFences vkWaitForFences; -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) - PFN_vkBindBufferMemory2 vkBindBufferMemory2; - PFN_vkBindImageMemory2 vkBindImageMemory2; - PFN_vkCmdDispatchBase vkCmdDispatchBase; - PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask; - PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate; - PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion; - PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate; - PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion; - PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; - PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport; - PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures; - PFN_vkGetDeviceQueue2 vkGetDeviceQueue2; - PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; - PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2; - PFN_vkTrimCommandPool vkTrimCommandPool; - PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate; -#endif /* defined(VK_VERSION_1_1) */ -#if defined(VK_VERSION_1_2) - PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2; - PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount; - PFN_vkCmdDrawIndirectCount 
vkCmdDrawIndirectCount; - PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2; - PFN_vkCmdNextSubpass2 vkCmdNextSubpass2; - PFN_vkCreateRenderPass2 vkCreateRenderPass2; - PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress; - PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress; - PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress; - PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue; - PFN_vkResetQueryPool vkResetQueryPool; - PFN_vkSignalSemaphore vkSignalSemaphore; - PFN_vkWaitSemaphores vkWaitSemaphores; -#endif /* defined(VK_VERSION_1_2) */ -#if defined(VK_VERSION_1_3) - PFN_vkCmdBeginRendering vkCmdBeginRendering; - PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2; - PFN_vkCmdBlitImage2 vkCmdBlitImage2; - PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2; - PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2; - PFN_vkCmdCopyImage2 vkCmdCopyImage2; - PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2; - PFN_vkCmdEndRendering vkCmdEndRendering; - PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; - PFN_vkCmdResetEvent2 vkCmdResetEvent2; - PFN_vkCmdResolveImage2 vkCmdResolveImage2; - PFN_vkCmdSetCullMode vkCmdSetCullMode; - PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable; - PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable; - PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp; - PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable; - PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable; - PFN_vkCmdSetEvent2 vkCmdSetEvent2; - PFN_vkCmdSetFrontFace vkCmdSetFrontFace; - PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable; - PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology; - PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable; - PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount; - PFN_vkCmdSetStencilOp vkCmdSetStencilOp; - PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable; - PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount; - PFN_vkCmdWaitEvents2 vkCmdWaitEvents2; - PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2; - PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot; - PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot; - PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements; - PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements; - PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements; - PFN_vkGetPrivateData vkGetPrivateData; - PFN_vkQueueSubmit2 vkQueueSubmit2; - PFN_vkSetPrivateData vkSetPrivateData; -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_AMDX_shader_enqueue) - PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX; - PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX; - PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX; - PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX; - PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX; - PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX; - PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX; -#endif /* defined(VK_AMDX_shader_enqueue) */ -#if defined(VK_AMD_anti_lag) - PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD; -#endif /* defined(VK_AMD_anti_lag) */ -#if defined(VK_AMD_buffer_marker) - PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; -#endif /* defined(VK_AMD_buffer_marker) */ -#if defined(VK_AMD_display_native_hdr) - PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD; -#endif /* 
defined(VK_AMD_display_native_hdr) */ -#if defined(VK_AMD_draw_indirect_count) - PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD; - PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD; -#endif /* defined(VK_AMD_draw_indirect_count) */ -#if defined(VK_AMD_shader_info) - PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD; -#endif /* defined(VK_AMD_shader_info) */ -#if defined(VK_ANDROID_external_memory_android_hardware_buffer) - PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID; - PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID; -#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */ -#if defined(VK_EXT_attachment_feedback_loop_dynamic_state) - PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT; -#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */ -#if defined(VK_EXT_buffer_device_address) - PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT; -#endif /* defined(VK_EXT_buffer_device_address) */ -#if defined(VK_EXT_calibrated_timestamps) - PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT; -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_color_write_enable) - PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT; -#endif /* defined(VK_EXT_color_write_enable) */ -#if defined(VK_EXT_conditional_rendering) - PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT; - PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT; -#endif /* defined(VK_EXT_conditional_rendering) */ -#if defined(VK_EXT_debug_marker) - PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT; - PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT; - PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT; - PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT; - PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT; -#endif /* defined(VK_EXT_debug_marker) */ -#if defined(VK_EXT_depth_bias_control) - PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT; -#endif /* defined(VK_EXT_depth_bias_control) */ -#if defined(VK_EXT_descriptor_buffer) - PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT; - PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT; - PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT; - PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT; - PFN_vkGetDescriptorEXT vkGetDescriptorEXT; - PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT; - PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT; - PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT; - PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT; - PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) - PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */ -#if defined(VK_EXT_device_fault) - PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT; -#endif /* defined(VK_EXT_device_fault) */ -#if 
defined(VK_EXT_discard_rectangles) - PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT; -#endif /* defined(VK_EXT_discard_rectangles) */ -#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 - PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT; - PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT; -#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */ -#if defined(VK_EXT_display_control) - PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT; - PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT; - PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT; - PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT; -#endif /* defined(VK_EXT_display_control) */ -#if defined(VK_EXT_external_memory_host) - PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT; -#endif /* defined(VK_EXT_external_memory_host) */ -#if defined(VK_EXT_full_screen_exclusive) - PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT; - PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT; -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) - PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT; -#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */ -#if defined(VK_EXT_hdr_metadata) - PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT; -#endif /* defined(VK_EXT_hdr_metadata) */ -#if defined(VK_EXT_host_image_copy) - PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT; - PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT; - PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT; - PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT; -#endif /* defined(VK_EXT_host_image_copy) */ -#if defined(VK_EXT_host_query_reset) - PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT; -#endif /* defined(VK_EXT_host_query_reset) */ -#if defined(VK_EXT_image_drm_format_modifier) - PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT; -#endif /* defined(VK_EXT_image_drm_format_modifier) */ -#if defined(VK_EXT_line_rasterization) - PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT; -#endif /* defined(VK_EXT_line_rasterization) */ -#if defined(VK_EXT_mesh_shader) - PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT; - PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT; - PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT; -#endif /* defined(VK_EXT_mesh_shader) */ -#if defined(VK_EXT_metal_objects) - PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT; -#endif /* defined(VK_EXT_metal_objects) */ -#if defined(VK_EXT_multi_draw) - PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT; - PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT; -#endif /* defined(VK_EXT_multi_draw) */ -#if defined(VK_EXT_opacity_micromap) - PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT; - PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT; - PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT; - PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT; - PFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT; - PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT; - PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT; - PFN_vkCopyMicromapEXT vkCopyMicromapEXT; - PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT; - 
PFN_vkCreateMicromapEXT vkCreateMicromapEXT; - PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT; - PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT; - PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT; - PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT; -#endif /* defined(VK_EXT_opacity_micromap) */ -#if defined(VK_EXT_pageable_device_local_memory) - PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT; -#endif /* defined(VK_EXT_pageable_device_local_memory) */ -#if defined(VK_EXT_pipeline_properties) - PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT; -#endif /* defined(VK_EXT_pipeline_properties) */ -#if defined(VK_EXT_private_data) - PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT; - PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT; - PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT; - PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT; -#endif /* defined(VK_EXT_private_data) */ -#if defined(VK_EXT_sample_locations) - PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT; -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_shader_module_identifier) - PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT; - PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT; -#endif /* defined(VK_EXT_shader_module_identifier) */ -#if defined(VK_EXT_shader_object) - PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT; - PFN_vkCreateShadersEXT vkCreateShadersEXT; - PFN_vkDestroyShaderEXT vkDestroyShaderEXT; - PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT; -#endif /* defined(VK_EXT_shader_object) */ -#if defined(VK_EXT_swapchain_maintenance1) - PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT; -#endif /* defined(VK_EXT_swapchain_maintenance1) */ -#if defined(VK_EXT_transform_feedback) - PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT; - PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT; - PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT; - PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT; - PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT; - PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT; -#endif /* defined(VK_EXT_transform_feedback) */ -#if defined(VK_EXT_validation_cache) - PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT; - PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT; - PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT; - PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT; -#endif /* defined(VK_EXT_validation_cache) */ -#if defined(VK_FUCHSIA_buffer_collection) - PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA; - PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA; - PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA; - PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA vkSetBufferCollectionBufferConstraintsFUCHSIA; - PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA; -#endif /* defined(VK_FUCHSIA_buffer_collection) */ -#if defined(VK_FUCHSIA_external_memory) - PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA; - PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_memory) */ -#if defined(VK_FUCHSIA_external_semaphore) - PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA; - 
PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_semaphore) */ -#if defined(VK_GOOGLE_display_timing) - PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE; - PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE; -#endif /* defined(VK_GOOGLE_display_timing) */ -#if defined(VK_HUAWEI_cluster_culling_shader) - PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI; - PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI; -#endif /* defined(VK_HUAWEI_cluster_culling_shader) */ -#if defined(VK_HUAWEI_invocation_mask) - PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI; -#endif /* defined(VK_HUAWEI_invocation_mask) */ -#if defined(VK_HUAWEI_subpass_shading) - PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI; - PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI; -#endif /* defined(VK_HUAWEI_subpass_shading) */ -#if defined(VK_INTEL_performance_query) - PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL; - PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL; - PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL; - PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL; - PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL; - PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL; - PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL; - PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL; - PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL; -#endif /* defined(VK_INTEL_performance_query) */ -#if defined(VK_KHR_acceleration_structure) - PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR; - PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR; - PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR; - PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR; - PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR; - PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR; - PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR; - PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR; - PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR; - PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR; - PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR; - PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR; - PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR; - PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR; - PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR; - PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR; -#endif /* defined(VK_KHR_acceleration_structure) */ -#if defined(VK_KHR_bind_memory2) - PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR; - PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR; -#endif /* defined(VK_KHR_bind_memory2) */ -#if defined(VK_KHR_buffer_device_address) - PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR; - 
PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR; - PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR; -#endif /* defined(VK_KHR_buffer_device_address) */ -#if defined(VK_KHR_calibrated_timestamps) - PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR; -#endif /* defined(VK_KHR_calibrated_timestamps) */ -#if defined(VK_KHR_copy_commands2) - PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR; - PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR; - PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR; - PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR; - PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR; - PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR; -#endif /* defined(VK_KHR_copy_commands2) */ -#if defined(VK_KHR_create_renderpass2) - PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR; - PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR; - PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR; - PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR; -#endif /* defined(VK_KHR_create_renderpass2) */ -#if defined(VK_KHR_deferred_host_operations) - PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR; - PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR; - PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR; - PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR; - PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR; -#endif /* defined(VK_KHR_deferred_host_operations) */ -#if defined(VK_KHR_descriptor_update_template) - PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; - PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; - PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR; -#endif /* defined(VK_KHR_descriptor_update_template) */ -#if defined(VK_KHR_device_group) - PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR; - PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR; - PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR; -#endif /* defined(VK_KHR_device_group) */ -#if defined(VK_KHR_display_swapchain) - PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; -#endif /* defined(VK_KHR_display_swapchain) */ -#if defined(VK_KHR_draw_indirect_count) - PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR; - PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR; -#endif /* defined(VK_KHR_draw_indirect_count) */ -#if defined(VK_KHR_dynamic_rendering) - PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR; - PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR; -#endif /* defined(VK_KHR_dynamic_rendering) */ -#if defined(VK_KHR_dynamic_rendering_local_read) - PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR; - PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR; -#endif /* defined(VK_KHR_dynamic_rendering_local_read) */ -#if defined(VK_KHR_external_fence_fd) - PFN_vkGetFenceFdKHR vkGetFenceFdKHR; - PFN_vkImportFenceFdKHR vkImportFenceFdKHR; -#endif /* defined(VK_KHR_external_fence_fd) */ -#if defined(VK_KHR_external_fence_win32) - PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR; - PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR; -#endif /* defined(VK_KHR_external_fence_win32) */ -#if defined(VK_KHR_external_memory_fd) - PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR; - PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR; -#endif /* defined(VK_KHR_external_memory_fd) */ 
-#if defined(VK_KHR_external_memory_win32) - PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR; - PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR; -#endif /* defined(VK_KHR_external_memory_win32) */ -#if defined(VK_KHR_external_semaphore_fd) - PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR; - PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR; -#endif /* defined(VK_KHR_external_semaphore_fd) */ -#if defined(VK_KHR_external_semaphore_win32) - PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR; - PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR; -#endif /* defined(VK_KHR_external_semaphore_win32) */ -#if defined(VK_KHR_fragment_shading_rate) - PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR; -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_memory_requirements2) - PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR; - PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR; - PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR; -#endif /* defined(VK_KHR_get_memory_requirements2) */ -#if defined(VK_KHR_line_rasterization) - PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR; -#endif /* defined(VK_KHR_line_rasterization) */ -#if defined(VK_KHR_maintenance1) - PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; -#endif /* defined(VK_KHR_maintenance1) */ -#if defined(VK_KHR_maintenance3) - PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; -#endif /* defined(VK_KHR_maintenance3) */ -#if defined(VK_KHR_maintenance4) - PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR; - PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR; - PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR; -#endif /* defined(VK_KHR_maintenance4) */ -#if defined(VK_KHR_maintenance5) - PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR; - PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR; - PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR; - PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR; -#endif /* defined(VK_KHR_maintenance5) */ -#if defined(VK_KHR_maintenance6) - PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR; - PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR; -#endif /* defined(VK_KHR_maintenance6) */ -#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) - PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR; - PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR; -#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) - PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT; - PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT; -#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_KHR_map_memory2) - PFN_vkMapMemory2KHR vkMapMemory2KHR; - PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR; -#endif /* defined(VK_KHR_map_memory2) */ -#if defined(VK_KHR_performance_query) - PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR; - PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR; -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_pipeline_binary) - PFN_vkCreatePipelineBinariesKHR 
vkCreatePipelineBinariesKHR; - PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR; - PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR; - PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR; - PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR; -#endif /* defined(VK_KHR_pipeline_binary) */ -#if defined(VK_KHR_pipeline_executable_properties) - PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR; - PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR; - PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR; -#endif /* defined(VK_KHR_pipeline_executable_properties) */ -#if defined(VK_KHR_present_wait) - PFN_vkWaitForPresentKHR vkWaitForPresentKHR; -#endif /* defined(VK_KHR_present_wait) */ -#if defined(VK_KHR_push_descriptor) - PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR; -#endif /* defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) - PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR; -#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_ray_tracing_pipeline) - PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR; - PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR; - PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR; - PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR; - PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR; - PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR; - PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR; -#endif /* defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_sampler_ycbcr_conversion) - PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR; - PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR; -#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */ -#if defined(VK_KHR_shared_presentable_image) - PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR; -#endif /* defined(VK_KHR_shared_presentable_image) */ -#if defined(VK_KHR_swapchain) - PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; - PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; - PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; - PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; - PFN_vkQueuePresentKHR vkQueuePresentKHR; -#endif /* defined(VK_KHR_swapchain) */ -#if defined(VK_KHR_synchronization2) - PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR; - PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR; - PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR; - PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR; - PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR; - PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR; -#endif /* defined(VK_KHR_synchronization2) */ -#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) - PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */ -#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) - PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_KHR_timeline_semaphore) - PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR; - PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR; - 
PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR; -#endif /* defined(VK_KHR_timeline_semaphore) */ -#if defined(VK_KHR_video_decode_queue) - PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR; -#endif /* defined(VK_KHR_video_decode_queue) */ -#if defined(VK_KHR_video_encode_queue) - PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR; - PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR; -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) - PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR; - PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR; - PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR; - PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR; - PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR; - PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR; - PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR; - PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR; - PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR; - PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR; -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_NVX_binary_import) - PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX; - PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX; - PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX; - PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX; - PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX; -#endif /* defined(VK_NVX_binary_import) */ -#if defined(VK_NVX_image_view_handle) - PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX; - PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX; -#endif /* defined(VK_NVX_image_view_handle) */ -#if defined(VK_NV_clip_space_w_scaling) - PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV; -#endif /* defined(VK_NV_clip_space_w_scaling) */ -#if defined(VK_NV_copy_memory_indirect) - PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV; - PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV; -#endif /* defined(VK_NV_copy_memory_indirect) */ -#if defined(VK_NV_cuda_kernel_launch) - PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV; - PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV; - PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV; - PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV; - PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV; - PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV; -#endif /* defined(VK_NV_cuda_kernel_launch) */ -#if defined(VK_NV_device_diagnostic_checkpoints) - PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV; - PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV; -#endif /* defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_NV_device_generated_commands) - PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV; - PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV; - PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV; - PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV; - PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV; - PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) - PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV; - PFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV; - PFN_vkGetPipelineIndirectMemoryRequirementsNV 
vkGetPipelineIndirectMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_rdma) - PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV; -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) - PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV; -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) - PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV; -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) - PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV; - PFN_vkLatencySleepNV vkLatencySleepNV; - PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV; - PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV; - PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV; -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) - PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV; - PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV; -#endif /* defined(VK_NV_memory_decompression) */ -#if defined(VK_NV_mesh_shader) - PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV; - PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV; - PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV; -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) - PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV; - PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV; - PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV; - PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV; -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) - PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV; - PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV; - PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV; - PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV; - PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV; - PFN_vkCompileDeferredNV vkCompileDeferredNV; - PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV; - PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV; - PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV; - PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV; - PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV; - PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV; -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 - PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV; -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) - PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV; -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) - PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV; - PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV; - PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV; -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) - PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM; - PFN_vkGetFramebufferTilePropertiesQCOM 
vkGetFramebufferTilePropertiesQCOM; -#endif /* defined(VK_QCOM_tile_properties) */ -#if defined(VK_QNX_external_memory_screen_buffer) - PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX; -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) - PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE; - PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE; -#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) - PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT; - PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT; - PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT; - PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT; - PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT; - PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT; - PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT; - PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT; - PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT; - PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT; - PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT; - PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) - PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT; - PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT; - PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT; - PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT; - PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) - PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT; - PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT; - PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT; - PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT; - PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT; - PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT; - PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT; - PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT; - PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT; - PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) - PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) - PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && 
defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) - PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT; - PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) - PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) - PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) - PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) - PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) - PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT; - PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) - PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) - PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) - PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV; 
-#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) - PFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV; - PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) - PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV; - PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV; - PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) - PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) - PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) - PFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) - PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT; -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) - PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) - PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR; -#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || 
(defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR; - PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) - PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ - /* VOLK_GENERATE_DEVICE_TABLE */ -}; - -/* VOLK_GENERATE_PROTOTYPES_H */ -#if defined(VK_VERSION_1_0) -extern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; -extern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; -extern PFN_vkAllocateMemory vkAllocateMemory; -extern PFN_vkBeginCommandBuffer vkBeginCommandBuffer; -extern PFN_vkBindBufferMemory vkBindBufferMemory; -extern PFN_vkBindImageMemory vkBindImageMemory; -extern PFN_vkCmdBeginQuery vkCmdBeginQuery; -extern PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; -extern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; -extern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; -extern PFN_vkCmdBindPipeline vkCmdBindPipeline; -extern PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; -extern PFN_vkCmdBlitImage vkCmdBlitImage; -extern PFN_vkCmdClearAttachments vkCmdClearAttachments; -extern PFN_vkCmdClearColorImage vkCmdClearColorImage; -extern PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage; -extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; -extern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; -extern PFN_vkCmdCopyImage vkCmdCopyImage; -extern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; -extern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; -extern PFN_vkCmdDispatch vkCmdDispatch; -extern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; -extern PFN_vkCmdDraw vkCmdDraw; -extern PFN_vkCmdDrawIndexed vkCmdDrawIndexed; -extern PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect; -extern PFN_vkCmdDrawIndirect vkCmdDrawIndirect; -extern PFN_vkCmdEndQuery vkCmdEndQuery; -extern PFN_vkCmdEndRenderPass vkCmdEndRenderPass; -extern PFN_vkCmdExecuteCommands vkCmdExecuteCommands; -extern PFN_vkCmdFillBuffer vkCmdFillBuffer; -extern PFN_vkCmdNextSubpass vkCmdNextSubpass; -extern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; -extern PFN_vkCmdPushConstants vkCmdPushConstants; -extern PFN_vkCmdResetEvent vkCmdResetEvent; -extern PFN_vkCmdResetQueryPool vkCmdResetQueryPool; -extern PFN_vkCmdResolveImage vkCmdResolveImage; -extern PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; -extern PFN_vkCmdSetDepthBias vkCmdSetDepthBias; -extern PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; -extern PFN_vkCmdSetEvent vkCmdSetEvent; -extern PFN_vkCmdSetLineWidth vkCmdSetLineWidth; -extern PFN_vkCmdSetScissor vkCmdSetScissor; -extern PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; -extern PFN_vkCmdSetStencilReference vkCmdSetStencilReference; -extern PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; -extern PFN_vkCmdSetViewport vkCmdSetViewport; -extern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; -extern PFN_vkCmdWaitEvents vkCmdWaitEvents; -extern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; -extern PFN_vkCreateBuffer vkCreateBuffer; -extern PFN_vkCreateBufferView vkCreateBufferView; -extern PFN_vkCreateCommandPool vkCreateCommandPool; -extern 
PFN_vkCreateComputePipelines vkCreateComputePipelines; -extern PFN_vkCreateDescriptorPool vkCreateDescriptorPool; -extern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; -extern PFN_vkCreateDevice vkCreateDevice; -extern PFN_vkCreateEvent vkCreateEvent; -extern PFN_vkCreateFence vkCreateFence; -extern PFN_vkCreateFramebuffer vkCreateFramebuffer; -extern PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; -extern PFN_vkCreateImage vkCreateImage; -extern PFN_vkCreateImageView vkCreateImageView; -extern PFN_vkCreateInstance vkCreateInstance; -extern PFN_vkCreatePipelineCache vkCreatePipelineCache; -extern PFN_vkCreatePipelineLayout vkCreatePipelineLayout; -extern PFN_vkCreateQueryPool vkCreateQueryPool; -extern PFN_vkCreateRenderPass vkCreateRenderPass; -extern PFN_vkCreateSampler vkCreateSampler; -extern PFN_vkCreateSemaphore vkCreateSemaphore; -extern PFN_vkCreateShaderModule vkCreateShaderModule; -extern PFN_vkDestroyBuffer vkDestroyBuffer; -extern PFN_vkDestroyBufferView vkDestroyBufferView; -extern PFN_vkDestroyCommandPool vkDestroyCommandPool; -extern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; -extern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; -extern PFN_vkDestroyDevice vkDestroyDevice; -extern PFN_vkDestroyEvent vkDestroyEvent; -extern PFN_vkDestroyFence vkDestroyFence; -extern PFN_vkDestroyFramebuffer vkDestroyFramebuffer; -extern PFN_vkDestroyImage vkDestroyImage; -extern PFN_vkDestroyImageView vkDestroyImageView; -extern PFN_vkDestroyInstance vkDestroyInstance; -extern PFN_vkDestroyPipeline vkDestroyPipeline; -extern PFN_vkDestroyPipelineCache vkDestroyPipelineCache; -extern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; -extern PFN_vkDestroyQueryPool vkDestroyQueryPool; -extern PFN_vkDestroyRenderPass vkDestroyRenderPass; -extern PFN_vkDestroySampler vkDestroySampler; -extern PFN_vkDestroySemaphore vkDestroySemaphore; -extern PFN_vkDestroyShaderModule vkDestroyShaderModule; -extern PFN_vkDeviceWaitIdle vkDeviceWaitIdle; -extern PFN_vkEndCommandBuffer vkEndCommandBuffer; -extern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; -extern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; -extern PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; -extern PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; -extern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; -extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; -extern PFN_vkFreeCommandBuffers vkFreeCommandBuffers; -extern PFN_vkFreeDescriptorSets vkFreeDescriptorSets; -extern PFN_vkFreeMemory vkFreeMemory; -extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; -extern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; -extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; -extern PFN_vkGetDeviceQueue vkGetDeviceQueue; -extern PFN_vkGetEventStatus vkGetEventStatus; -extern PFN_vkGetFenceStatus vkGetFenceStatus; -extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; -extern PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements; -extern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; -extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; -extern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; -extern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; -extern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; 
-extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; -extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; -extern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; -extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties; -extern PFN_vkGetPipelineCacheData vkGetPipelineCacheData; -extern PFN_vkGetQueryPoolResults vkGetQueryPoolResults; -extern PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity; -extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; -extern PFN_vkMapMemory vkMapMemory; -extern PFN_vkMergePipelineCaches vkMergePipelineCaches; -extern PFN_vkQueueBindSparse vkQueueBindSparse; -extern PFN_vkQueueSubmit vkQueueSubmit; -extern PFN_vkQueueWaitIdle vkQueueWaitIdle; -extern PFN_vkResetCommandBuffer vkResetCommandBuffer; -extern PFN_vkResetCommandPool vkResetCommandPool; -extern PFN_vkResetDescriptorPool vkResetDescriptorPool; -extern PFN_vkResetEvent vkResetEvent; -extern PFN_vkResetFences vkResetFences; -extern PFN_vkSetEvent vkSetEvent; -extern PFN_vkUnmapMemory vkUnmapMemory; -extern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; -extern PFN_vkWaitForFences vkWaitForFences; -#endif /* defined(VK_VERSION_1_0) */ -#if defined(VK_VERSION_1_1) -extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; -extern PFN_vkBindImageMemory2 vkBindImageMemory2; -extern PFN_vkCmdDispatchBase vkCmdDispatchBase; -extern PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask; -extern PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate; -extern PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion; -extern PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate; -extern PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion; -extern PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion; -extern PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups; -extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; -extern PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport; -extern PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures; -extern PFN_vkGetDeviceQueue2 vkGetDeviceQueue2; -extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; -extern PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2; -extern PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties; -extern PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties; -extern PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties; -extern PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2; -extern PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2; -extern PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2; -extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; -extern PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2; -extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2; -extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2; -extern PFN_vkTrimCommandPool vkTrimCommandPool; -extern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate; -#endif /* defined(VK_VERSION_1_1) */ -#if 
defined(VK_VERSION_1_2) -extern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2; -extern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount; -extern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount; -extern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2; -extern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2; -extern PFN_vkCreateRenderPass2 vkCreateRenderPass2; -extern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress; -extern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress; -extern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress; -extern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue; -extern PFN_vkResetQueryPool vkResetQueryPool; -extern PFN_vkSignalSemaphore vkSignalSemaphore; -extern PFN_vkWaitSemaphores vkWaitSemaphores; -#endif /* defined(VK_VERSION_1_2) */ -#if defined(VK_VERSION_1_3) -extern PFN_vkCmdBeginRendering vkCmdBeginRendering; -extern PFN_vkCmdBindVertexBuffers2 vkCmdBindVertexBuffers2; -extern PFN_vkCmdBlitImage2 vkCmdBlitImage2; -extern PFN_vkCmdCopyBuffer2 vkCmdCopyBuffer2; -extern PFN_vkCmdCopyBufferToImage2 vkCmdCopyBufferToImage2; -extern PFN_vkCmdCopyImage2 vkCmdCopyImage2; -extern PFN_vkCmdCopyImageToBuffer2 vkCmdCopyImageToBuffer2; -extern PFN_vkCmdEndRendering vkCmdEndRendering; -extern PFN_vkCmdPipelineBarrier2 vkCmdPipelineBarrier2; -extern PFN_vkCmdResetEvent2 vkCmdResetEvent2; -extern PFN_vkCmdResolveImage2 vkCmdResolveImage2; -extern PFN_vkCmdSetCullMode vkCmdSetCullMode; -extern PFN_vkCmdSetDepthBiasEnable vkCmdSetDepthBiasEnable; -extern PFN_vkCmdSetDepthBoundsTestEnable vkCmdSetDepthBoundsTestEnable; -extern PFN_vkCmdSetDepthCompareOp vkCmdSetDepthCompareOp; -extern PFN_vkCmdSetDepthTestEnable vkCmdSetDepthTestEnable; -extern PFN_vkCmdSetDepthWriteEnable vkCmdSetDepthWriteEnable; -extern PFN_vkCmdSetEvent2 vkCmdSetEvent2; -extern PFN_vkCmdSetFrontFace vkCmdSetFrontFace; -extern PFN_vkCmdSetPrimitiveRestartEnable vkCmdSetPrimitiveRestartEnable; -extern PFN_vkCmdSetPrimitiveTopology vkCmdSetPrimitiveTopology; -extern PFN_vkCmdSetRasterizerDiscardEnable vkCmdSetRasterizerDiscardEnable; -extern PFN_vkCmdSetScissorWithCount vkCmdSetScissorWithCount; -extern PFN_vkCmdSetStencilOp vkCmdSetStencilOp; -extern PFN_vkCmdSetStencilTestEnable vkCmdSetStencilTestEnable; -extern PFN_vkCmdSetViewportWithCount vkCmdSetViewportWithCount; -extern PFN_vkCmdWaitEvents2 vkCmdWaitEvents2; -extern PFN_vkCmdWriteTimestamp2 vkCmdWriteTimestamp2; -extern PFN_vkCreatePrivateDataSlot vkCreatePrivateDataSlot; -extern PFN_vkDestroyPrivateDataSlot vkDestroyPrivateDataSlot; -extern PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements; -extern PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements; -extern PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements; -extern PFN_vkGetPhysicalDeviceToolProperties vkGetPhysicalDeviceToolProperties; -extern PFN_vkGetPrivateData vkGetPrivateData; -extern PFN_vkQueueSubmit2 vkQueueSubmit2; -extern PFN_vkSetPrivateData vkSetPrivateData; -#endif /* defined(VK_VERSION_1_3) */ -#if defined(VK_AMDX_shader_enqueue) -extern PFN_vkCmdDispatchGraphAMDX vkCmdDispatchGraphAMDX; -extern PFN_vkCmdDispatchGraphIndirectAMDX vkCmdDispatchGraphIndirectAMDX; -extern PFN_vkCmdDispatchGraphIndirectCountAMDX vkCmdDispatchGraphIndirectCountAMDX; -extern PFN_vkCmdInitializeGraphScratchMemoryAMDX vkCmdInitializeGraphScratchMemoryAMDX; -extern PFN_vkCreateExecutionGraphPipelinesAMDX vkCreateExecutionGraphPipelinesAMDX; -extern 
PFN_vkGetExecutionGraphPipelineNodeIndexAMDX vkGetExecutionGraphPipelineNodeIndexAMDX; -extern PFN_vkGetExecutionGraphPipelineScratchSizeAMDX vkGetExecutionGraphPipelineScratchSizeAMDX; -#endif /* defined(VK_AMDX_shader_enqueue) */ -#if defined(VK_AMD_anti_lag) -extern PFN_vkAntiLagUpdateAMD vkAntiLagUpdateAMD; -#endif /* defined(VK_AMD_anti_lag) */ -#if defined(VK_AMD_buffer_marker) -extern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD; -#endif /* defined(VK_AMD_buffer_marker) */ -#if defined(VK_AMD_display_native_hdr) -extern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD; -#endif /* defined(VK_AMD_display_native_hdr) */ -#if defined(VK_AMD_draw_indirect_count) -extern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD; -extern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD; -#endif /* defined(VK_AMD_draw_indirect_count) */ -#if defined(VK_AMD_shader_info) -extern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD; -#endif /* defined(VK_AMD_shader_info) */ -#if defined(VK_ANDROID_external_memory_android_hardware_buffer) -extern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID; -extern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID; -#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */ -#if defined(VK_EXT_acquire_drm_display) -extern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT; -extern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT; -#endif /* defined(VK_EXT_acquire_drm_display) */ -#if defined(VK_EXT_acquire_xlib_display) -extern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT; -extern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT; -#endif /* defined(VK_EXT_acquire_xlib_display) */ -#if defined(VK_EXT_attachment_feedback_loop_dynamic_state) -extern PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT vkCmdSetAttachmentFeedbackLoopEnableEXT; -#endif /* defined(VK_EXT_attachment_feedback_loop_dynamic_state) */ -#if defined(VK_EXT_buffer_device_address) -extern PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT; -#endif /* defined(VK_EXT_buffer_device_address) */ -#if defined(VK_EXT_calibrated_timestamps) -extern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT; -extern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT; -#endif /* defined(VK_EXT_calibrated_timestamps) */ -#if defined(VK_EXT_color_write_enable) -extern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT; -#endif /* defined(VK_EXT_color_write_enable) */ -#if defined(VK_EXT_conditional_rendering) -extern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT; -extern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT; -#endif /* defined(VK_EXT_conditional_rendering) */ -#if defined(VK_EXT_debug_marker) -extern PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT; -extern PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT; -extern PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT; -extern PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT; -extern PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT; -#endif /* defined(VK_EXT_debug_marker) */ -#if defined(VK_EXT_debug_report) -extern PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT; -extern PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT; -extern PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT; -#endif /* defined(VK_EXT_debug_report) */ -#if 
defined(VK_EXT_debug_utils) -extern PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT; -extern PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT; -extern PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT; -extern PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT; -extern PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT; -extern PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT; -extern PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT; -extern PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT; -extern PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT; -extern PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT; -extern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT; -#endif /* defined(VK_EXT_debug_utils) */ -#if defined(VK_EXT_depth_bias_control) -extern PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT; -#endif /* defined(VK_EXT_depth_bias_control) */ -#if defined(VK_EXT_descriptor_buffer) -extern PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT vkCmdBindDescriptorBufferEmbeddedSamplersEXT; -extern PFN_vkCmdBindDescriptorBuffersEXT vkCmdBindDescriptorBuffersEXT; -extern PFN_vkCmdSetDescriptorBufferOffsetsEXT vkCmdSetDescriptorBufferOffsetsEXT; -extern PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT vkGetBufferOpaqueCaptureDescriptorDataEXT; -extern PFN_vkGetDescriptorEXT vkGetDescriptorEXT; -extern PFN_vkGetDescriptorSetLayoutBindingOffsetEXT vkGetDescriptorSetLayoutBindingOffsetEXT; -extern PFN_vkGetDescriptorSetLayoutSizeEXT vkGetDescriptorSetLayoutSizeEXT; -extern PFN_vkGetImageOpaqueCaptureDescriptorDataEXT vkGetImageOpaqueCaptureDescriptorDataEXT; -extern PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT vkGetImageViewOpaqueCaptureDescriptorDataEXT; -extern PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT vkGetSamplerOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) -extern PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT; -#endif /* defined(VK_EXT_descriptor_buffer) && (defined(VK_KHR_acceleration_structure) || defined(VK_NV_ray_tracing)) */ -#if defined(VK_EXT_device_fault) -extern PFN_vkGetDeviceFaultInfoEXT vkGetDeviceFaultInfoEXT; -#endif /* defined(VK_EXT_device_fault) */ -#if defined(VK_EXT_direct_mode_display) -extern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT; -#endif /* defined(VK_EXT_direct_mode_display) */ -#if defined(VK_EXT_directfb_surface) -extern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT; -extern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT; -#endif /* defined(VK_EXT_directfb_surface) */ -#if defined(VK_EXT_discard_rectangles) -extern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT; -#endif /* defined(VK_EXT_discard_rectangles) */ -#if defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 -extern PFN_vkCmdSetDiscardRectangleEnableEXT vkCmdSetDiscardRectangleEnableEXT; -extern PFN_vkCmdSetDiscardRectangleModeEXT vkCmdSetDiscardRectangleModeEXT; -#endif /* defined(VK_EXT_discard_rectangles) && VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION >= 2 */ -#if defined(VK_EXT_display_control) -extern PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT; -extern PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT; -extern 
PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT; -extern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT; -#endif /* defined(VK_EXT_display_control) */ -#if defined(VK_EXT_display_surface_counter) -extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT; -#endif /* defined(VK_EXT_display_surface_counter) */ -#if defined(VK_EXT_external_memory_host) -extern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT; -#endif /* defined(VK_EXT_external_memory_host) */ -#if defined(VK_EXT_full_screen_exclusive) -extern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT; -extern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT; -extern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT; -#endif /* defined(VK_EXT_full_screen_exclusive) */ -#if defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) -extern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT; -#endif /* defined(VK_EXT_full_screen_exclusive) && (defined(VK_KHR_device_group) || defined(VK_VERSION_1_1)) */ -#if defined(VK_EXT_hdr_metadata) -extern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT; -#endif /* defined(VK_EXT_hdr_metadata) */ -#if defined(VK_EXT_headless_surface) -extern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT; -#endif /* defined(VK_EXT_headless_surface) */ -#if defined(VK_EXT_host_image_copy) -extern PFN_vkCopyImageToImageEXT vkCopyImageToImageEXT; -extern PFN_vkCopyImageToMemoryEXT vkCopyImageToMemoryEXT; -extern PFN_vkCopyMemoryToImageEXT vkCopyMemoryToImageEXT; -extern PFN_vkTransitionImageLayoutEXT vkTransitionImageLayoutEXT; -#endif /* defined(VK_EXT_host_image_copy) */ -#if defined(VK_EXT_host_query_reset) -extern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT; -#endif /* defined(VK_EXT_host_query_reset) */ -#if defined(VK_EXT_image_drm_format_modifier) -extern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT; -#endif /* defined(VK_EXT_image_drm_format_modifier) */ -#if defined(VK_EXT_line_rasterization) -extern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT; -#endif /* defined(VK_EXT_line_rasterization) */ -#if defined(VK_EXT_mesh_shader) -extern PFN_vkCmdDrawMeshTasksEXT vkCmdDrawMeshTasksEXT; -extern PFN_vkCmdDrawMeshTasksIndirectCountEXT vkCmdDrawMeshTasksIndirectCountEXT; -extern PFN_vkCmdDrawMeshTasksIndirectEXT vkCmdDrawMeshTasksIndirectEXT; -#endif /* defined(VK_EXT_mesh_shader) */ -#if defined(VK_EXT_metal_objects) -extern PFN_vkExportMetalObjectsEXT vkExportMetalObjectsEXT; -#endif /* defined(VK_EXT_metal_objects) */ -#if defined(VK_EXT_metal_surface) -extern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT; -#endif /* defined(VK_EXT_metal_surface) */ -#if defined(VK_EXT_multi_draw) -extern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT; -extern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT; -#endif /* defined(VK_EXT_multi_draw) */ -#if defined(VK_EXT_opacity_micromap) -extern PFN_vkBuildMicromapsEXT vkBuildMicromapsEXT; -extern PFN_vkCmdBuildMicromapsEXT vkCmdBuildMicromapsEXT; -extern PFN_vkCmdCopyMemoryToMicromapEXT vkCmdCopyMemoryToMicromapEXT; -extern PFN_vkCmdCopyMicromapEXT vkCmdCopyMicromapEXT; -extern PFN_vkCmdCopyMicromapToMemoryEXT vkCmdCopyMicromapToMemoryEXT; -extern PFN_vkCmdWriteMicromapsPropertiesEXT vkCmdWriteMicromapsPropertiesEXT; -extern PFN_vkCopyMemoryToMicromapEXT vkCopyMemoryToMicromapEXT; 
-extern PFN_vkCopyMicromapEXT vkCopyMicromapEXT; -extern PFN_vkCopyMicromapToMemoryEXT vkCopyMicromapToMemoryEXT; -extern PFN_vkCreateMicromapEXT vkCreateMicromapEXT; -extern PFN_vkDestroyMicromapEXT vkDestroyMicromapEXT; -extern PFN_vkGetDeviceMicromapCompatibilityEXT vkGetDeviceMicromapCompatibilityEXT; -extern PFN_vkGetMicromapBuildSizesEXT vkGetMicromapBuildSizesEXT; -extern PFN_vkWriteMicromapsPropertiesEXT vkWriteMicromapsPropertiesEXT; -#endif /* defined(VK_EXT_opacity_micromap) */ -#if defined(VK_EXT_pageable_device_local_memory) -extern PFN_vkSetDeviceMemoryPriorityEXT vkSetDeviceMemoryPriorityEXT; -#endif /* defined(VK_EXT_pageable_device_local_memory) */ -#if defined(VK_EXT_pipeline_properties) -extern PFN_vkGetPipelinePropertiesEXT vkGetPipelinePropertiesEXT; -#endif /* defined(VK_EXT_pipeline_properties) */ -#if defined(VK_EXT_private_data) -extern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT; -extern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT; -extern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT; -extern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT; -#endif /* defined(VK_EXT_private_data) */ -#if defined(VK_EXT_sample_locations) -extern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT; -extern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT; -#endif /* defined(VK_EXT_sample_locations) */ -#if defined(VK_EXT_shader_module_identifier) -extern PFN_vkGetShaderModuleCreateInfoIdentifierEXT vkGetShaderModuleCreateInfoIdentifierEXT; -extern PFN_vkGetShaderModuleIdentifierEXT vkGetShaderModuleIdentifierEXT; -#endif /* defined(VK_EXT_shader_module_identifier) */ -#if defined(VK_EXT_shader_object) -extern PFN_vkCmdBindShadersEXT vkCmdBindShadersEXT; -extern PFN_vkCreateShadersEXT vkCreateShadersEXT; -extern PFN_vkDestroyShaderEXT vkDestroyShaderEXT; -extern PFN_vkGetShaderBinaryDataEXT vkGetShaderBinaryDataEXT; -#endif /* defined(VK_EXT_shader_object) */ -#if defined(VK_EXT_swapchain_maintenance1) -extern PFN_vkReleaseSwapchainImagesEXT vkReleaseSwapchainImagesEXT; -#endif /* defined(VK_EXT_swapchain_maintenance1) */ -#if defined(VK_EXT_tooling_info) -extern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT; -#endif /* defined(VK_EXT_tooling_info) */ -#if defined(VK_EXT_transform_feedback) -extern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT; -extern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT; -extern PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT; -extern PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT; -extern PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT; -extern PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT; -#endif /* defined(VK_EXT_transform_feedback) */ -#if defined(VK_EXT_validation_cache) -extern PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT; -extern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT; -extern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT; -extern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT; -#endif /* defined(VK_EXT_validation_cache) */ -#if defined(VK_FUCHSIA_buffer_collection) -extern PFN_vkCreateBufferCollectionFUCHSIA vkCreateBufferCollectionFUCHSIA; -extern PFN_vkDestroyBufferCollectionFUCHSIA vkDestroyBufferCollectionFUCHSIA; -extern PFN_vkGetBufferCollectionPropertiesFUCHSIA vkGetBufferCollectionPropertiesFUCHSIA; -extern PFN_vkSetBufferCollectionBufferConstraintsFUCHSIA 
vkSetBufferCollectionBufferConstraintsFUCHSIA; -extern PFN_vkSetBufferCollectionImageConstraintsFUCHSIA vkSetBufferCollectionImageConstraintsFUCHSIA; -#endif /* defined(VK_FUCHSIA_buffer_collection) */ -#if defined(VK_FUCHSIA_external_memory) -extern PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA; -extern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_memory) */ -#if defined(VK_FUCHSIA_external_semaphore) -extern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA; -extern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA; -#endif /* defined(VK_FUCHSIA_external_semaphore) */ -#if defined(VK_FUCHSIA_imagepipe_surface) -extern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA; -#endif /* defined(VK_FUCHSIA_imagepipe_surface) */ -#if defined(VK_GGP_stream_descriptor_surface) -extern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP; -#endif /* defined(VK_GGP_stream_descriptor_surface) */ -#if defined(VK_GOOGLE_display_timing) -extern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE; -extern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE; -#endif /* defined(VK_GOOGLE_display_timing) */ -#if defined(VK_HUAWEI_cluster_culling_shader) -extern PFN_vkCmdDrawClusterHUAWEI vkCmdDrawClusterHUAWEI; -extern PFN_vkCmdDrawClusterIndirectHUAWEI vkCmdDrawClusterIndirectHUAWEI; -#endif /* defined(VK_HUAWEI_cluster_culling_shader) */ -#if defined(VK_HUAWEI_invocation_mask) -extern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI; -#endif /* defined(VK_HUAWEI_invocation_mask) */ -#if defined(VK_HUAWEI_subpass_shading) -extern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI; -extern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI; -#endif /* defined(VK_HUAWEI_subpass_shading) */ -#if defined(VK_INTEL_performance_query) -extern PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL; -extern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL; -extern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL; -extern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL; -extern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL; -extern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL; -extern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL; -extern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL; -extern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL; -#endif /* defined(VK_INTEL_performance_query) */ -#if defined(VK_KHR_acceleration_structure) -extern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR; -extern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR; -extern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR; -extern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR; -extern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR; -extern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR; -extern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR; -extern 
PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR; -extern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR; -extern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR; -extern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR; -extern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR; -extern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR; -extern PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR; -extern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR; -extern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR; -#endif /* defined(VK_KHR_acceleration_structure) */ -#if defined(VK_KHR_android_surface) -extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; -#endif /* defined(VK_KHR_android_surface) */ -#if defined(VK_KHR_bind_memory2) -extern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR; -extern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR; -#endif /* defined(VK_KHR_bind_memory2) */ -#if defined(VK_KHR_buffer_device_address) -extern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR; -extern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR; -extern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR; -#endif /* defined(VK_KHR_buffer_device_address) */ -#if defined(VK_KHR_calibrated_timestamps) -extern PFN_vkGetCalibratedTimestampsKHR vkGetCalibratedTimestampsKHR; -extern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR vkGetPhysicalDeviceCalibrateableTimeDomainsKHR; -#endif /* defined(VK_KHR_calibrated_timestamps) */ -#if defined(VK_KHR_cooperative_matrix) -extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR; -#endif /* defined(VK_KHR_cooperative_matrix) */ -#if defined(VK_KHR_copy_commands2) -extern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR; -extern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR; -extern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR; -extern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR; -extern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR; -extern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR; -#endif /* defined(VK_KHR_copy_commands2) */ -#if defined(VK_KHR_create_renderpass2) -extern PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR; -extern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR; -extern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR; -extern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR; -#endif /* defined(VK_KHR_create_renderpass2) */ -#if defined(VK_KHR_deferred_host_operations) -extern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR; -extern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR; -extern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR; -extern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR; -extern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR; -#endif /* defined(VK_KHR_deferred_host_operations) */ -#if defined(VK_KHR_descriptor_update_template) -extern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; -extern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; -extern PFN_vkUpdateDescriptorSetWithTemplateKHR 
vkUpdateDescriptorSetWithTemplateKHR; -#endif /* defined(VK_KHR_descriptor_update_template) */ -#if defined(VK_KHR_device_group) -extern PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR; -extern PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR; -extern PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR; -#endif /* defined(VK_KHR_device_group) */ -#if defined(VK_KHR_device_group_creation) -extern PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR; -#endif /* defined(VK_KHR_device_group_creation) */ -#if defined(VK_KHR_display) -extern PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR; -extern PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR; -extern PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR; -extern PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR; -extern PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR; -extern PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR; -extern PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR; -#endif /* defined(VK_KHR_display) */ -#if defined(VK_KHR_display_swapchain) -extern PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR; -#endif /* defined(VK_KHR_display_swapchain) */ -#if defined(VK_KHR_draw_indirect_count) -extern PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR; -extern PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR; -#endif /* defined(VK_KHR_draw_indirect_count) */ -#if defined(VK_KHR_dynamic_rendering) -extern PFN_vkCmdBeginRenderingKHR vkCmdBeginRenderingKHR; -extern PFN_vkCmdEndRenderingKHR vkCmdEndRenderingKHR; -#endif /* defined(VK_KHR_dynamic_rendering) */ -#if defined(VK_KHR_dynamic_rendering_local_read) -extern PFN_vkCmdSetRenderingAttachmentLocationsKHR vkCmdSetRenderingAttachmentLocationsKHR; -extern PFN_vkCmdSetRenderingInputAttachmentIndicesKHR vkCmdSetRenderingInputAttachmentIndicesKHR; -#endif /* defined(VK_KHR_dynamic_rendering_local_read) */ -#if defined(VK_KHR_external_fence_capabilities) -extern PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR; -#endif /* defined(VK_KHR_external_fence_capabilities) */ -#if defined(VK_KHR_external_fence_fd) -extern PFN_vkGetFenceFdKHR vkGetFenceFdKHR; -extern PFN_vkImportFenceFdKHR vkImportFenceFdKHR; -#endif /* defined(VK_KHR_external_fence_fd) */ -#if defined(VK_KHR_external_fence_win32) -extern PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR; -extern PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR; -#endif /* defined(VK_KHR_external_fence_win32) */ -#if defined(VK_KHR_external_memory_capabilities) -extern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR; -#endif /* defined(VK_KHR_external_memory_capabilities) */ -#if defined(VK_KHR_external_memory_fd) -extern PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR; -extern PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR; -#endif /* defined(VK_KHR_external_memory_fd) */ -#if defined(VK_KHR_external_memory_win32) -extern PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR; -extern PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR; -#endif /* defined(VK_KHR_external_memory_win32) */ -#if defined(VK_KHR_external_semaphore_capabilities) -extern PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR; -#endif /* 
defined(VK_KHR_external_semaphore_capabilities) */ -#if defined(VK_KHR_external_semaphore_fd) -extern PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR; -extern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR; -#endif /* defined(VK_KHR_external_semaphore_fd) */ -#if defined(VK_KHR_external_semaphore_win32) -extern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR; -extern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR; -#endif /* defined(VK_KHR_external_semaphore_win32) */ -#if defined(VK_KHR_fragment_shading_rate) -extern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR; -extern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR; -#endif /* defined(VK_KHR_fragment_shading_rate) */ -#if defined(VK_KHR_get_display_properties2) -extern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR; -extern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR; -extern PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR; -extern PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR; -#endif /* defined(VK_KHR_get_display_properties2) */ -#if defined(VK_KHR_get_memory_requirements2) -extern PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR; -extern PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR; -extern PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR; -#endif /* defined(VK_KHR_get_memory_requirements2) */ -#if defined(VK_KHR_get_physical_device_properties2) -extern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR; -extern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR; -extern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR; -extern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR; -extern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR; -extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR; -extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR; -#endif /* defined(VK_KHR_get_physical_device_properties2) */ -#if defined(VK_KHR_get_surface_capabilities2) -extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR; -extern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR; -#endif /* defined(VK_KHR_get_surface_capabilities2) */ -#if defined(VK_KHR_line_rasterization) -extern PFN_vkCmdSetLineStippleKHR vkCmdSetLineStippleKHR; -#endif /* defined(VK_KHR_line_rasterization) */ -#if defined(VK_KHR_maintenance1) -extern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR; -#endif /* defined(VK_KHR_maintenance1) */ -#if defined(VK_KHR_maintenance3) -extern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR; -#endif /* defined(VK_KHR_maintenance3) */ -#if defined(VK_KHR_maintenance4) -extern PFN_vkGetDeviceBufferMemoryRequirementsKHR vkGetDeviceBufferMemoryRequirementsKHR; -extern PFN_vkGetDeviceImageMemoryRequirementsKHR vkGetDeviceImageMemoryRequirementsKHR; -extern PFN_vkGetDeviceImageSparseMemoryRequirementsKHR vkGetDeviceImageSparseMemoryRequirementsKHR; -#endif /* defined(VK_KHR_maintenance4) */ -#if defined(VK_KHR_maintenance5) -extern PFN_vkCmdBindIndexBuffer2KHR vkCmdBindIndexBuffer2KHR; 
-extern PFN_vkGetDeviceImageSubresourceLayoutKHR vkGetDeviceImageSubresourceLayoutKHR; -extern PFN_vkGetImageSubresourceLayout2KHR vkGetImageSubresourceLayout2KHR; -extern PFN_vkGetRenderingAreaGranularityKHR vkGetRenderingAreaGranularityKHR; -#endif /* defined(VK_KHR_maintenance5) */ -#if defined(VK_KHR_maintenance6) -extern PFN_vkCmdBindDescriptorSets2KHR vkCmdBindDescriptorSets2KHR; -extern PFN_vkCmdPushConstants2KHR vkCmdPushConstants2KHR; -#endif /* defined(VK_KHR_maintenance6) */ -#if defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) -extern PFN_vkCmdPushDescriptorSet2KHR vkCmdPushDescriptorSet2KHR; -extern PFN_vkCmdPushDescriptorSetWithTemplate2KHR vkCmdPushDescriptorSetWithTemplate2KHR; -#endif /* defined(VK_KHR_maintenance6) && defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) -extern PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT vkCmdBindDescriptorBufferEmbeddedSamplers2EXT; -extern PFN_vkCmdSetDescriptorBufferOffsets2EXT vkCmdSetDescriptorBufferOffsets2EXT; -#endif /* defined(VK_KHR_maintenance6) && defined(VK_EXT_descriptor_buffer) */ -#if defined(VK_KHR_map_memory2) -extern PFN_vkMapMemory2KHR vkMapMemory2KHR; -extern PFN_vkUnmapMemory2KHR vkUnmapMemory2KHR; -#endif /* defined(VK_KHR_map_memory2) */ -#if defined(VK_KHR_performance_query) -extern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR; -extern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; -extern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR; -extern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR; -#endif /* defined(VK_KHR_performance_query) */ -#if defined(VK_KHR_pipeline_binary) -extern PFN_vkCreatePipelineBinariesKHR vkCreatePipelineBinariesKHR; -extern PFN_vkDestroyPipelineBinaryKHR vkDestroyPipelineBinaryKHR; -extern PFN_vkGetPipelineBinaryDataKHR vkGetPipelineBinaryDataKHR; -extern PFN_vkGetPipelineKeyKHR vkGetPipelineKeyKHR; -extern PFN_vkReleaseCapturedPipelineDataKHR vkReleaseCapturedPipelineDataKHR; -#endif /* defined(VK_KHR_pipeline_binary) */ -#if defined(VK_KHR_pipeline_executable_properties) -extern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR; -extern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR; -extern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR; -#endif /* defined(VK_KHR_pipeline_executable_properties) */ -#if defined(VK_KHR_present_wait) -extern PFN_vkWaitForPresentKHR vkWaitForPresentKHR; -#endif /* defined(VK_KHR_present_wait) */ -#if defined(VK_KHR_push_descriptor) -extern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR; -#endif /* defined(VK_KHR_push_descriptor) */ -#if defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) -extern PFN_vkCmdTraceRaysIndirect2KHR vkCmdTraceRaysIndirect2KHR; -#endif /* defined(VK_KHR_ray_tracing_maintenance1) && defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_ray_tracing_pipeline) -extern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR; -extern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR; -extern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR; -extern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR; -extern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR 
vkGetRayTracingCaptureReplayShaderGroupHandlesKHR; -extern PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR; -extern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR; -#endif /* defined(VK_KHR_ray_tracing_pipeline) */ -#if defined(VK_KHR_sampler_ycbcr_conversion) -extern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR; -extern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR; -#endif /* defined(VK_KHR_sampler_ycbcr_conversion) */ -#if defined(VK_KHR_shared_presentable_image) -extern PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR; -#endif /* defined(VK_KHR_shared_presentable_image) */ -#if defined(VK_KHR_surface) -extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; -extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; -extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; -extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; -extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; -#endif /* defined(VK_KHR_surface) */ -#if defined(VK_KHR_swapchain) -extern PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; -extern PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; -extern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; -extern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; -extern PFN_vkQueuePresentKHR vkQueuePresentKHR; -#endif /* defined(VK_KHR_swapchain) */ -#if defined(VK_KHR_synchronization2) -extern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR; -extern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR; -extern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR; -extern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR; -extern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR; -extern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR; -#endif /* defined(VK_KHR_synchronization2) */ -#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) -extern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */ -#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) -extern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV; -#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_KHR_timeline_semaphore) -extern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR; -extern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR; -extern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR; -#endif /* defined(VK_KHR_timeline_semaphore) */ -#if defined(VK_KHR_video_decode_queue) -extern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR; -#endif /* defined(VK_KHR_video_decode_queue) */ -#if defined(VK_KHR_video_encode_queue) -extern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR; -extern PFN_vkGetEncodedVideoSessionParametersKHR vkGetEncodedVideoSessionParametersKHR; -extern PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR; -#endif /* defined(VK_KHR_video_encode_queue) */ -#if defined(VK_KHR_video_queue) -extern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR; -extern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR; -extern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR; -extern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR; -extern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR; -extern 
PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR; -extern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR; -extern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR; -extern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR; -extern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR; -extern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR; -extern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR; -#endif /* defined(VK_KHR_video_queue) */ -#if defined(VK_KHR_wayland_surface) -extern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR; -extern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR; -#endif /* defined(VK_KHR_wayland_surface) */ -#if defined(VK_KHR_win32_surface) -extern PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR; -extern PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR; -#endif /* defined(VK_KHR_win32_surface) */ -#if defined(VK_KHR_xcb_surface) -extern PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR; -extern PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR; -#endif /* defined(VK_KHR_xcb_surface) */ -#if defined(VK_KHR_xlib_surface) -extern PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR; -extern PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR; -#endif /* defined(VK_KHR_xlib_surface) */ -#if defined(VK_MVK_ios_surface) -extern PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK; -#endif /* defined(VK_MVK_ios_surface) */ -#if defined(VK_MVK_macos_surface) -extern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK; -#endif /* defined(VK_MVK_macos_surface) */ -#if defined(VK_NN_vi_surface) -extern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN; -#endif /* defined(VK_NN_vi_surface) */ -#if defined(VK_NVX_binary_import) -extern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX; -extern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX; -extern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX; -extern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX; -extern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX; -#endif /* defined(VK_NVX_binary_import) */ -#if defined(VK_NVX_image_view_handle) -extern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX; -extern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX; -#endif /* defined(VK_NVX_image_view_handle) */ -#if defined(VK_NV_acquire_winrt_display) -extern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV; -extern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV; -#endif /* defined(VK_NV_acquire_winrt_display) */ -#if defined(VK_NV_clip_space_w_scaling) -extern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV; -#endif /* defined(VK_NV_clip_space_w_scaling) */ -#if defined(VK_NV_cooperative_matrix) -extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV; -#endif /* defined(VK_NV_cooperative_matrix) */ -#if defined(VK_NV_copy_memory_indirect) -extern PFN_vkCmdCopyMemoryIndirectNV vkCmdCopyMemoryIndirectNV; -extern PFN_vkCmdCopyMemoryToImageIndirectNV vkCmdCopyMemoryToImageIndirectNV; -#endif /* defined(VK_NV_copy_memory_indirect) */ -#if defined(VK_NV_coverage_reduction_mode) -extern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV 
vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV; -#endif /* defined(VK_NV_coverage_reduction_mode) */ -#if defined(VK_NV_cuda_kernel_launch) -extern PFN_vkCmdCudaLaunchKernelNV vkCmdCudaLaunchKernelNV; -extern PFN_vkCreateCudaFunctionNV vkCreateCudaFunctionNV; -extern PFN_vkCreateCudaModuleNV vkCreateCudaModuleNV; -extern PFN_vkDestroyCudaFunctionNV vkDestroyCudaFunctionNV; -extern PFN_vkDestroyCudaModuleNV vkDestroyCudaModuleNV; -extern PFN_vkGetCudaModuleCacheNV vkGetCudaModuleCacheNV; -#endif /* defined(VK_NV_cuda_kernel_launch) */ -#if defined(VK_NV_device_diagnostic_checkpoints) -extern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV; -extern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV; -#endif /* defined(VK_NV_device_diagnostic_checkpoints) */ -#if defined(VK_NV_device_generated_commands) -extern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV; -extern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV; -extern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV; -extern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV; -extern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV; -extern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands) */ -#if defined(VK_NV_device_generated_commands_compute) -extern PFN_vkCmdUpdatePipelineIndirectBufferNV vkCmdUpdatePipelineIndirectBufferNV; -extern PFN_vkGetPipelineIndirectDeviceAddressNV vkGetPipelineIndirectDeviceAddressNV; -extern PFN_vkGetPipelineIndirectMemoryRequirementsNV vkGetPipelineIndirectMemoryRequirementsNV; -#endif /* defined(VK_NV_device_generated_commands_compute) */ -#if defined(VK_NV_external_memory_capabilities) -extern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV; -#endif /* defined(VK_NV_external_memory_capabilities) */ -#if defined(VK_NV_external_memory_rdma) -extern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV; -#endif /* defined(VK_NV_external_memory_rdma) */ -#if defined(VK_NV_external_memory_win32) -extern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV; -#endif /* defined(VK_NV_external_memory_win32) */ -#if defined(VK_NV_fragment_shading_rate_enums) -extern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV; -#endif /* defined(VK_NV_fragment_shading_rate_enums) */ -#if defined(VK_NV_low_latency2) -extern PFN_vkGetLatencyTimingsNV vkGetLatencyTimingsNV; -extern PFN_vkLatencySleepNV vkLatencySleepNV; -extern PFN_vkQueueNotifyOutOfBandNV vkQueueNotifyOutOfBandNV; -extern PFN_vkSetLatencyMarkerNV vkSetLatencyMarkerNV; -extern PFN_vkSetLatencySleepModeNV vkSetLatencySleepModeNV; -#endif /* defined(VK_NV_low_latency2) */ -#if defined(VK_NV_memory_decompression) -extern PFN_vkCmdDecompressMemoryIndirectCountNV vkCmdDecompressMemoryIndirectCountNV; -extern PFN_vkCmdDecompressMemoryNV vkCmdDecompressMemoryNV; -#endif /* defined(VK_NV_memory_decompression) */ -#if defined(VK_NV_mesh_shader) -extern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV; -extern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV; -extern PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV; -#endif /* defined(VK_NV_mesh_shader) */ -#if defined(VK_NV_optical_flow) -extern PFN_vkBindOpticalFlowSessionImageNV vkBindOpticalFlowSessionImageNV; -extern PFN_vkCmdOpticalFlowExecuteNV vkCmdOpticalFlowExecuteNV; -extern 
PFN_vkCreateOpticalFlowSessionNV vkCreateOpticalFlowSessionNV; -extern PFN_vkDestroyOpticalFlowSessionNV vkDestroyOpticalFlowSessionNV; -extern PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV vkGetPhysicalDeviceOpticalFlowImageFormatsNV; -#endif /* defined(VK_NV_optical_flow) */ -#if defined(VK_NV_ray_tracing) -extern PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV; -extern PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV; -extern PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV; -extern PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV; -extern PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV; -extern PFN_vkCompileDeferredNV vkCompileDeferredNV; -extern PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV; -extern PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV; -extern PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV; -extern PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV; -extern PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV; -extern PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV; -#endif /* defined(VK_NV_ray_tracing) */ -#if defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 -extern PFN_vkCmdSetExclusiveScissorEnableNV vkCmdSetExclusiveScissorEnableNV; -#endif /* defined(VK_NV_scissor_exclusive) && VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION >= 2 */ -#if defined(VK_NV_scissor_exclusive) -extern PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV; -#endif /* defined(VK_NV_scissor_exclusive) */ -#if defined(VK_NV_shading_rate_image) -extern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV; -extern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV; -extern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV; -#endif /* defined(VK_NV_shading_rate_image) */ -#if defined(VK_QCOM_tile_properties) -extern PFN_vkGetDynamicRenderingTilePropertiesQCOM vkGetDynamicRenderingTilePropertiesQCOM; -extern PFN_vkGetFramebufferTilePropertiesQCOM vkGetFramebufferTilePropertiesQCOM; -#endif /* defined(VK_QCOM_tile_properties) */ -#if defined(VK_QNX_external_memory_screen_buffer) -extern PFN_vkGetScreenBufferPropertiesQNX vkGetScreenBufferPropertiesQNX; -#endif /* defined(VK_QNX_external_memory_screen_buffer) */ -#if defined(VK_QNX_screen_surface) -extern PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX; -extern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX; -#endif /* defined(VK_QNX_screen_surface) */ -#if defined(VK_VALVE_descriptor_set_host_mapping) -extern PFN_vkGetDescriptorSetHostMappingVALVE vkGetDescriptorSetHostMappingVALVE; -extern PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE vkGetDescriptorSetLayoutHostMappingInfoVALVE; -#endif /* defined(VK_VALVE_descriptor_set_host_mapping) */ -#if (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) -extern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT; -extern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT; -extern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT; -extern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT; -extern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT; -extern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT; -extern PFN_vkCmdSetFrontFaceEXT 
vkCmdSetFrontFaceEXT; -extern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT; -extern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT; -extern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT; -extern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT; -extern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) -extern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT; -extern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT; -extern PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT; -extern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT; -extern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state2)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) -extern PFN_vkCmdSetAlphaToCoverageEnableEXT vkCmdSetAlphaToCoverageEnableEXT; -extern PFN_vkCmdSetAlphaToOneEnableEXT vkCmdSetAlphaToOneEnableEXT; -extern PFN_vkCmdSetColorBlendEnableEXT vkCmdSetColorBlendEnableEXT; -extern PFN_vkCmdSetColorBlendEquationEXT vkCmdSetColorBlendEquationEXT; -extern PFN_vkCmdSetColorWriteMaskEXT vkCmdSetColorWriteMaskEXT; -extern PFN_vkCmdSetDepthClampEnableEXT vkCmdSetDepthClampEnableEXT; -extern PFN_vkCmdSetLogicOpEnableEXT vkCmdSetLogicOpEnableEXT; -extern PFN_vkCmdSetPolygonModeEXT vkCmdSetPolygonModeEXT; -extern PFN_vkCmdSetRasterizationSamplesEXT vkCmdSetRasterizationSamplesEXT; -extern PFN_vkCmdSetSampleMaskEXT vkCmdSetSampleMaskEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3)) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) -extern PFN_vkCmdSetTessellationDomainOriginEXT vkCmdSetTessellationDomainOriginEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && (defined(VK_KHR_maintenance2) || defined(VK_VERSION_1_1))) || (defined(VK_EXT_shader_object)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) -extern PFN_vkCmdSetRasterizationStreamEXT vkCmdSetRasterizationStreamEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_transform_feedback)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_transform_feedback)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) -extern PFN_vkCmdSetConservativeRasterizationModeEXT vkCmdSetConservativeRasterizationModeEXT; -extern PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT vkCmdSetExtraPrimitiveOverestimationSizeEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_conservative_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_conservative_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) -extern PFN_vkCmdSetDepthClipEnableEXT vkCmdSetDepthClipEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_enable)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_enable)) */ 
-#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) -extern PFN_vkCmdSetSampleLocationsEnableEXT vkCmdSetSampleLocationsEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_sample_locations)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_sample_locations)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) -extern PFN_vkCmdSetColorBlendAdvancedEXT vkCmdSetColorBlendAdvancedEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_blend_operation_advanced)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_blend_operation_advanced)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) -extern PFN_vkCmdSetProvokingVertexModeEXT vkCmdSetProvokingVertexModeEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_provoking_vertex)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_provoking_vertex)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) -extern PFN_vkCmdSetLineRasterizationModeEXT vkCmdSetLineRasterizationModeEXT; -extern PFN_vkCmdSetLineStippleEnableEXT vkCmdSetLineStippleEnableEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_line_rasterization)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_line_rasterization)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) -extern PFN_vkCmdSetDepthClipNegativeOneToOneEXT vkCmdSetDepthClipNegativeOneToOneEXT; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_EXT_depth_clip_control)) || (defined(VK_EXT_shader_object) && defined(VK_EXT_depth_clip_control)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) -extern PFN_vkCmdSetViewportWScalingEnableNV vkCmdSetViewportWScalingEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_clip_space_w_scaling)) || (defined(VK_EXT_shader_object) && defined(VK_NV_clip_space_w_scaling)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) -extern PFN_vkCmdSetViewportSwizzleNV vkCmdSetViewportSwizzleNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_viewport_swizzle)) || (defined(VK_EXT_shader_object) && defined(VK_NV_viewport_swizzle)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) -extern PFN_vkCmdSetCoverageToColorEnableNV vkCmdSetCoverageToColorEnableNV; -extern PFN_vkCmdSetCoverageToColorLocationNV vkCmdSetCoverageToColorLocationNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_fragment_coverage_to_color)) || (defined(VK_EXT_shader_object) && defined(VK_NV_fragment_coverage_to_color)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && 
defined(VK_NV_framebuffer_mixed_samples)) -extern PFN_vkCmdSetCoverageModulationModeNV vkCmdSetCoverageModulationModeNV; -extern PFN_vkCmdSetCoverageModulationTableEnableNV vkCmdSetCoverageModulationTableEnableNV; -extern PFN_vkCmdSetCoverageModulationTableNV vkCmdSetCoverageModulationTableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_framebuffer_mixed_samples)) || (defined(VK_EXT_shader_object) && defined(VK_NV_framebuffer_mixed_samples)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) -extern PFN_vkCmdSetShadingRateImageEnableNV vkCmdSetShadingRateImageEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_shading_rate_image)) || (defined(VK_EXT_shader_object) && defined(VK_NV_shading_rate_image)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) -extern PFN_vkCmdSetRepresentativeFragmentTestEnableNV vkCmdSetRepresentativeFragmentTestEnableNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_representative_fragment_test)) || (defined(VK_EXT_shader_object) && defined(VK_NV_representative_fragment_test)) */ -#if (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) -extern PFN_vkCmdSetCoverageReductionModeNV vkCmdSetCoverageReductionModeNV; -#endif /* (defined(VK_EXT_extended_dynamic_state3) && defined(VK_NV_coverage_reduction_mode)) || (defined(VK_EXT_shader_object) && defined(VK_NV_coverage_reduction_mode)) */ -#if (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) -extern PFN_vkGetImageSubresourceLayout2EXT vkGetImageSubresourceLayout2EXT; -#endif /* (defined(VK_EXT_host_image_copy)) || (defined(VK_EXT_image_compression_control)) */ -#if (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) -extern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT; -#endif /* (defined(VK_EXT_shader_object)) || (defined(VK_EXT_vertex_input_dynamic_state)) */ -#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) -extern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR; -#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && (defined(VK_VERSION_1_1) || defined(VK_KHR_descriptor_update_template))) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) -extern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR; -extern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR; -extern PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) */ -#if (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1)) -extern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR; -#endif /* (defined(VK_KHR_device_group) && defined(VK_KHR_swapchain)) || (defined(VK_KHR_swapchain) && 
defined(VK_VERSION_1_1)) */
-/* VOLK_GENERATE_PROTOTYPES_H */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-#ifdef VOLK_IMPLEMENTATION
-#undef VOLK_IMPLEMENTATION
-/* Prevent tools like dependency checkers from detecting a cyclic dependency */
-#define VOLK_SOURCE "volk.c"
-#include VOLK_SOURCE
-#endif
-
-/**
- * Copyright (c) 2018-2024 Arseny Kapoulkine
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
-*/
-/* clang-format on */
diff --git a/third_party/vulkan/vulkan.cppm b/third_party/vulkan/vulkan.cppm
index 2af062c..0ba8645 100644
--- a/third_party/vulkan/vulkan.cppm
+++ b/third_party/vulkan/vulkan.cppm
@@ -12,7 +12,7 @@ module;
 #include
-#if defined( __cpp_lib_modules )
+#if defined( __cpp_lib_modules ) && !defined( VULKAN_HPP_ENABLE_STD_MODULE )
 # define VULKAN_HPP_ENABLE_STD_MODULE
 #endif
@@ -364,6 +364,34 @@ export namespace VULKAN_HPP_NAMESPACE
 using VULKAN_HPP_NAMESPACE::ToolPurposeFlags;
 using VULKAN_HPP_NAMESPACE::ToolPurposeFlagsEXT;
+ //=== VK_VERSION_1_4 ===
+ using VULKAN_HPP_NAMESPACE::BufferUsageFlagBits2;
+ using VULKAN_HPP_NAMESPACE::BufferUsageFlagBits2KHR;
+ using VULKAN_HPP_NAMESPACE::BufferUsageFlags2;
+ using VULKAN_HPP_NAMESPACE::BufferUsageFlags2KHR;
+ using VULKAN_HPP_NAMESPACE::HostImageCopyFlagBits;
+ using VULKAN_HPP_NAMESPACE::HostImageCopyFlagBitsEXT;
+ using VULKAN_HPP_NAMESPACE::HostImageCopyFlags;
+ using VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT;
+ using VULKAN_HPP_NAMESPACE::LineRasterizationMode;
+ using VULKAN_HPP_NAMESPACE::LineRasterizationModeEXT;
+ using VULKAN_HPP_NAMESPACE::LineRasterizationModeKHR;
+ using VULKAN_HPP_NAMESPACE::MemoryUnmapFlagBits;
+ using VULKAN_HPP_NAMESPACE::MemoryUnmapFlagBitsKHR;
+ using VULKAN_HPP_NAMESPACE::MemoryUnmapFlags;
+ using VULKAN_HPP_NAMESPACE::MemoryUnmapFlagsKHR;
+ using VULKAN_HPP_NAMESPACE::PipelineCreateFlagBits2;
+ using VULKAN_HPP_NAMESPACE::PipelineCreateFlagBits2KHR;
+ using VULKAN_HPP_NAMESPACE::PipelineCreateFlags2;
+ using VULKAN_HPP_NAMESPACE::PipelineCreateFlags2KHR;
+ using VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior;
+ using VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT;
+ using VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior;
+ using VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT;
+ using VULKAN_HPP_NAMESPACE::QueueGlobalPriority;
+ using VULKAN_HPP_NAMESPACE::QueueGlobalPriorityEXT;
+ using VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR;
+
 //=== VK_KHR_surface ===
 using VULKAN_HPP_NAMESPACE::ColorSpaceKHR;
 using
VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR; @@ -505,10 +533,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::ViSurfaceCreateFlagsNN; #endif /*VK_USE_PLATFORM_VI_NN*/ - //=== VK_EXT_pipeline_robustness === - using VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT; - using VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT; - //=== VK_EXT_conditional_rendering === using VULKAN_HPP_NAMESPACE::ConditionalRenderingFlagBitsEXT; using VULKAN_HPP_NAMESPACE::ConditionalRenderingFlagsEXT; @@ -630,10 +654,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::PipelineCompilerControlFlagBitsAMD; using VULKAN_HPP_NAMESPACE::PipelineCompilerControlFlagsAMD; - //=== VK_KHR_global_priority === - using VULKAN_HPP_NAMESPACE::QueueGlobalPriorityEXT; - using VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR; - //=== VK_AMD_memory_overallocation_behavior === using VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD; @@ -687,14 +707,6 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_pipeline_executable_properties === using VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR; - //=== VK_EXT_host_image_copy === - using VULKAN_HPP_NAMESPACE::HostImageCopyFlagBitsEXT; - using VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT; - - //=== VK_KHR_map_memory2 === - using VULKAN_HPP_NAMESPACE::MemoryUnmapFlagBitsKHR; - using VULKAN_HPP_NAMESPACE::MemoryUnmapFlagsKHR; - //=== VK_EXT_surface_maintenance1 === using VULKAN_HPP_NAMESPACE::PresentGravityFlagBitsEXT; using VULKAN_HPP_NAMESPACE::PresentGravityFlagsEXT; @@ -841,12 +853,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::OpticalFlowUsageFlagBitsNV; using VULKAN_HPP_NAMESPACE::OpticalFlowUsageFlagsNV; - //=== VK_KHR_maintenance5 === - using VULKAN_HPP_NAMESPACE::BufferUsageFlagBits2KHR; - using VULKAN_HPP_NAMESPACE::BufferUsageFlags2KHR; - using VULKAN_HPP_NAMESPACE::PipelineCreateFlagBits2KHR; - using VULKAN_HPP_NAMESPACE::PipelineCreateFlags2KHR; - //=== VK_AMD_anti_lag === using VULKAN_HPP_NAMESPACE::AntiLagModeAMD; using VULKAN_HPP_NAMESPACE::AntiLagStageAMD; @@ -893,10 +899,6 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_MSFT_layered_driver === using VULKAN_HPP_NAMESPACE::LayeredDriverUnderlyingApiMSFT; - //=== VK_KHR_line_rasterization === - using VULKAN_HPP_NAMESPACE::LineRasterizationModeEXT; - using VULKAN_HPP_NAMESPACE::LineRasterizationModeKHR; - //=== VK_KHR_calibrated_timestamps === using VULKAN_HPP_NAMESPACE::TimeDomainEXT; using VULKAN_HPP_NAMESPACE::TimeDomainKHR; @@ -958,7 +960,7 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::make_error_condition; using VULKAN_HPP_NAMESPACE::MemoryMapFailedError; using VULKAN_HPP_NAMESPACE::NativeWindowInUseKHRError; - using VULKAN_HPP_NAMESPACE::NotPermittedKHRError; + using VULKAN_HPP_NAMESPACE::NotPermittedError; using VULKAN_HPP_NAMESPACE::OutOfDateKHRError; using VULKAN_HPP_NAMESPACE::OutOfDeviceMemoryError; using VULKAN_HPP_NAMESPACE::OutOfHostMemoryError; @@ -1016,6 +1018,9 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::MaxDriverInfoSize; using VULKAN_HPP_NAMESPACE::MaxDriverNameSize; + //=== VK_VERSION_1_4 === + using VULKAN_HPP_NAMESPACE::MaxGlobalPrioritySize; + //=== VK_KHR_surface === using VULKAN_HPP_NAMESPACE::KHRSurfaceExtensionName; using VULKAN_HPP_NAMESPACE::KHRSurfaceSpecVersion; @@ -2678,6 +2683,7 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::ApiVersion11; using VULKAN_HPP_NAMESPACE::ApiVersion12; using VULKAN_HPP_NAMESPACE::ApiVersion13; + using 
VULKAN_HPP_NAMESPACE::ApiVersion14; using VULKAN_HPP_NAMESPACE::HeaderVersionComplete; //=============== @@ -3140,6 +3146,120 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::WriteDescriptorSetInlineUniformBlock; using VULKAN_HPP_NAMESPACE::WriteDescriptorSetInlineUniformBlockEXT; + //=== VK_VERSION_1_4 === + using VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo; + using VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR; + using VULKAN_HPP_NAMESPACE::BindMemoryStatus; + using VULKAN_HPP_NAMESPACE::BindMemoryStatusKHR; + using VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfo; + using VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfoKHR; + using VULKAN_HPP_NAMESPACE::CopyImageToImageInfo; + using VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT; + using VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo; + using VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT; + using VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo; + using VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT; + using VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo; + using VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR; + using VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfo; + using VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfoEXT; + using VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfoKHR; + using VULKAN_HPP_NAMESPACE::HostImageCopyDevicePerformanceQuery; + using VULKAN_HPP_NAMESPACE::HostImageCopyDevicePerformanceQueryEXT; + using VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfo; + using VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfoEXT; + using VULKAN_HPP_NAMESPACE::ImageSubresource2; + using VULKAN_HPP_NAMESPACE::ImageSubresource2EXT; + using VULKAN_HPP_NAMESPACE::ImageSubresource2KHR; + using VULKAN_HPP_NAMESPACE::ImageToMemoryCopy; + using VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT; + using VULKAN_HPP_NAMESPACE::MemoryMapInfo; + using VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR; + using VULKAN_HPP_NAMESPACE::MemoryToImageCopy; + using VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT; + using VULKAN_HPP_NAMESPACE::MemoryUnmapInfo; + using VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceDynamicRenderingLocalReadFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyFeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyProperties; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyPropertiesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8Features; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8FeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8FeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationProperties; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationPropertiesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationPropertiesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5Features; + using 
VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5FeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5Properties; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5PropertiesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6Features; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6FeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6Properties; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6PropertiesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineProtectedAccessFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineProtectedAccessFeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessFeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessProperties; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessPropertiesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePushDescriptorProperties; + using VULKAN_HPP_NAMESPACE::PhysicalDevicePushDescriptorPropertiesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderExpectAssumeFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderExpectAssumeFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderFloatControls2Features; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderFloatControls2FeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderSubgroupRotateFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderSubgroupRotateFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeatures; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeaturesEXT; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeaturesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorProperties; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorPropertiesKHR; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceVulkan14Features; + using VULKAN_HPP_NAMESPACE::PhysicalDeviceVulkan14Properties; + using VULKAN_HPP_NAMESPACE::PipelineCreateFlags2CreateInfo; + using VULKAN_HPP_NAMESPACE::PipelineCreateFlags2CreateInfoKHR; + using VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfo; + using VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfoEXT; + using VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfoKHR; + using VULKAN_HPP_NAMESPACE::PipelineRobustnessCreateInfo; + using VULKAN_HPP_NAMESPACE::PipelineRobustnessCreateInfoEXT; + using VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfo; + using VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfoEXT; + using VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfoKHR; + using VULKAN_HPP_NAMESPACE::PushConstantsInfo; + using VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR; + using VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo; + using VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR; + using VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo; + using VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR; + using VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityProperties; + using VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityPropertiesEXT; + using VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityPropertiesKHR; + using VULKAN_HPP_NAMESPACE::RenderingAreaInfo; + using VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR; + using VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo; + using VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR; + using 
VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo; + using VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR; + using VULKAN_HPP_NAMESPACE::SubresourceHostMemcpySize; + using VULKAN_HPP_NAMESPACE::SubresourceHostMemcpySizeEXT; + using VULKAN_HPP_NAMESPACE::SubresourceLayout2; + using VULKAN_HPP_NAMESPACE::SubresourceLayout2EXT; + using VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR; + using VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescription; + using VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionEXT; + using VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR; + //=== VK_KHR_surface === using VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR; using VULKAN_HPP_NAMESPACE::SurfaceFormatKHR; @@ -3335,11 +3455,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::ImageViewASTCDecodeModeEXT; using VULKAN_HPP_NAMESPACE::PhysicalDeviceASTCDecodeFeaturesEXT; - //=== VK_EXT_pipeline_robustness === - using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessFeaturesEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessPropertiesEXT; - using VULKAN_HPP_NAMESPACE::PipelineRobustnessCreateInfoEXT; - #if defined( VK_USE_PLATFORM_WIN32_KHR ) //=== VK_KHR_external_memory_win32 === using VULKAN_HPP_NAMESPACE::ExportMemoryWin32HandleInfoKHR; @@ -3370,9 +3485,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::ImportSemaphoreFdInfoKHR; using VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR; - //=== VK_KHR_push_descriptor === - using VULKAN_HPP_NAMESPACE::PhysicalDevicePushDescriptorPropertiesKHR; - //=== VK_EXT_conditional_rendering === using VULKAN_HPP_NAMESPACE::CommandBufferInheritanceConditionalRenderingInfoEXT; using VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT; @@ -3646,14 +3758,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::VideoDecodeH265SessionParametersAddInfoKHR; using VULKAN_HPP_NAMESPACE::VideoDecodeH265SessionParametersCreateInfoKHR; - //=== VK_KHR_global_priority === - using VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfoEXT; - using VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfoKHR; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeaturesEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeaturesKHR; - using VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityPropertiesEXT; - using VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityPropertiesKHR; - //=== VK_AMD_memory_overallocation_behavior === using VULKAN_HPP_NAMESPACE::DeviceMemoryOverallocationCreateInfoAMD; @@ -3734,11 +3838,6 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_AMD_device_coherent_memory === using VULKAN_HPP_NAMESPACE::PhysicalDeviceCoherentMemoryFeaturesAMD; - //=== VK_KHR_dynamic_rendering_local_read === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; - using VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR; - using VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR; - //=== VK_EXT_shader_image_atomic_int64 === using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderImageAtomicInt64FeaturesEXT; @@ -3816,22 +3915,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::PipelineInfoEXT; using VULKAN_HPP_NAMESPACE::PipelineInfoKHR; - //=== VK_EXT_host_image_copy === - using VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT; - using VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT; - using VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT; - using VULKAN_HPP_NAMESPACE::HostImageCopyDevicePerformanceQueryEXT; - using 
VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfoEXT; - using VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT; - using VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyFeaturesEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyPropertiesEXT; - using VULKAN_HPP_NAMESPACE::SubresourceHostMemcpySizeEXT; - - //=== VK_KHR_map_memory2 === - using VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR; - using VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR; - //=== VK_EXT_map_memory_placed === using VULKAN_HPP_NAMESPACE::MemoryMapPlacedInfoEXT; using VULKAN_HPP_NAMESPACE::PhysicalDeviceMapMemoryPlacedFeaturesEXT; @@ -4193,9 +4276,6 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_ARM_shader_core_properties === using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderCorePropertiesARM; - //=== VK_KHR_shader_subgroup_rotate === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderSubgroupRotateFeaturesKHR; - //=== VK_ARM_scheduling_controls === using VULKAN_HPP_NAMESPACE::DeviceQueueShaderCoreControlCreateInfoARM; using VULKAN_HPP_NAMESPACE::PhysicalDeviceSchedulingControlsFeaturesARM; @@ -4306,9 +4386,6 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_legacy_dithering === using VULKAN_HPP_NAMESPACE::PhysicalDeviceLegacyDitheringFeaturesEXT; - //=== VK_EXT_pipeline_protected_access === - using VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineProtectedAccessFeaturesEXT; - #if defined( VK_USE_PLATFORM_ANDROID_KHR ) //=== VK_ANDROID_external_format_resolve === using VULKAN_HPP_NAMESPACE::AndroidHardwareBufferFormatResolvePropertiesANDROID; @@ -4316,18 +4393,6 @@ export namespace VULKAN_HPP_NAMESPACE using VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFormatResolvePropertiesANDROID; #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - //=== VK_KHR_maintenance5 === - using VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfoKHR; - using VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR; - using VULKAN_HPP_NAMESPACE::ImageSubresource2EXT; - using VULKAN_HPP_NAMESPACE::ImageSubresource2KHR; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5FeaturesKHR; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5PropertiesKHR; - using VULKAN_HPP_NAMESPACE::PipelineCreateFlags2CreateInfoKHR; - using VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR; - using VULKAN_HPP_NAMESPACE::SubresourceLayout2EXT; - using VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR; - //=== VK_AMD_anti_lag === using VULKAN_HPP_NAMESPACE::AntiLagDataAMD; using VULKAN_HPP_NAMESPACE::AntiLagPresentationInfoAMD; @@ -4474,18 +4539,6 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_attachment_feedback_loop_dynamic_state === using VULKAN_HPP_NAMESPACE::PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT; - //=== VK_KHR_vertex_attribute_divisor === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeaturesEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeaturesKHR; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorPropertiesKHR; - using VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfoEXT; - using VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfoKHR; - using VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionEXT; - using VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR; - - //=== VK_KHR_shader_float_controls2 === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderFloatControls2FeaturesKHR; - #if defined( VK_USE_PLATFORM_SCREEN_QNX ) //=== VK_QNX_external_memory_screen_buffer === using VULKAN_HPP_NAMESPACE::ExternalFormatQNX; 
@@ -4498,34 +4551,12 @@ export namespace VULKAN_HPP_NAMESPACE //=== VK_MSFT_layered_driver === using VULKAN_HPP_NAMESPACE::PhysicalDeviceLayeredDriverPropertiesMSFT; - //=== VK_KHR_index_type_uint8 === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8FeaturesEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8FeaturesKHR; - - //=== VK_KHR_line_rasterization === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeaturesEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeaturesKHR; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationPropertiesEXT; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationPropertiesKHR; - using VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfoEXT; - using VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfoKHR; - //=== VK_KHR_calibrated_timestamps === using VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT; using VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoKHR; - //=== VK_KHR_shader_expect_assume === - using VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderExpectAssumeFeaturesKHR; - //=== VK_KHR_maintenance6 === using VULKAN_HPP_NAMESPACE::BindDescriptorBufferEmbeddedSamplersInfoEXT; - using VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR; - using VULKAN_HPP_NAMESPACE::BindMemoryStatusKHR; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6FeaturesKHR; - using VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6PropertiesKHR; - using VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR; - using VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR; - using VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR; using VULKAN_HPP_NAMESPACE::SetDescriptorBufferOffsetsInfoEXT; //=== VK_NV_descriptor_pool_overallocation === @@ -5143,3 +5174,3049 @@ export namespace VULKAN_HPP_NAMESPACE } // namespace VULKAN_HPP_RAII_NAMESPACE #endif } // namespace VULKAN_HPP_NAMESPACE + +export namespace std +{ + + //======================================= + //=== HASH specialization for Flags types === + //======================================= + + template + struct hash>; + + //======================================== + //=== HASH specializations for handles === + //======================================== + + //=== VK_VERSION_1_0 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_VERSION_1_1 === + template <> + struct hash; + template <> + struct hash; + + //=== VK_VERSION_1_3 === + template <> + struct hash; + + //=== VK_KHR_surface === + template <> + struct hash; + + //=== VK_KHR_swapchain === + template <> + struct hash; + + //=== VK_KHR_display === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_debug_report === + template <> + struct hash; + + //=== VK_KHR_video_queue === + template <> + struct hash; + template <> + struct hash; + + //=== VK_NVX_binary_import === + template <> + struct hash; + template <> 
+ struct hash; + + //=== VK_EXT_debug_utils === + template <> + struct hash; + + //=== VK_KHR_acceleration_structure === + template <> + struct hash; + + //=== VK_EXT_validation_cache === + template <> + struct hash; + + //=== VK_NV_ray_tracing === + template <> + struct hash; + + //=== VK_INTEL_performance_query === + template <> + struct hash; + + //=== VK_KHR_deferred_host_operations === + template <> + struct hash; + + //=== VK_NV_device_generated_commands === + template <> + struct hash; + +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + //=== VK_NV_cuda_kernel_launch === + template <> + struct hash; + template <> + struct hash; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + +#if defined( VK_USE_PLATFORM_FUCHSIA ) + //=== VK_FUCHSIA_buffer_collection === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + + //=== VK_EXT_opacity_micromap === + template <> + struct hash; + + //=== VK_NV_optical_flow === + template <> + struct hash; + + //=== VK_EXT_shader_object === + template <> + struct hash; + + //=== VK_KHR_pipeline_binary === + template <> + struct hash; + + //=== VK_EXT_device_generated_commands === + template <> + struct hash; + template <> + struct hash; + + //======================================== + //=== HASH specializations for structs === + //======================================== + + //=== VK_VERSION_1_0 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct 
hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_VERSION_1_1 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_VERSION_1_2 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template 
<> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_VERSION_1_3 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_VERSION_1_4 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct 
hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_surface === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_swapchain === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_display === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_display_swapchain === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_XLIB_KHR ) + //=== VK_KHR_xlib_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + +#if defined( VK_USE_PLATFORM_XCB_KHR ) + //=== VK_KHR_xcb_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#if defined( VK_USE_PLATFORM_WAYLAND_KHR ) + //=== VK_KHR_wayland_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#if defined( VK_USE_PLATFORM_ANDROID_KHR ) + //=== VK_KHR_android_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_KHR_win32_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + //=== VK_EXT_debug_report === + template <> + struct hash; + + //=== VK_AMD_rasterization_order === + template <> + struct hash; + + //=== VK_EXT_debug_marker === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_queue === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_decode_queue === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_dedicated_allocation === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_transform_feedback === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NVX_binary_import === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NVX_image_view_handle === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_encode_h264 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + 
template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_encode_h265 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_decode_h264 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_AMD_texture_gather_bias_lod === + template <> + struct hash; + + //=== VK_AMD_shader_info === + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_GGP ) + //=== VK_GGP_stream_descriptor_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_GGP*/ + + //=== VK_NV_corner_sampled_image === + template <> + struct hash; + + //=== VK_NV_external_memory_capabilities === + template <> + struct hash; + + //=== VK_NV_external_memory === + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_NV_external_memory_win32 === + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_NV_win32_keyed_mutex === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + //=== VK_KHR_device_group === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_validation_flags === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_VI_NN ) + //=== VK_NN_vi_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_VI_NN*/ + + //=== VK_EXT_astc_decode_mode === + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_KHR_external_memory_win32 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + //=== VK_KHR_external_memory_fd === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_KHR_win32_keyed_mutex === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_KHR_external_semaphore_win32 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + //=== VK_KHR_external_semaphore_fd === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_conditional_rendering === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_incremental_present === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_clip_space_w_scaling === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_display_surface_counter 
=== + template <> + struct hash; + + //=== VK_EXT_display_control === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_GOOGLE_display_timing === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NVX_multiview_per_view_attributes === + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_viewport_swizzle === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_discard_rectangles === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_conservative_rasterization === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_depth_clip_enable === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_hdr_metadata === + template <> + struct hash; + template <> + struct hash; + + //=== VK_IMG_relaxed_line_rasterization === + template <> + struct hash; + + //=== VK_KHR_shared_presentable_image === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_KHR_external_fence_win32 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + //=== VK_KHR_external_fence_fd === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_performance_query === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_get_surface_capabilities2 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_get_display_properties2 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_IOS_MVK ) + //=== VK_MVK_ios_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + +#if defined( VK_USE_PLATFORM_MACOS_MVK ) + //=== VK_MVK_macos_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + //=== VK_EXT_debug_utils === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_ANDROID_KHR ) + //=== VK_ANDROID_external_memory_android_hardware_buffer === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + //=== VK_AMDX_shader_enqueue === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + + //=== VK_AMD_mixed_attachment_samples === + template <> + struct hash; + + //=== VK_EXT_sample_locations === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_blend_operation_advanced === + template 
<> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_fragment_coverage_to_color === + template <> + struct hash; + + //=== VK_KHR_acceleration_structure === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_ray_tracing_pipeline === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_ray_query === + template <> + struct hash; + + //=== VK_NV_framebuffer_mixed_samples === + template <> + struct hash; + + //=== VK_NV_shader_sm_builtins === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_image_drm_format_modifier === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_validation_cache === + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + //=== VK_KHR_portability_subset === + template <> + struct hash; + template <> + struct hash; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + + //=== VK_NV_shading_rate_image === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_ray_tracing === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_representative_fragment_test === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_filter_cubic === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_external_memory_host === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_shader_clock === + template <> + struct hash; + + //=== VK_AMD_pipeline_compiler_control === + template <> + struct hash; + + //=== VK_AMD_shader_core_properties === + template <> + struct hash; + + //=== VK_KHR_video_decode_h265 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_AMD_memory_overallocation_behavior === + template <> + struct hash; + + //=== VK_EXT_vertex_attribute_divisor === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_GGP ) + //=== VK_GGP_frame_token === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_GGP*/ + + //=== VK_NV_mesh_shader 
=== + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_shader_image_footprint === + template <> + struct hash; + + //=== VK_NV_scissor_exclusive === + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_device_diagnostic_checkpoints === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_INTEL_shader_integer_functions2 === + template <> + struct hash; + + //=== VK_INTEL_performance_query === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_pci_bus_info === + template <> + struct hash; + + //=== VK_AMD_display_native_hdr === + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_FUCHSIA ) + //=== VK_FUCHSIA_imagepipe_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + +#if defined( VK_USE_PLATFORM_METAL_EXT ) + //=== VK_EXT_metal_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + + //=== VK_EXT_fragment_density_map === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_fragment_shading_rate === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_AMD_shader_core_properties2 === + template <> + struct hash; + + //=== VK_AMD_device_coherent_memory === + template <> + struct hash; + + //=== VK_EXT_shader_image_atomic_int64 === + template <> + struct hash; + + //=== VK_KHR_shader_quad_control === + template <> + struct hash; + + //=== VK_EXT_memory_budget === + template <> + struct hash; + + //=== VK_EXT_memory_priority === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_surface_protected_capabilities === + template <> + struct hash; + + //=== VK_NV_dedicated_allocation_image_aliasing === + template <> + struct hash; + + //=== VK_EXT_buffer_device_address === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_validation_features === + template <> + struct hash; + + //=== VK_KHR_present_wait === + template <> + struct hash; + + //=== VK_NV_cooperative_matrix === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_coverage_reduction_mode === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_fragment_shader_interlock === + template <> + struct hash; + + //=== VK_EXT_ycbcr_image_arrays === + template <> + struct hash; + + //=== VK_EXT_provoking_vertex === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_WIN32_KHR ) + //=== VK_EXT_full_screen_exclusive === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + //=== VK_EXT_headless_surface === + template <> + struct hash; + + //=== VK_EXT_shader_atomic_float === + template <> + struct hash; + + //=== VK_EXT_extended_dynamic_state === + template <> + struct hash; + + //=== VK_KHR_pipeline_executable_properties === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template 
<> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_map_memory_placed === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_shader_atomic_float2 === + template <> + struct hash; + + //=== VK_EXT_surface_maintenance1 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_swapchain_maintenance1 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_device_generated_commands === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_inherited_viewport_scissor === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_texel_buffer_alignment === + template <> + struct hash; + + //=== VK_QCOM_render_pass_transform === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_depth_bias_control === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_device_memory_report === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_robustness2 === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_custom_border_color === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_pipeline_library === + template <> + struct hash; + + //=== VK_NV_present_barrier === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_present_id === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_encode_queue === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_device_diagnostics_config === + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + //=== VK_NV_cuda_kernel_launch === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + + //=== VK_NV_low_latency === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_METAL_EXT ) + //=== VK_EXT_metal_objects === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + + //=== VK_EXT_descriptor_buffer === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template 
<> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_graphics_pipeline_library === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_AMD_shader_early_and_late_fragment_tests === + template <> + struct hash; + + //=== VK_KHR_fragment_shader_barycentric === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_shader_subgroup_uniform_control_flow === + template <> + struct hash; + + //=== VK_NV_fragment_shading_rate_enums === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_ray_tracing_motion_blur === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_mesh_shader === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_ycbcr_2plane_444_formats === + template <> + struct hash; + + //=== VK_EXT_fragment_density_map2 === + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_rotated_copy_commands === + template <> + struct hash; + + //=== VK_KHR_workgroup_memory_explicit_layout === + template <> + struct hash; + + //=== VK_EXT_image_compression_control === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_attachment_feedback_loop_layout === + template <> + struct hash; + + //=== VK_EXT_4444_formats === + template <> + struct hash; + + //=== VK_EXT_device_fault === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_rgba10x6_formats === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_DIRECTFB_EXT ) + //=== VK_EXT_directfb_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + //=== VK_EXT_vertex_input_dynamic_state === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_physical_device_drm === + template <> + struct hash; + + //=== VK_EXT_device_address_binding_report === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_depth_clip_control === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_primitive_topology_list_restart === + template <> + struct hash; + + //=== VK_EXT_present_mode_fifo_latest_ready === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_FUCHSIA ) + //=== VK_FUCHSIA_external_memory === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + +#if defined( VK_USE_PLATFORM_FUCHSIA ) + //=== VK_FUCHSIA_external_semaphore === + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + +#if defined( VK_USE_PLATFORM_FUCHSIA ) + //=== VK_FUCHSIA_buffer_collection === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + 
struct hash; +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + + //=== VK_HUAWEI_subpass_shading === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_HUAWEI_invocation_mask === + template <> + struct hash; + + //=== VK_NV_external_memory_rdma === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_pipeline_properties === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_frame_boundary === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_multisampled_render_to_single_sampled === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_extended_dynamic_state2 === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_SCREEN_QNX ) + //=== VK_QNX_screen_surface === + template <> + struct hash; +#endif /*VK_USE_PLATFORM_SCREEN_QNX*/ + + //=== VK_EXT_color_write_enable === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_primitives_generated_query === + template <> + struct hash; + + //=== VK_KHR_ray_tracing_maintenance1 === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_image_view_min_lod === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_multi_draw === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_image_2d_view_of_3d === + template <> + struct hash; + + //=== VK_EXT_shader_tile_image === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_opacity_micromap === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + //=== VK_NV_displacement_micromap === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + + //=== VK_HUAWEI_cluster_culling_shader === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_border_color_swizzle === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_pageable_device_local_memory === + template <> + struct hash; + + //=== VK_ARM_shader_core_properties === + template <> + struct hash; + + //=== VK_ARM_scheduling_controls === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_image_sliced_view_of_3d === + template <> + struct hash; + template <> + struct hash; + + //=== VK_VALVE_descriptor_set_host_mapping === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_depth_clamp_zero_one === + template <> + struct hash; + + //=== VK_EXT_non_seamless_cube_map === + template <> + struct hash; + + //=== VK_ARM_render_pass_striped === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_fragment_density_map_offset === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_copy_memory_indirect === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> 
+ struct hash; + + //=== VK_NV_memory_decompression === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_device_generated_commands_compute === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_linear_color_attachment === + template <> + struct hash; + + //=== VK_KHR_shader_maximal_reconvergence === + template <> + struct hash; + + //=== VK_EXT_image_compression_control_swapchain === + template <> + struct hash; + + //=== VK_QCOM_image_processing === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_nested_command_buffer === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_external_memory_acquire_unmodified === + template <> + struct hash; + + //=== VK_EXT_extended_dynamic_state3 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_subpass_merge_feedback === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_LUNARG_direct_driver_loading === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_shader_module_identifier === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_rasterization_order_attachment_access === + template <> + struct hash; + + //=== VK_NV_optical_flow === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_legacy_dithering === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_ANDROID_KHR ) + //=== VK_ANDROID_external_format_resolve === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + //=== VK_AMD_anti_lag === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_ray_tracing_position_fetch === + template <> + struct hash; + + //=== VK_EXT_shader_object === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_pipeline_binary === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_tile_properties === + template <> + struct hash; + template <> + struct hash; + + //=== VK_SEC_amigo_profiling === + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_multiview_per_view_viewports === + template <> + struct hash; + + //=== VK_NV_ray_tracing_invocation_reorder === + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_extended_sparse_address_space === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_mutable_descriptor_type === + template <> + struct hash; + template <> + struct hash; + template <> + struct 
hash; + + //=== VK_EXT_legacy_vertex_attributes === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_layer_settings === + template <> + struct hash; + template <> + struct hash; + + //=== VK_ARM_shader_core_builtins === + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_pipeline_library_group_handles === + template <> + struct hash; + + //=== VK_EXT_dynamic_rendering_unused_attachments === + template <> + struct hash; + + //=== VK_NV_low_latency2 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_cooperative_matrix === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_multiview_per_view_render_areas === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_compute_shader_derivatives === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_decode_av1 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_encode_av1 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_maintenance1 === + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_per_stage_descriptor_set === + template <> + struct hash; + + //=== VK_QCOM_image_processing2 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_filter_cubic_weights === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_ycbcr_degamma === + template <> + struct hash; + template <> + struct hash; + + //=== VK_QCOM_filter_cubic_clamp === + template <> + struct hash; + + //=== VK_EXT_attachment_feedback_loop_dynamic_state === + template <> + struct hash; + +#if defined( VK_USE_PLATFORM_SCREEN_QNX ) + //=== VK_QNX_external_memory_screen_buffer === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; +#endif /*VK_USE_PLATFORM_SCREEN_QNX*/ + + //=== VK_MSFT_layered_driver === + template <> + struct hash; + + //=== VK_KHR_calibrated_timestamps === + template <> + struct hash; + + //=== VK_KHR_maintenance6 === + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_descriptor_pool_overallocation === + template <> + struct hash; + + //=== VK_NV_display_stereo === + template <> + struct hash; + template <> + struct hash; + + //=== VK_KHR_video_encode_quantization_map === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_raw_access_chains === + template <> + struct hash; + + //=== VK_KHR_shader_relaxed_extended_instruction === + template <> + 
struct hash; + + //=== VK_NV_command_buffer_inheritance === + template <> + struct hash; + + //=== VK_KHR_maintenance7 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_shader_atomic_float16_vector === + template <> + struct hash; + + //=== VK_EXT_shader_replicated_composites === + template <> + struct hash; + + //=== VK_NV_ray_tracing_validation === + template <> + struct hash; + + //=== VK_EXT_device_generated_commands === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_MESA_image_alignment_control === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_depth_clamp_control === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_HUAWEI_hdr_vivid === + template <> + struct hash; + template <> + struct hash; + + //=== VK_NV_cooperative_matrix2 === + template <> + struct hash; + template <> + struct hash; + template <> + struct hash; + + //=== VK_EXT_vertex_attribute_robustness === + template <> + struct hash; + +} // namespace std diff --git a/third_party/vulkan/vulkan.hpp b/third_party/vulkan/vulkan.hpp index e20d00f..96c7cc8 100644 --- a/third_party/vulkan/vulkan.hpp +++ b/third_party/vulkan/vulkan.hpp @@ -63,7 +63,7 @@ extern "C" __declspec( dllimport ) FARPROC __stdcall GetProcAddress( HINSTANCE h # include #endif -static_assert( VK_HEADER_VERSION == 302, "Wrong VK_HEADER_VERSION!" ); +static_assert( VK_HEADER_VERSION == 303, "Wrong VK_HEADER_VERSION!" 
); // includes through some other header // this results in major(x) being resolved to gnu_dev_major(x) @@ -2415,6 +2415,122 @@ namespace VULKAN_HPP_NAMESPACE return ::vkGetDeviceImageSparseMemoryRequirements( device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements ); } + //=== VK_VERSION_1_4 === + + void vkCmdSetLineStipple( VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetLineStipple( commandBuffer, lineStippleFactor, lineStipplePattern ); + } + + VkResult vkMapMemory2( VkDevice device, const VkMemoryMapInfo * pMemoryMapInfo, void ** ppData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkMapMemory2( device, pMemoryMapInfo, ppData ); + } + + VkResult vkUnmapMemory2( VkDevice device, const VkMemoryUnmapInfo * pMemoryUnmapInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkUnmapMemory2( device, pMemoryUnmapInfo ); + } + + void vkCmdBindIndexBuffer2( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size, VkIndexType indexType ) const + VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindIndexBuffer2( commandBuffer, buffer, offset, size, indexType ); + } + + void vkGetRenderingAreaGranularity( VkDevice device, const VkRenderingAreaInfo * pRenderingAreaInfo, VkExtent2D * pGranularity ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRenderingAreaGranularity( device, pRenderingAreaInfo, pGranularity ); + } + + void vkGetDeviceImageSubresourceLayout( VkDevice device, + const VkDeviceImageSubresourceInfo * pInfo, + VkSubresourceLayout2 * pLayout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceImageSubresourceLayout( device, pInfo, pLayout ); + } + + void vkGetImageSubresourceLayout2( VkDevice device, + VkImage image, + const VkImageSubresource2 * pSubresource, + VkSubresourceLayout2 * pLayout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageSubresourceLayout2( device, image, pSubresource, pLayout ); + } + + void vkCmdPushDescriptorSet( VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet * pDescriptorWrites ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushDescriptorSet( commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites ); + } + + void vkCmdPushDescriptorSetWithTemplate( VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void * pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushDescriptorSetWithTemplate( commandBuffer, descriptorUpdateTemplate, layout, set, pData ); + } + + void vkCmdSetRenderingAttachmentLocations( VkCommandBuffer commandBuffer, + const VkRenderingAttachmentLocationInfo * pLocationInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetRenderingAttachmentLocations( commandBuffer, pLocationInfo ); + } + + void vkCmdSetRenderingInputAttachmentIndices( VkCommandBuffer commandBuffer, + const VkRenderingInputAttachmentIndexInfo * pInputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetRenderingInputAttachmentIndices( commandBuffer, pInputAttachmentIndexInfo ); + } + + void vkCmdBindDescriptorSets2( VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfo * pBindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindDescriptorSets2( commandBuffer, pBindDescriptorSetsInfo ); + } + + void vkCmdPushConstants2( VkCommandBuffer commandBuffer, const 
VkPushConstantsInfo * pPushConstantsInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushConstants2( commandBuffer, pPushConstantsInfo ); + } + + void vkCmdPushDescriptorSet2( VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfo * pPushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushDescriptorSet2( commandBuffer, pPushDescriptorSetInfo ); + } + + void vkCmdPushDescriptorSetWithTemplate2( VkCommandBuffer commandBuffer, + const VkPushDescriptorSetWithTemplateInfo * pPushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushDescriptorSetWithTemplate2( commandBuffer, pPushDescriptorSetWithTemplateInfo ); + } + + VkResult vkCopyMemoryToImage( VkDevice device, const VkCopyMemoryToImageInfo * pCopyMemoryToImageInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCopyMemoryToImage( device, pCopyMemoryToImageInfo ); + } + + VkResult vkCopyImageToMemory( VkDevice device, const VkCopyImageToMemoryInfo * pCopyImageToMemoryInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCopyImageToMemory( device, pCopyImageToMemoryInfo ); + } + + VkResult vkCopyImageToImage( VkDevice device, const VkCopyImageToImageInfo * pCopyImageToImageInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCopyImageToImage( device, pCopyImageToImageInfo ); + } + + VkResult + vkTransitionImageLayout( VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfo * pTransitions ) const VULKAN_HPP_NOEXCEPT + { + return ::vkTransitionImageLayout( device, transitionCount, pTransitions ); + } + //=== VK_KHR_surface === void vkDestroySurfaceKHR( VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks * pAllocator ) const VULKAN_HPP_NOEXCEPT @@ -4378,14 +4494,14 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === - void vkCmdSetRenderingAttachmentLocationsKHR( VkCommandBuffer commandBuffer, - const VkRenderingAttachmentLocationInfoKHR * pLocationInfo ) const VULKAN_HPP_NOEXCEPT + void vkCmdSetRenderingAttachmentLocationsKHR( VkCommandBuffer commandBuffer, + const VkRenderingAttachmentLocationInfo * pLocationInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdSetRenderingAttachmentLocationsKHR( commandBuffer, pLocationInfo ); } - void vkCmdSetRenderingInputAttachmentIndicesKHR( VkCommandBuffer commandBuffer, - const VkRenderingInputAttachmentIndexInfoKHR * pInputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT + void vkCmdSetRenderingInputAttachmentIndicesKHR( VkCommandBuffer commandBuffer, + const VkRenderingInputAttachmentIndexInfo * pInputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdSetRenderingInputAttachmentIndicesKHR( commandBuffer, pInputAttachmentIndexInfo ); } @@ -4632,44 +4748,43 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_host_image_copy === - VkResult vkCopyMemoryToImageEXT( VkDevice device, const VkCopyMemoryToImageInfoEXT * pCopyMemoryToImageInfo ) const VULKAN_HPP_NOEXCEPT + VkResult vkCopyMemoryToImageEXT( VkDevice device, const VkCopyMemoryToImageInfo * pCopyMemoryToImageInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCopyMemoryToImageEXT( device, pCopyMemoryToImageInfo ); } - VkResult vkCopyImageToMemoryEXT( VkDevice device, const VkCopyImageToMemoryInfoEXT * pCopyImageToMemoryInfo ) const VULKAN_HPP_NOEXCEPT + VkResult vkCopyImageToMemoryEXT( VkDevice device, const VkCopyImageToMemoryInfo * pCopyImageToMemoryInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCopyImageToMemoryEXT( device, pCopyImageToMemoryInfo ); } - VkResult vkCopyImageToImageEXT( VkDevice device, const VkCopyImageToImageInfoEXT 
* pCopyImageToImageInfo ) const VULKAN_HPP_NOEXCEPT + VkResult vkCopyImageToImageEXT( VkDevice device, const VkCopyImageToImageInfo * pCopyImageToImageInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCopyImageToImageEXT( device, pCopyImageToImageInfo ); } - VkResult vkTransitionImageLayoutEXT( VkDevice device, - uint32_t transitionCount, - const VkHostImageLayoutTransitionInfoEXT * pTransitions ) const VULKAN_HPP_NOEXCEPT + VkResult + vkTransitionImageLayoutEXT( VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfo * pTransitions ) const VULKAN_HPP_NOEXCEPT { return ::vkTransitionImageLayoutEXT( device, transitionCount, pTransitions ); } - void vkGetImageSubresourceLayout2EXT( VkDevice device, - VkImage image, - const VkImageSubresource2KHR * pSubresource, - VkSubresourceLayout2KHR * pLayout ) const VULKAN_HPP_NOEXCEPT + void vkGetImageSubresourceLayout2EXT( VkDevice device, + VkImage image, + const VkImageSubresource2 * pSubresource, + VkSubresourceLayout2 * pLayout ) const VULKAN_HPP_NOEXCEPT { return ::vkGetImageSubresourceLayout2EXT( device, image, pSubresource, pLayout ); } //=== VK_KHR_map_memory2 === - VkResult vkMapMemory2KHR( VkDevice device, const VkMemoryMapInfoKHR * pMemoryMapInfo, void ** ppData ) const VULKAN_HPP_NOEXCEPT + VkResult vkMapMemory2KHR( VkDevice device, const VkMemoryMapInfo * pMemoryMapInfo, void ** ppData ) const VULKAN_HPP_NOEXCEPT { return ::vkMapMemory2KHR( device, pMemoryMapInfo, ppData ); } - VkResult vkUnmapMemory2KHR( VkDevice device, const VkMemoryUnmapInfoKHR * pMemoryUnmapInfo ) const VULKAN_HPP_NOEXCEPT + VkResult vkUnmapMemory2KHR( VkDevice device, const VkMemoryUnmapInfo * pMemoryUnmapInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkUnmapMemory2KHR( device, pMemoryUnmapInfo ); } @@ -5725,24 +5840,23 @@ namespace VULKAN_HPP_NAMESPACE return ::vkCmdBindIndexBuffer2KHR( commandBuffer, buffer, offset, size, indexType ); } - void vkGetRenderingAreaGranularityKHR( VkDevice device, - const VkRenderingAreaInfoKHR * pRenderingAreaInfo, - VkExtent2D * pGranularity ) const VULKAN_HPP_NOEXCEPT + void + vkGetRenderingAreaGranularityKHR( VkDevice device, const VkRenderingAreaInfo * pRenderingAreaInfo, VkExtent2D * pGranularity ) const VULKAN_HPP_NOEXCEPT { return ::vkGetRenderingAreaGranularityKHR( device, pRenderingAreaInfo, pGranularity ); } - void vkGetDeviceImageSubresourceLayoutKHR( VkDevice device, - const VkDeviceImageSubresourceInfoKHR * pInfo, - VkSubresourceLayout2KHR * pLayout ) const VULKAN_HPP_NOEXCEPT + void vkGetDeviceImageSubresourceLayoutKHR( VkDevice device, + const VkDeviceImageSubresourceInfo * pInfo, + VkSubresourceLayout2 * pLayout ) const VULKAN_HPP_NOEXCEPT { return ::vkGetDeviceImageSubresourceLayoutKHR( device, pInfo, pLayout ); } - void vkGetImageSubresourceLayout2KHR( VkDevice device, - VkImage image, - const VkImageSubresource2KHR * pSubresource, - VkSubresourceLayout2KHR * pLayout ) const VULKAN_HPP_NOEXCEPT + void vkGetImageSubresourceLayout2KHR( VkDevice device, + VkImage image, + const VkImageSubresource2 * pSubresource, + VkSubresourceLayout2 * pLayout ) const VULKAN_HPP_NOEXCEPT { return ::vkGetImageSubresourceLayout2KHR( device, image, pSubresource, pLayout ); } @@ -5926,23 +6040,23 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance6 === - void vkCmdBindDescriptorSets2KHR( VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfoKHR * pBindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT + void vkCmdBindDescriptorSets2KHR( VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfo * 
pBindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdBindDescriptorSets2KHR( commandBuffer, pBindDescriptorSetsInfo ); } - void vkCmdPushConstants2KHR( VkCommandBuffer commandBuffer, const VkPushConstantsInfoKHR * pPushConstantsInfo ) const VULKAN_HPP_NOEXCEPT + void vkCmdPushConstants2KHR( VkCommandBuffer commandBuffer, const VkPushConstantsInfo * pPushConstantsInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdPushConstants2KHR( commandBuffer, pPushConstantsInfo ); } - void vkCmdPushDescriptorSet2KHR( VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfoKHR * pPushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT + void vkCmdPushDescriptorSet2KHR( VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfo * pPushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdPushDescriptorSet2KHR( commandBuffer, pPushDescriptorSetInfo ); } - void vkCmdPushDescriptorSetWithTemplate2KHR( VkCommandBuffer commandBuffer, - const VkPushDescriptorSetWithTemplateInfoKHR * pPushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT + void vkCmdPushDescriptorSetWithTemplate2KHR( VkCommandBuffer commandBuffer, + const VkPushDescriptorSetWithTemplateInfo * pPushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT { return ::vkCmdPushDescriptorSetWithTemplate2KHR( commandBuffer, pPushDescriptorSetWithTemplateInfo ); } @@ -6520,6 +6634,14 @@ namespace VULKAN_HPP_NAMESPACE InvalidOpaqueCaptureAddressError( char const * message ) : SystemError( make_error_code( Result::eErrorInvalidOpaqueCaptureAddress ), message ) {} }; + class NotPermittedError : public SystemError + { + public: + NotPermittedError( std::string const & message ) : SystemError( make_error_code( Result::eErrorNotPermitted ), message ) {} + + NotPermittedError( char const * message ) : SystemError( make_error_code( Result::eErrorNotPermitted ), message ) {} + }; + class SurfaceLostKHRError : public SystemError { public: @@ -6651,14 +6773,6 @@ namespace VULKAN_HPP_NAMESPACE } }; - class NotPermittedKHRError : public SystemError - { - public: - NotPermittedKHRError( std::string const & message ) : SystemError( make_error_code( Result::eErrorNotPermittedKHR ), message ) {} - - NotPermittedKHRError( char const * message ) : SystemError( make_error_code( Result::eErrorNotPermittedKHR ), message ) {} - }; - # if defined( VK_USE_PLATFORM_WIN32_KHR ) class FullScreenExclusiveModeLostEXTError : public SystemError { @@ -6718,6 +6832,7 @@ namespace VULKAN_HPP_NAMESPACE case Result::eErrorInvalidExternalHandle: throw InvalidExternalHandleError( message ); case Result::eErrorFragmentation: throw FragmentationError( message ); case Result::eErrorInvalidOpaqueCaptureAddress: throw InvalidOpaqueCaptureAddressError( message ); + case Result::eErrorNotPermitted: throw NotPermittedError( message ); case Result::eErrorSurfaceLostKHR: throw SurfaceLostKHRError( message ); case Result::eErrorNativeWindowInUseKHR: throw NativeWindowInUseKHRError( message ); case Result::eErrorOutOfDateKHR: throw OutOfDateKHRError( message ); @@ -6731,7 +6846,6 @@ namespace VULKAN_HPP_NAMESPACE case Result::eErrorVideoProfileCodecNotSupportedKHR: throw VideoProfileCodecNotSupportedKHRError( message ); case Result::eErrorVideoStdVersionNotSupportedKHR: throw VideoStdVersionNotSupportedKHRError( message ); case Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT: throw InvalidDrmFormatModifierPlaneLayoutEXTError( message ); - case Result::eErrorNotPermittedKHR: throw NotPermittedKHRError( message ); # if defined( VK_USE_PLATFORM_WIN32_KHR ) case 
Result::eErrorFullScreenExclusiveModeLostEXT: throw FullScreenExclusiveModeLostEXTError( message ); # endif /*VK_USE_PLATFORM_WIN32_KHR*/ @@ -6957,6 +7071,9 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_INLINE uint32_t MaxDriverNameSize = VK_MAX_DRIVER_NAME_SIZE; VULKAN_HPP_CONSTEXPR_INLINE uint32_t MaxDriverInfoSize = VK_MAX_DRIVER_INFO_SIZE; + //=== VK_VERSION_1_4 === + VULKAN_HPP_CONSTEXPR_INLINE uint32_t MaxGlobalPrioritySize = VK_MAX_GLOBAL_PRIORITY_SIZE; + //=== VK_KHR_device_group_creation === VULKAN_HPP_CONSTEXPR_INLINE uint32_t MaxDeviceGroupSizeKHR = VK_MAX_DEVICE_GROUP_SIZE_KHR; @@ -7077,7 +7194,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_CONSTEXPR_INLINE auto ApiVersion11 = makeApiVersion( 0, 1, 1, 0 ); VULKAN_HPP_CONSTEXPR_INLINE auto ApiVersion12 = makeApiVersion( 0, 1, 2, 0 ); VULKAN_HPP_CONSTEXPR_INLINE auto ApiVersion13 = makeApiVersion( 0, 1, 3, 0 ); - VULKAN_HPP_CONSTEXPR_INLINE auto HeaderVersionComplete = makeApiVersion( 0, 1, 3, VK_HEADER_VERSION ); + VULKAN_HPP_CONSTEXPR_INLINE auto ApiVersion14 = makeApiVersion( 0, 1, 4, 0 ); + VULKAN_HPP_CONSTEXPR_INLINE auto HeaderVersionComplete = makeApiVersion( 0, 1, 4, VK_HEADER_VERSION ); //================================= //=== CONSTEXPR EXTENSION NAMEs === @@ -8767,7 +8885,7 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct StructExtends + struct StructExtends { enum { @@ -8776,7 +8894,7 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct StructExtends + struct StructExtends { enum { @@ -8785,7 +8903,7 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct StructExtends + struct StructExtends { enum { @@ -8794,7 +8912,7 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct StructExtends + struct StructExtends { enum { @@ -10209,6 +10327,547 @@ namespace VULKAN_HPP_NAMESPACE }; }; + //=== VK_VERSION_1_4 === + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + 
value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + + template <> + struct StructExtends + { + enum + { + value = true + }; + }; + //=== VK_KHR_swapchain === template <> struct StructExtends @@ -10907,70 +11566,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_EXT_pipeline_robustness === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - 
{ - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - # if defined( VK_USE_PLATFORM_WIN32_KHR ) //=== VK_KHR_external_memory_win32 === template <> @@ -11044,16 +11639,6 @@ namespace VULKAN_HPP_NAMESPACE }; # endif /*VK_USE_PLATFORM_WIN32_KHR*/ - //=== VK_KHR_push_descriptor === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - //=== VK_EXT_conditional_rendering === template <> struct StructExtends @@ -12055,43 +12640,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_KHR_global_priority === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - //=== VK_AMD_memory_overallocation_behavior === template <> struct StructExtends @@ -12415,61 +12963,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_KHR_dynamic_rendering_local_read === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - //=== VK_EXT_shader_image_atomic_int64 === template <> struct StructExtends @@ -12886,52 +13379,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_EXT_host_image_copy === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - //=== VK_EXT_map_memory_placed === template <> struct StructExtends @@ -12961,7 +13408,7 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct StructExtends + struct StructExtends { enum { @@ -14140,7 +14587,7 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct StructExtends + struct StructExtends { enum { @@ -14942,25 +15389,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_KHR_shader_subgroup_rotate === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - //=== VK_ARM_scheduling_controls === template <> struct StructExtends @@ -15650,25 +16078,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_EXT_pipeline_protected_access === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - # if defined( VK_USE_PLATFORM_ANDROID_KHR ) //=== VK_ANDROID_external_format_resolve === template <> @@ -15708,106 +16117,6 @@ namespace VULKAN_HPP_NAMESPACE }; # endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - //=== VK_KHR_maintenance5 === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - 
{ - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - //=== VK_AMD_anti_lag === template <> struct StructExtends @@ -16709,62 +17018,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_KHR_vertex_attribute_divisor === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - //=== VK_KHR_shader_float_controls2 === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - # if defined( VK_USE_PLATFORM_SCREEN_QNX ) //=== VK_QNX_external_memory_screen_buffer === template <> @@ -16832,127 +17085,6 @@ namespace VULKAN_HPP_NAMESPACE }; }; - //=== VK_KHR_index_type_uint8 === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - //=== VK_KHR_line_rasterization === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - //=== VK_KHR_shader_expect_assume === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - //=== VK_KHR_maintenance6 === - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - - template <> - struct StructExtends - { - enum - { - value = true - }; - }; - //=== VK_NV_descriptor_pool_overallocation === template <> struct StructExtends @@ -17814,6 +17946,27 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements = 0; PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements = 0; + //=== VK_VERSION_1_4 === + PFN_vkCmdSetLineStipple vkCmdSetLineStipple = 0; + PFN_vkMapMemory2 vkMapMemory2 = 0; + PFN_vkUnmapMemory2 vkUnmapMemory2 = 0; + PFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2 = 0; + PFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity = 0; + PFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout = 0; + PFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2 = 0; + PFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet = 0; + PFN_vkCmdPushDescriptorSetWithTemplate 
vkCmdPushDescriptorSetWithTemplate = 0; + PFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations = 0; + PFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices = 0; + PFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2 = 0; + PFN_vkCmdPushConstants2 vkCmdPushConstants2 = 0; + PFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2 = 0; + PFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2 = 0; + PFN_vkCopyMemoryToImage vkCopyMemoryToImage = 0; + PFN_vkCopyImageToMemory vkCopyImageToMemory = 0; + PFN_vkCopyImageToImage vkCopyImageToImage = 0; + PFN_vkTransitionImageLayout vkTransitionImageLayout = 0; + //=== VK_KHR_surface === PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR = 0; PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR = 0; @@ -19050,6 +19203,30 @@ namespace VULKAN_HPP_NAMESPACE vkGetDeviceImageSparseMemoryRequirements = PFN_vkGetDeviceImageSparseMemoryRequirements( vkGetInstanceProcAddr( instance, "vkGetDeviceImageSparseMemoryRequirements" ) ); + //=== VK_VERSION_1_4 === + vkCmdSetLineStipple = PFN_vkCmdSetLineStipple( vkGetInstanceProcAddr( instance, "vkCmdSetLineStipple" ) ); + vkMapMemory2 = PFN_vkMapMemory2( vkGetInstanceProcAddr( instance, "vkMapMemory2" ) ); + vkUnmapMemory2 = PFN_vkUnmapMemory2( vkGetInstanceProcAddr( instance, "vkUnmapMemory2" ) ); + vkCmdBindIndexBuffer2 = PFN_vkCmdBindIndexBuffer2( vkGetInstanceProcAddr( instance, "vkCmdBindIndexBuffer2" ) ); + vkGetRenderingAreaGranularity = PFN_vkGetRenderingAreaGranularity( vkGetInstanceProcAddr( instance, "vkGetRenderingAreaGranularity" ) ); + vkGetDeviceImageSubresourceLayout = PFN_vkGetDeviceImageSubresourceLayout( vkGetInstanceProcAddr( instance, "vkGetDeviceImageSubresourceLayout" ) ); + vkGetImageSubresourceLayout2 = PFN_vkGetImageSubresourceLayout2( vkGetInstanceProcAddr( instance, "vkGetImageSubresourceLayout2" ) ); + vkCmdPushDescriptorSet = PFN_vkCmdPushDescriptorSet( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSet" ) ); + vkCmdPushDescriptorSetWithTemplate = PFN_vkCmdPushDescriptorSetWithTemplate( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSetWithTemplate" ) ); + vkCmdSetRenderingAttachmentLocations = + PFN_vkCmdSetRenderingAttachmentLocations( vkGetInstanceProcAddr( instance, "vkCmdSetRenderingAttachmentLocations" ) ); + vkCmdSetRenderingInputAttachmentIndices = + PFN_vkCmdSetRenderingInputAttachmentIndices( vkGetInstanceProcAddr( instance, "vkCmdSetRenderingInputAttachmentIndices" ) ); + vkCmdBindDescriptorSets2 = PFN_vkCmdBindDescriptorSets2( vkGetInstanceProcAddr( instance, "vkCmdBindDescriptorSets2" ) ); + vkCmdPushConstants2 = PFN_vkCmdPushConstants2( vkGetInstanceProcAddr( instance, "vkCmdPushConstants2" ) ); + vkCmdPushDescriptorSet2 = PFN_vkCmdPushDescriptorSet2( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSet2" ) ); + vkCmdPushDescriptorSetWithTemplate2 = + PFN_vkCmdPushDescriptorSetWithTemplate2( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSetWithTemplate2" ) ); + vkCopyMemoryToImage = PFN_vkCopyMemoryToImage( vkGetInstanceProcAddr( instance, "vkCopyMemoryToImage" ) ); + vkCopyImageToMemory = PFN_vkCopyImageToMemory( vkGetInstanceProcAddr( instance, "vkCopyImageToMemory" ) ); + vkCopyImageToImage = PFN_vkCopyImageToImage( vkGetInstanceProcAddr( instance, "vkCopyImageToImage" ) ); + vkTransitionImageLayout = PFN_vkTransitionImageLayout( vkGetInstanceProcAddr( instance, "vkTransitionImageLayout" ) ); + //=== VK_KHR_surface === vkDestroySurfaceKHR = 
PFN_vkDestroySurfaceKHR( vkGetInstanceProcAddr( instance, "vkDestroySurfaceKHR" ) ); vkGetPhysicalDeviceSurfaceSupportKHR = @@ -19299,8 +19476,12 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_push_descriptor === vkCmdPushDescriptorSetKHR = PFN_vkCmdPushDescriptorSetKHR( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSetKHR" ) ); + if ( !vkCmdPushDescriptorSet ) + vkCmdPushDescriptorSet = vkCmdPushDescriptorSetKHR; vkCmdPushDescriptorSetWithTemplateKHR = PFN_vkCmdPushDescriptorSetWithTemplateKHR( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSetWithTemplateKHR" ) ); + if ( !vkCmdPushDescriptorSetWithTemplate ) + vkCmdPushDescriptorSetWithTemplate = vkCmdPushDescriptorSetWithTemplateKHR; //=== VK_EXT_conditional_rendering === vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT( vkGetInstanceProcAddr( instance, "vkCmdBeginConditionalRenderingEXT" ) ); @@ -19658,8 +19839,12 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === vkCmdSetRenderingAttachmentLocationsKHR = PFN_vkCmdSetRenderingAttachmentLocationsKHR( vkGetInstanceProcAddr( instance, "vkCmdSetRenderingAttachmentLocationsKHR" ) ); + if ( !vkCmdSetRenderingAttachmentLocations ) + vkCmdSetRenderingAttachmentLocations = vkCmdSetRenderingAttachmentLocationsKHR; vkCmdSetRenderingInputAttachmentIndicesKHR = PFN_vkCmdSetRenderingInputAttachmentIndicesKHR( vkGetInstanceProcAddr( instance, "vkCmdSetRenderingInputAttachmentIndicesKHR" ) ); + if ( !vkCmdSetRenderingInputAttachmentIndices ) + vkCmdSetRenderingInputAttachmentIndices = vkCmdSetRenderingInputAttachmentIndicesKHR; //=== VK_EXT_buffer_device_address === vkGetBufferDeviceAddressEXT = PFN_vkGetBufferDeviceAddressEXT( vkGetInstanceProcAddr( instance, "vkGetBufferDeviceAddressEXT" ) ); @@ -19712,8 +19897,8 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_line_rasterization === vkCmdSetLineStippleEXT = PFN_vkCmdSetLineStippleEXT( vkGetInstanceProcAddr( instance, "vkCmdSetLineStippleEXT" ) ); - if ( !vkCmdSetLineStippleKHR ) - vkCmdSetLineStippleKHR = vkCmdSetLineStippleEXT; + if ( !vkCmdSetLineStipple ) + vkCmdSetLineStipple = vkCmdSetLineStippleEXT; //=== VK_EXT_host_query_reset === vkResetQueryPoolEXT = PFN_vkResetQueryPoolEXT( vkGetInstanceProcAddr( instance, "vkResetQueryPoolEXT" ) ); @@ -19775,17 +19960,29 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetPipelineExecutableInternalRepresentationsKHR( vkGetInstanceProcAddr( instance, "vkGetPipelineExecutableInternalRepresentationsKHR" ) ); //=== VK_EXT_host_image_copy === - vkCopyMemoryToImageEXT = PFN_vkCopyMemoryToImageEXT( vkGetInstanceProcAddr( instance, "vkCopyMemoryToImageEXT" ) ); - vkCopyImageToMemoryEXT = PFN_vkCopyImageToMemoryEXT( vkGetInstanceProcAddr( instance, "vkCopyImageToMemoryEXT" ) ); - vkCopyImageToImageEXT = PFN_vkCopyImageToImageEXT( vkGetInstanceProcAddr( instance, "vkCopyImageToImageEXT" ) ); - vkTransitionImageLayoutEXT = PFN_vkTransitionImageLayoutEXT( vkGetInstanceProcAddr( instance, "vkTransitionImageLayoutEXT" ) ); + vkCopyMemoryToImageEXT = PFN_vkCopyMemoryToImageEXT( vkGetInstanceProcAddr( instance, "vkCopyMemoryToImageEXT" ) ); + if ( !vkCopyMemoryToImage ) + vkCopyMemoryToImage = vkCopyMemoryToImageEXT; + vkCopyImageToMemoryEXT = PFN_vkCopyImageToMemoryEXT( vkGetInstanceProcAddr( instance, "vkCopyImageToMemoryEXT" ) ); + if ( !vkCopyImageToMemory ) + vkCopyImageToMemory = vkCopyImageToMemoryEXT; + vkCopyImageToImageEXT = PFN_vkCopyImageToImageEXT( vkGetInstanceProcAddr( instance, "vkCopyImageToImageEXT" ) ); + if ( !vkCopyImageToImage ) + 
vkCopyImageToImage = vkCopyImageToImageEXT; + vkTransitionImageLayoutEXT = PFN_vkTransitionImageLayoutEXT( vkGetInstanceProcAddr( instance, "vkTransitionImageLayoutEXT" ) ); + if ( !vkTransitionImageLayout ) + vkTransitionImageLayout = vkTransitionImageLayoutEXT; vkGetImageSubresourceLayout2EXT = PFN_vkGetImageSubresourceLayout2EXT( vkGetInstanceProcAddr( instance, "vkGetImageSubresourceLayout2EXT" ) ); - if ( !vkGetImageSubresourceLayout2KHR ) - vkGetImageSubresourceLayout2KHR = vkGetImageSubresourceLayout2EXT; + if ( !vkGetImageSubresourceLayout2 ) + vkGetImageSubresourceLayout2 = vkGetImageSubresourceLayout2EXT; //=== VK_KHR_map_memory2 === - vkMapMemory2KHR = PFN_vkMapMemory2KHR( vkGetInstanceProcAddr( instance, "vkMapMemory2KHR" ) ); + vkMapMemory2KHR = PFN_vkMapMemory2KHR( vkGetInstanceProcAddr( instance, "vkMapMemory2KHR" ) ); + if ( !vkMapMemory2 ) + vkMapMemory2 = vkMapMemory2KHR; vkUnmapMemory2KHR = PFN_vkUnmapMemory2KHR( vkGetInstanceProcAddr( instance, "vkUnmapMemory2KHR" ) ); + if ( !vkUnmapMemory2 ) + vkUnmapMemory2 = vkUnmapMemory2KHR; //=== VK_EXT_swapchain_maintenance1 === vkReleaseSwapchainImagesEXT = PFN_vkReleaseSwapchainImagesEXT( vkGetInstanceProcAddr( instance, "vkReleaseSwapchainImagesEXT" ) ); @@ -20112,11 +20309,19 @@ namespace VULKAN_HPP_NAMESPACE vkCmdOpticalFlowExecuteNV = PFN_vkCmdOpticalFlowExecuteNV( vkGetInstanceProcAddr( instance, "vkCmdOpticalFlowExecuteNV" ) ); //=== VK_KHR_maintenance5 === - vkCmdBindIndexBuffer2KHR = PFN_vkCmdBindIndexBuffer2KHR( vkGetInstanceProcAddr( instance, "vkCmdBindIndexBuffer2KHR" ) ); + vkCmdBindIndexBuffer2KHR = PFN_vkCmdBindIndexBuffer2KHR( vkGetInstanceProcAddr( instance, "vkCmdBindIndexBuffer2KHR" ) ); + if ( !vkCmdBindIndexBuffer2 ) + vkCmdBindIndexBuffer2 = vkCmdBindIndexBuffer2KHR; vkGetRenderingAreaGranularityKHR = PFN_vkGetRenderingAreaGranularityKHR( vkGetInstanceProcAddr( instance, "vkGetRenderingAreaGranularityKHR" ) ); + if ( !vkGetRenderingAreaGranularity ) + vkGetRenderingAreaGranularity = vkGetRenderingAreaGranularityKHR; vkGetDeviceImageSubresourceLayoutKHR = PFN_vkGetDeviceImageSubresourceLayoutKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceImageSubresourceLayoutKHR" ) ); + if ( !vkGetDeviceImageSubresourceLayout ) + vkGetDeviceImageSubresourceLayout = vkGetDeviceImageSubresourceLayoutKHR; vkGetImageSubresourceLayout2KHR = PFN_vkGetImageSubresourceLayout2KHR( vkGetInstanceProcAddr( instance, "vkGetImageSubresourceLayout2KHR" ) ); + if ( !vkGetImageSubresourceLayout2 ) + vkGetImageSubresourceLayout2 = vkGetImageSubresourceLayout2KHR; //=== VK_AMD_anti_lag === vkAntiLagUpdateAMD = PFN_vkAntiLagUpdateAMD( vkGetInstanceProcAddr( instance, "vkAntiLagUpdateAMD" ) ); @@ -20162,6 +20367,8 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_line_rasterization === vkCmdSetLineStippleKHR = PFN_vkCmdSetLineStippleKHR( vkGetInstanceProcAddr( instance, "vkCmdSetLineStippleKHR" ) ); + if ( !vkCmdSetLineStipple ) + vkCmdSetLineStipple = vkCmdSetLineStippleKHR; //=== VK_KHR_calibrated_timestamps === vkGetPhysicalDeviceCalibrateableTimeDomainsKHR = @@ -20170,10 +20377,18 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance6 === vkCmdBindDescriptorSets2KHR = PFN_vkCmdBindDescriptorSets2KHR( vkGetInstanceProcAddr( instance, "vkCmdBindDescriptorSets2KHR" ) ); - vkCmdPushConstants2KHR = PFN_vkCmdPushConstants2KHR( vkGetInstanceProcAddr( instance, "vkCmdPushConstants2KHR" ) ); - vkCmdPushDescriptorSet2KHR = PFN_vkCmdPushDescriptorSet2KHR( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSet2KHR" ) ); + if ( 
!vkCmdBindDescriptorSets2 ) + vkCmdBindDescriptorSets2 = vkCmdBindDescriptorSets2KHR; + vkCmdPushConstants2KHR = PFN_vkCmdPushConstants2KHR( vkGetInstanceProcAddr( instance, "vkCmdPushConstants2KHR" ) ); + if ( !vkCmdPushConstants2 ) + vkCmdPushConstants2 = vkCmdPushConstants2KHR; + vkCmdPushDescriptorSet2KHR = PFN_vkCmdPushDescriptorSet2KHR( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSet2KHR" ) ); + if ( !vkCmdPushDescriptorSet2 ) + vkCmdPushDescriptorSet2 = vkCmdPushDescriptorSet2KHR; vkCmdPushDescriptorSetWithTemplate2KHR = PFN_vkCmdPushDescriptorSetWithTemplate2KHR( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSetWithTemplate2KHR" ) ); + if ( !vkCmdPushDescriptorSetWithTemplate2 ) + vkCmdPushDescriptorSetWithTemplate2 = vkCmdPushDescriptorSetWithTemplate2KHR; vkCmdSetDescriptorBufferOffsets2EXT = PFN_vkCmdSetDescriptorBufferOffsets2EXT( vkGetInstanceProcAddr( instance, "vkCmdSetDescriptorBufferOffsets2EXT" ) ); vkCmdBindDescriptorBufferEmbeddedSamplers2EXT = @@ -20399,6 +20614,29 @@ namespace VULKAN_HPP_NAMESPACE vkGetDeviceImageSparseMemoryRequirements = PFN_vkGetDeviceImageSparseMemoryRequirements( vkGetDeviceProcAddr( device, "vkGetDeviceImageSparseMemoryRequirements" ) ); + //=== VK_VERSION_1_4 === + vkCmdSetLineStipple = PFN_vkCmdSetLineStipple( vkGetDeviceProcAddr( device, "vkCmdSetLineStipple" ) ); + vkMapMemory2 = PFN_vkMapMemory2( vkGetDeviceProcAddr( device, "vkMapMemory2" ) ); + vkUnmapMemory2 = PFN_vkUnmapMemory2( vkGetDeviceProcAddr( device, "vkUnmapMemory2" ) ); + vkCmdBindIndexBuffer2 = PFN_vkCmdBindIndexBuffer2( vkGetDeviceProcAddr( device, "vkCmdBindIndexBuffer2" ) ); + vkGetRenderingAreaGranularity = PFN_vkGetRenderingAreaGranularity( vkGetDeviceProcAddr( device, "vkGetRenderingAreaGranularity" ) ); + vkGetDeviceImageSubresourceLayout = PFN_vkGetDeviceImageSubresourceLayout( vkGetDeviceProcAddr( device, "vkGetDeviceImageSubresourceLayout" ) ); + vkGetImageSubresourceLayout2 = PFN_vkGetImageSubresourceLayout2( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2" ) ); + vkCmdPushDescriptorSet = PFN_vkCmdPushDescriptorSet( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSet" ) ); + vkCmdPushDescriptorSetWithTemplate = PFN_vkCmdPushDescriptorSetWithTemplate( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplate" ) ); + vkCmdSetRenderingAttachmentLocations = + PFN_vkCmdSetRenderingAttachmentLocations( vkGetDeviceProcAddr( device, "vkCmdSetRenderingAttachmentLocations" ) ); + vkCmdSetRenderingInputAttachmentIndices = + PFN_vkCmdSetRenderingInputAttachmentIndices( vkGetDeviceProcAddr( device, "vkCmdSetRenderingInputAttachmentIndices" ) ); + vkCmdBindDescriptorSets2 = PFN_vkCmdBindDescriptorSets2( vkGetDeviceProcAddr( device, "vkCmdBindDescriptorSets2" ) ); + vkCmdPushConstants2 = PFN_vkCmdPushConstants2( vkGetDeviceProcAddr( device, "vkCmdPushConstants2" ) ); + vkCmdPushDescriptorSet2 = PFN_vkCmdPushDescriptorSet2( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSet2" ) ); + vkCmdPushDescriptorSetWithTemplate2 = PFN_vkCmdPushDescriptorSetWithTemplate2( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplate2" ) ); + vkCopyMemoryToImage = PFN_vkCopyMemoryToImage( vkGetDeviceProcAddr( device, "vkCopyMemoryToImage" ) ); + vkCopyImageToMemory = PFN_vkCopyImageToMemory( vkGetDeviceProcAddr( device, "vkCopyImageToMemory" ) ); + vkCopyImageToImage = PFN_vkCopyImageToImage( vkGetDeviceProcAddr( device, "vkCopyImageToImage" ) ); + vkTransitionImageLayout = PFN_vkTransitionImageLayout( vkGetDeviceProcAddr( device, 
"vkTransitionImageLayout" ) ); + //=== VK_KHR_swapchain === vkCreateSwapchainKHR = PFN_vkCreateSwapchainKHR( vkGetDeviceProcAddr( device, "vkCreateSwapchainKHR" ) ); vkDestroySwapchainKHR = PFN_vkDestroySwapchainKHR( vkGetDeviceProcAddr( device, "vkDestroySwapchainKHR" ) ); @@ -20521,8 +20759,12 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_push_descriptor === vkCmdPushDescriptorSetKHR = PFN_vkCmdPushDescriptorSetKHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetKHR" ) ); + if ( !vkCmdPushDescriptorSet ) + vkCmdPushDescriptorSet = vkCmdPushDescriptorSetKHR; vkCmdPushDescriptorSetWithTemplateKHR = PFN_vkCmdPushDescriptorSetWithTemplateKHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplateKHR" ) ); + if ( !vkCmdPushDescriptorSetWithTemplate ) + vkCmdPushDescriptorSetWithTemplate = vkCmdPushDescriptorSetWithTemplateKHR; //=== VK_EXT_conditional_rendering === vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT( vkGetDeviceProcAddr( device, "vkCmdBeginConditionalRenderingEXT" ) ); @@ -20805,8 +21047,12 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === vkCmdSetRenderingAttachmentLocationsKHR = PFN_vkCmdSetRenderingAttachmentLocationsKHR( vkGetDeviceProcAddr( device, "vkCmdSetRenderingAttachmentLocationsKHR" ) ); + if ( !vkCmdSetRenderingAttachmentLocations ) + vkCmdSetRenderingAttachmentLocations = vkCmdSetRenderingAttachmentLocationsKHR; vkCmdSetRenderingInputAttachmentIndicesKHR = PFN_vkCmdSetRenderingInputAttachmentIndicesKHR( vkGetDeviceProcAddr( device, "vkCmdSetRenderingInputAttachmentIndicesKHR" ) ); + if ( !vkCmdSetRenderingInputAttachmentIndices ) + vkCmdSetRenderingInputAttachmentIndices = vkCmdSetRenderingInputAttachmentIndicesKHR; //=== VK_EXT_buffer_device_address === vkGetBufferDeviceAddressEXT = PFN_vkGetBufferDeviceAddressEXT( vkGetDeviceProcAddr( device, "vkGetBufferDeviceAddressEXT" ) ); @@ -20838,8 +21084,8 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_line_rasterization === vkCmdSetLineStippleEXT = PFN_vkCmdSetLineStippleEXT( vkGetDeviceProcAddr( device, "vkCmdSetLineStippleEXT" ) ); - if ( !vkCmdSetLineStippleKHR ) - vkCmdSetLineStippleKHR = vkCmdSetLineStippleEXT; + if ( !vkCmdSetLineStipple ) + vkCmdSetLineStipple = vkCmdSetLineStippleEXT; //=== VK_EXT_host_query_reset === vkResetQueryPoolEXT = PFN_vkResetQueryPoolEXT( vkGetDeviceProcAddr( device, "vkResetQueryPoolEXT" ) ); @@ -20901,17 +21147,29 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetPipelineExecutableInternalRepresentationsKHR( vkGetDeviceProcAddr( device, "vkGetPipelineExecutableInternalRepresentationsKHR" ) ); //=== VK_EXT_host_image_copy === - vkCopyMemoryToImageEXT = PFN_vkCopyMemoryToImageEXT( vkGetDeviceProcAddr( device, "vkCopyMemoryToImageEXT" ) ); - vkCopyImageToMemoryEXT = PFN_vkCopyImageToMemoryEXT( vkGetDeviceProcAddr( device, "vkCopyImageToMemoryEXT" ) ); - vkCopyImageToImageEXT = PFN_vkCopyImageToImageEXT( vkGetDeviceProcAddr( device, "vkCopyImageToImageEXT" ) ); - vkTransitionImageLayoutEXT = PFN_vkTransitionImageLayoutEXT( vkGetDeviceProcAddr( device, "vkTransitionImageLayoutEXT" ) ); + vkCopyMemoryToImageEXT = PFN_vkCopyMemoryToImageEXT( vkGetDeviceProcAddr( device, "vkCopyMemoryToImageEXT" ) ); + if ( !vkCopyMemoryToImage ) + vkCopyMemoryToImage = vkCopyMemoryToImageEXT; + vkCopyImageToMemoryEXT = PFN_vkCopyImageToMemoryEXT( vkGetDeviceProcAddr( device, "vkCopyImageToMemoryEXT" ) ); + if ( !vkCopyImageToMemory ) + vkCopyImageToMemory = vkCopyImageToMemoryEXT; + vkCopyImageToImageEXT = PFN_vkCopyImageToImageEXT( 
vkGetDeviceProcAddr( device, "vkCopyImageToImageEXT" ) ); + if ( !vkCopyImageToImage ) + vkCopyImageToImage = vkCopyImageToImageEXT; + vkTransitionImageLayoutEXT = PFN_vkTransitionImageLayoutEXT( vkGetDeviceProcAddr( device, "vkTransitionImageLayoutEXT" ) ); + if ( !vkTransitionImageLayout ) + vkTransitionImageLayout = vkTransitionImageLayoutEXT; vkGetImageSubresourceLayout2EXT = PFN_vkGetImageSubresourceLayout2EXT( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2EXT" ) ); - if ( !vkGetImageSubresourceLayout2KHR ) - vkGetImageSubresourceLayout2KHR = vkGetImageSubresourceLayout2EXT; + if ( !vkGetImageSubresourceLayout2 ) + vkGetImageSubresourceLayout2 = vkGetImageSubresourceLayout2EXT; //=== VK_KHR_map_memory2 === - vkMapMemory2KHR = PFN_vkMapMemory2KHR( vkGetDeviceProcAddr( device, "vkMapMemory2KHR" ) ); + vkMapMemory2KHR = PFN_vkMapMemory2KHR( vkGetDeviceProcAddr( device, "vkMapMemory2KHR" ) ); + if ( !vkMapMemory2 ) + vkMapMemory2 = vkMapMemory2KHR; vkUnmapMemory2KHR = PFN_vkUnmapMemory2KHR( vkGetDeviceProcAddr( device, "vkUnmapMemory2KHR" ) ); + if ( !vkUnmapMemory2 ) + vkUnmapMemory2 = vkUnmapMemory2KHR; //=== VK_EXT_swapchain_maintenance1 === vkReleaseSwapchainImagesEXT = PFN_vkReleaseSwapchainImagesEXT( vkGetDeviceProcAddr( device, "vkReleaseSwapchainImagesEXT" ) ); @@ -21207,11 +21465,19 @@ namespace VULKAN_HPP_NAMESPACE vkCmdOpticalFlowExecuteNV = PFN_vkCmdOpticalFlowExecuteNV( vkGetDeviceProcAddr( device, "vkCmdOpticalFlowExecuteNV" ) ); //=== VK_KHR_maintenance5 === - vkCmdBindIndexBuffer2KHR = PFN_vkCmdBindIndexBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdBindIndexBuffer2KHR" ) ); + vkCmdBindIndexBuffer2KHR = PFN_vkCmdBindIndexBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdBindIndexBuffer2KHR" ) ); + if ( !vkCmdBindIndexBuffer2 ) + vkCmdBindIndexBuffer2 = vkCmdBindIndexBuffer2KHR; vkGetRenderingAreaGranularityKHR = PFN_vkGetRenderingAreaGranularityKHR( vkGetDeviceProcAddr( device, "vkGetRenderingAreaGranularityKHR" ) ); + if ( !vkGetRenderingAreaGranularity ) + vkGetRenderingAreaGranularity = vkGetRenderingAreaGranularityKHR; vkGetDeviceImageSubresourceLayoutKHR = PFN_vkGetDeviceImageSubresourceLayoutKHR( vkGetDeviceProcAddr( device, "vkGetDeviceImageSubresourceLayoutKHR" ) ); + if ( !vkGetDeviceImageSubresourceLayout ) + vkGetDeviceImageSubresourceLayout = vkGetDeviceImageSubresourceLayoutKHR; vkGetImageSubresourceLayout2KHR = PFN_vkGetImageSubresourceLayout2KHR( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2KHR" ) ); + if ( !vkGetImageSubresourceLayout2 ) + vkGetImageSubresourceLayout2 = vkGetImageSubresourceLayout2KHR; //=== VK_AMD_anti_lag === vkAntiLagUpdateAMD = PFN_vkAntiLagUpdateAMD( vkGetDeviceProcAddr( device, "vkAntiLagUpdateAMD" ) ); @@ -21253,16 +21519,26 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_line_rasterization === vkCmdSetLineStippleKHR = PFN_vkCmdSetLineStippleKHR( vkGetDeviceProcAddr( device, "vkCmdSetLineStippleKHR" ) ); + if ( !vkCmdSetLineStipple ) + vkCmdSetLineStipple = vkCmdSetLineStippleKHR; //=== VK_KHR_calibrated_timestamps === vkGetCalibratedTimestampsKHR = PFN_vkGetCalibratedTimestampsKHR( vkGetDeviceProcAddr( device, "vkGetCalibratedTimestampsKHR" ) ); //=== VK_KHR_maintenance6 === vkCmdBindDescriptorSets2KHR = PFN_vkCmdBindDescriptorSets2KHR( vkGetDeviceProcAddr( device, "vkCmdBindDescriptorSets2KHR" ) ); - vkCmdPushConstants2KHR = PFN_vkCmdPushConstants2KHR( vkGetDeviceProcAddr( device, "vkCmdPushConstants2KHR" ) ); - vkCmdPushDescriptorSet2KHR = PFN_vkCmdPushDescriptorSet2KHR( vkGetDeviceProcAddr( device, 
"vkCmdPushDescriptorSet2KHR" ) ); + if ( !vkCmdBindDescriptorSets2 ) + vkCmdBindDescriptorSets2 = vkCmdBindDescriptorSets2KHR; + vkCmdPushConstants2KHR = PFN_vkCmdPushConstants2KHR( vkGetDeviceProcAddr( device, "vkCmdPushConstants2KHR" ) ); + if ( !vkCmdPushConstants2 ) + vkCmdPushConstants2 = vkCmdPushConstants2KHR; + vkCmdPushDescriptorSet2KHR = PFN_vkCmdPushDescriptorSet2KHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSet2KHR" ) ); + if ( !vkCmdPushDescriptorSet2 ) + vkCmdPushDescriptorSet2 = vkCmdPushDescriptorSet2KHR; vkCmdPushDescriptorSetWithTemplate2KHR = PFN_vkCmdPushDescriptorSetWithTemplate2KHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplate2KHR" ) ); + if ( !vkCmdPushDescriptorSetWithTemplate2 ) + vkCmdPushDescriptorSetWithTemplate2 = vkCmdPushDescriptorSetWithTemplate2KHR; vkCmdSetDescriptorBufferOffsets2EXT = PFN_vkCmdSetDescriptorBufferOffsets2EXT( vkGetDeviceProcAddr( device, "vkCmdSetDescriptorBufferOffsets2EXT" ) ); vkCmdBindDescriptorBufferEmbeddedSamplers2EXT = PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT( vkGetDeviceProcAddr( device, "vkCmdBindDescriptorBufferEmbeddedSamplers2EXT" ) ); diff --git a/third_party/vulkan/vulkan_core.h b/third_party/vulkan/vulkan_core.h index bdbf3f8..4e716da 100644 --- a/third_party/vulkan/vulkan_core.h +++ b/third_party/vulkan/vulkan_core.h @@ -69,10 +69,10 @@ extern "C" { #define VK_API_VERSION_1_0 VK_MAKE_API_VERSION(0, 1, 0, 0)// Patch version should always be set to 0 // Version of this file -#define VK_HEADER_VERSION 302 +#define VK_HEADER_VERSION 303 // Complete version of this file -#define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 3, VK_HEADER_VERSION) +#define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 4, VK_HEADER_VERSION) // VK_MAKE_VERSION is deprecated, but no reason was given in the API XML // DEPRECATED: This define is deprecated. VK_MAKE_API_VERSION should be used instead. 
@@ -166,6 +166,7 @@ typedef enum VkResult { VK_ERROR_FRAGMENTATION = -1000161000, VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000, VK_PIPELINE_COMPILE_REQUIRED = 1000297000, + VK_ERROR_NOT_PERMITTED = -1000174001, VK_ERROR_SURFACE_LOST_KHR = -1000000000, VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, VK_SUBOPTIMAL_KHR = 1000001003, @@ -180,7 +181,6 @@ typedef enum VkResult { VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR = -1000023004, VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR = -1000023005, VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, - VK_ERROR_NOT_PERMITTED_KHR = -1000174001, VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, VK_THREAD_IDLE_KHR = 1000268000, VK_THREAD_DONE_KHR = 1000268001, @@ -194,7 +194,8 @@ typedef enum VkResult { VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, - VK_ERROR_NOT_PERMITTED_EXT = VK_ERROR_NOT_PERMITTED_KHR, + VK_ERROR_NOT_PERMITTED_EXT = VK_ERROR_NOT_PERMITTED, + VK_ERROR_NOT_PERMITTED_KHR = VK_ERROR_NOT_PERMITTED, VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, VK_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED, @@ -421,6 +422,56 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES = 1000413001, VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS = 1000413002, VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS = 1000413003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_FEATURES = 55, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_PROPERTIES = 56, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO = 1000174000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES = 1000388000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES = 1000388001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES = 1000416000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES = 1000528000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES = 1000544000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES = 1000259000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO = 1000259001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES = 1000259002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES = 1000525000, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO = 1000190001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES = 1000190002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES = 1000265000, + VK_STRUCTURE_TYPE_MEMORY_MAP_INFO = 1000271000, + VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO = 1000271001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES = 1000470000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES = 1000470001, + VK_STRUCTURE_TYPE_RENDERING_AREA_INFO = 1000470003, + VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO = 1000470004, + VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2 = 1000338002, + VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2 = 1000338003, + VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO = 1000470005, + VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO = 1000470006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES = 1000080000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES = 1000232000, + VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO = 1000232001, + VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO = 1000232002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES = 1000545000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES = 1000545001, + VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS = 1000545002, + VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO = 1000545003, + VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO = 1000545004, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO = 1000545005, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO = 1000545006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES = 1000466000, + VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO = 1000068000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES = 1000068001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES = 1000068002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES = 1000270000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES = 1000270001, + VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY = 1000270002, + VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY = 1000270003, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO = 1000270004, + VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO = 1000270005, + VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO = 1000270006, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO = 1000270007, + VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE = 1000270008, + VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY = 1000270009, VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, @@ -520,9 +571,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, - VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT = 1000068000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT = 1000068001, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT = 1000068002, VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, @@ -537,7 +585,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, @@ -689,9 +736,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_INFO_KHR = 1000187003, VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_KHR = 1000187004, VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_KHR = 1000187005, - VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR = 1000174000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR = 1000388000, - VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR = 1000388001, 
VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, @@ -728,9 +772,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000044006, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR = 1000232000, - VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR = 1000232001, - VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR = 1000232002, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT = 1000234000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_QUAD_CONTROL_FEATURES_KHR = 1000235000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, @@ -765,18 +806,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003, VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004, VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT = 1000270000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT = 1000270001, - VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT = 1000270002, - VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY_EXT = 1000270003, - VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO_EXT = 1000270004, - VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT = 1000270005, - VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT = 1000270006, - VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO_EXT = 1000270007, - VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE_EXT = 1000270008, - VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT = 1000270009, - VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR = 1000271000, - VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR = 1000271001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT = 1000272000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_PROPERTIES_EXT = 1000272001, VK_STRUCTURE_TYPE_MEMORY_MAP_PLACED_INFO_EXT = 1000272002, @@ -971,7 +1000,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT = 1000411001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT = 1000412000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM = 1000415000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR = 1000416000, VK_STRUCTURE_TYPE_DEVICE_QUEUE_SHADER_CORE_CONTROL_CREATE_INFO_ARM = 1000417000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM = 1000417001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_PROPERTIES_ARM = 1000417002, @@ -1027,18 +1055,9 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_OPTICAL_FLOW_EXECUTE_INFO_NV = 1000464005, VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_PRIVATE_DATA_INFO_NV = 1000464010, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT = 1000465000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT = 1000466000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID = 1000468000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_PROPERTIES_ANDROID = 1000468001, VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_RESOLVE_PROPERTIES_ANDROID = 1000468002, - 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR = 1000470000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR = 1000470001, - VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR = 1000470003, - VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR = 1000470004, - VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR = 1000338002, - VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR = 1000338003, - VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR = 1000470005, - VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR = 1000470006, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ANTI_LAG_FEATURES_AMD = 1000476000, VK_STRUCTURE_TYPE_ANTI_LAG_DATA_AMD = 1000476001, VK_STRUCTURE_TYPE_ANTI_LAG_PRESENTATION_INFO_AMD = 1000476002, @@ -1119,29 +1138,13 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_YCBCR_DEGAMMA_CREATE_INFO_QCOM = 1000520001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM = 1000521000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT = 1000524000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR = 1000525000, - VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR = 1000190001, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR = 1000190002, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR = 1000528000, VK_STRUCTURE_TYPE_SCREEN_BUFFER_PROPERTIES_QNX = 1000529000, VK_STRUCTURE_TYPE_SCREEN_BUFFER_FORMAT_PROPERTIES_QNX = 1000529001, VK_STRUCTURE_TYPE_IMPORT_SCREEN_BUFFER_INFO_QNX = 1000529002, VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_QNX = 1000529003, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX = 1000529004, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_DRIVER_PROPERTIES_MSFT = 1000530000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR = 1000265000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR = 1000259000, - VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR = 1000259001, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR = 1000259002, VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR = 1000184000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR = 1000544000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR = 1000545000, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR = 1000545001, - VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR = 1000545002, - VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR = 1000545003, - VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR = 1000545004, - VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR = 1000545005, - VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR = 1000545006, VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT = 1000545007, VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT = 1000545008, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV = 1000546000, @@ -1222,6 +1225,9 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES, + VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, @@ -1235,6 +1241,7 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, @@ -1291,13 +1298,16 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, - VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES, VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR, - VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES, VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, @@ -1319,6 +1329,9 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES, VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES, + VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO, + VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES, VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT, VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT, @@ -1332,11 +1345,23 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO, VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO, VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR, - VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES, + VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT = VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY, + VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY_EXT = VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO_EXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO, + VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO, + VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT = VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO_EXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO, + VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE_EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE, + VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT = VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY, + VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO, + VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES, @@ -1366,20 +1391,46 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = VK_STRUCTURE_TYPE_IMAGE_BLIT_2, VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2, VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2, - VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR, - VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR, + VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2, + VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT, VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT, VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3, VK_STRUCTURE_TYPE_PIPELINE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR, - VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES_KHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES, VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS, VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES, + VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_AREA_INFO, + VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO, + VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2, + VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2, + VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO, + VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO, VK_STRUCTURE_TYPE_SHADER_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES, + VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR = VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS, + VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO, + VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO, 
VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF } VkStructureType; @@ -1406,6 +1457,7 @@ typedef enum VkImageLayout { VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL = 1000314000, VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL = 1000314001, + VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ = 1000232000, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR = 1000024000, VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR = 1000024001, @@ -1413,7 +1465,6 @@ typedef enum VkImageLayout { VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000, VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR = 1000164003, - VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR = 1000232000, VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR = 1000299000, VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR = 1000299001, VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR = 1000299002, @@ -1422,6 +1473,7 @@ typedef enum VkImageLayout { VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR, + VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR = VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, @@ -1758,6 +1810,8 @@ typedef enum VkFormat { VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK = 1000066011, VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK = 1000066012, VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK = 1000066013, + VK_FORMAT_A1B5G5R5_UNORM_PACK16 = 1000470000, + VK_FORMAT_A8_UNORM = 1000470001, VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, @@ -1767,8 +1821,6 @@ typedef enum VkFormat { VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, VK_FORMAT_R16G16_SFIXED5_NV = 1000464000, - VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR = 1000470000, - VK_FORMAT_A8_UNORM_KHR = 1000470001, VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK, VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK, VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK, @@ -1825,6 +1877,8 @@ typedef enum VkFormat { VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16, // VK_FORMAT_R16G16_S10_5_NV is a deprecated alias VK_FORMAT_R16G16_S10_5_NV = VK_FORMAT_R16G16_SFIXED5_NV, + VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR = VK_FORMAT_A1B5G5R5_UNORM_PACK16, + VK_FORMAT_A8_UNORM_KHR = VK_FORMAT_A8_UNORM, VK_FORMAT_MAX_ENUM = 0x7FFFFFFF } VkFormat; @@ -2015,6 +2069,7 @@ typedef enum VkDynamicState { VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE = 1000377001, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE = 1000377002, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE = 1000377004, + VK_DYNAMIC_STATE_LINE_STIPPLE = 1000259000, VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_ENABLE_EXT = 1000099001, @@ -2062,9 +2117,8 @@ typedef enum VkDynamicState { VK_DYNAMIC_STATE_REPRESENTATIVE_FRAGMENT_TEST_ENABLE_NV = 1000455031, 
VK_DYNAMIC_STATE_COVERAGE_REDUCTION_MODE_NV = 1000455032, VK_DYNAMIC_STATE_ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT = 1000524000, - VK_DYNAMIC_STATE_LINE_STIPPLE_KHR = 1000259000, VK_DYNAMIC_STATE_DEPTH_CLAMP_RANGE_EXT = 1000582000, - VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = VK_DYNAMIC_STATE_LINE_STIPPLE_KHR, + VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = VK_DYNAMIC_STATE_LINE_STIPPLE, VK_DYNAMIC_STATE_CULL_MODE_EXT = VK_DYNAMIC_STATE_CULL_MODE, VK_DYNAMIC_STATE_FRONT_FACE_EXT = VK_DYNAMIC_STATE_FRONT_FACE, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY, @@ -2080,6 +2134,7 @@ typedef enum VkDynamicState { VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT = VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT = VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE, + VK_DYNAMIC_STATE_LINE_STIPPLE_KHR = VK_DYNAMIC_STATE_LINE_STIPPLE, VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF } VkDynamicState; @@ -2214,8 +2269,9 @@ typedef enum VkAttachmentLoadOp { VK_ATTACHMENT_LOAD_OP_LOAD = 0, VK_ATTACHMENT_LOAD_OP_CLEAR = 1, VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, - VK_ATTACHMENT_LOAD_OP_NONE_KHR = 1000400000, - VK_ATTACHMENT_LOAD_OP_NONE_EXT = VK_ATTACHMENT_LOAD_OP_NONE_KHR, + VK_ATTACHMENT_LOAD_OP_NONE = 1000400000, + VK_ATTACHMENT_LOAD_OP_NONE_EXT = VK_ATTACHMENT_LOAD_OP_NONE, + VK_ATTACHMENT_LOAD_OP_NONE_KHR = VK_ATTACHMENT_LOAD_OP_NONE, VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF } VkAttachmentLoadOp; @@ -2250,10 +2306,11 @@ typedef enum VkCommandBufferLevel { typedef enum VkIndexType { VK_INDEX_TYPE_UINT16 = 0, VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_UINT8 = 1000265000, VK_INDEX_TYPE_NONE_KHR = 1000165000, - VK_INDEX_TYPE_UINT8_KHR = 1000265000, VK_INDEX_TYPE_NONE_NV = VK_INDEX_TYPE_NONE_KHR, - VK_INDEX_TYPE_UINT8_EXT = VK_INDEX_TYPE_UINT8_KHR, + VK_INDEX_TYPE_UINT8_EXT = VK_INDEX_TYPE_UINT8, + VK_INDEX_TYPE_UINT8_KHR = VK_INDEX_TYPE_UINT8, VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF } VkIndexType; @@ -2425,12 +2482,12 @@ typedef enum VkImageUsageFlagBits { VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, + VK_IMAGE_USAGE_HOST_TRANSFER_BIT = 0x00400000, VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00000400, VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00000800, VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR = 0x00001000, VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00000100, - VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT = 0x00400000, VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00002000, VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00004000, VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR = 0x00008000, @@ -2441,6 +2498,7 @@ typedef enum VkImageUsageFlagBits { VK_IMAGE_USAGE_VIDEO_ENCODE_QUANTIZATION_DELTA_MAP_BIT_KHR = 0x02000000, VK_IMAGE_USAGE_VIDEO_ENCODE_EMPHASIS_MAP_BIT_KHR = 0x04000000, VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT = VK_IMAGE_USAGE_HOST_TRANSFER_BIT, VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkImageUsageFlagBits; typedef VkFlags VkImageUsageFlags; @@ -2680,6 +2738,8 @@ typedef enum VkPipelineCreateFlagBits { VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010, VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT = 0x00000100, VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT = 0x00000200, + 
VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT = 0x08000000, + VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT = 0x40000000, VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000, VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000, VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000, @@ -2704,8 +2764,6 @@ typedef enum VkPipelineCreateFlagBits { #ifdef VK_ENABLE_BETA_EXTENSIONS VK_PIPELINE_CREATE_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV = 0x10000000, #endif - VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT = 0x08000000, - VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT = 0x40000000, VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, @@ -2715,6 +2773,8 @@ typedef enum VkPipelineCreateFlagBits { VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT, VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT, + VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT = VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT, + VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT = VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT, VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkPipelineCreateFlagBits; typedef VkFlags VkPipelineCreateFlags; @@ -2822,12 +2882,13 @@ typedef VkFlags VkDescriptorPoolResetFlags; typedef enum VkDescriptorSetLayoutCreateFlagBits { VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002, - VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT = 0x00000001, VK_DESCRIPTOR_SET_LAYOUT_CREATE_DESCRIPTOR_BUFFER_BIT_EXT = 0x00000010, VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT = 0x00000020, VK_DESCRIPTOR_SET_LAYOUT_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00000080, VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT = 0x00000004, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PER_STAGE_BIT_NV = 0x00000040, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT, VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE = VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT, VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF @@ -5045,7 +5106,8 @@ typedef enum VkChromaLocation { typedef enum VkDescriptorUpdateTemplateType { VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0, - VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS = 1, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS, VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF } VkDescriptorUpdateTemplateType; @@ -5059,9 +5121,11 @@ typedef enum VkSubgroupFeatureFlagBits { VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020, VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040, VK_SUBGROUP_FEATURE_QUAD_BIT = 
0x00000080, + VK_SUBGROUP_FEATURE_ROTATE_BIT = 0x00000200, + VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT = 0x00000400, VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100, - VK_SUBGROUP_FEATURE_ROTATE_BIT_KHR = 0x00000200, - VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT_KHR = 0x00000400, + VK_SUBGROUP_FEATURE_ROTATE_BIT_KHR = VK_SUBGROUP_FEATURE_ROTATE_BIT, + VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT_KHR = VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT, VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkSubgroupFeatureFlagBits; typedef VkFlags VkSubgroupFeatureFlags; @@ -6655,59 +6719,59 @@ typedef VkFlags64 VkPipelineStageFlags2; // Flag bits for VkPipelineStageFlagBits2 typedef VkFlags64 VkPipelineStageFlagBits2; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE = 0ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT = 0x00000001ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT = 0x00000002ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT = 0x00000004ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT = 0x00000008ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT = 0x00000040ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT = 0x00000080ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT = 0x00000100ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT = 0x00000200ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 0x00000400ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT = 0x00000800ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT = 0x00001000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT = 0x00001000ULL; -static 
const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT = 0x00002000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT = 0x00004000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT = 0x00008000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT = 0x00010000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT = 0x100000000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT = 0x200000000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT = 0x400000000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT = 0x800000000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT = 0x1000000000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT = 0x2000000000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT = 0x4000000000ULL; -static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR = 0x04000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR = 0x08000000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 
0x00000400ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL; +static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000ULL; static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV = 0x00020000ULL; @@ -6737,51 +6801,51 @@ typedef VkFlags64 VkAccessFlags2; // Flag bits for VkAccessFlagBits2 typedef VkFlags64 VkAccessFlagBits2; static const VkAccessFlagBits2 VK_ACCESS_2_NONE = 0ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_NONE_KHR = 0ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT = 0x00000001ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT = 0x00000002ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL; static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL; static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT = 0x00000008ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL; static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT = 0x00000010ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT = 0x00000020ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT = 0x00000040ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT = 0x00000080ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL; static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL; static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200ULL; -static const 
VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL; static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT = 0x00000800ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT = 0x00001000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT = 0x00002000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT = 0x00004000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT = 0x00008000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT_KHR = 0x00008000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT = 0x00010000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT = 0x100000000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT = 0x200000000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT = 0x400000000ULL; -static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR = 0x800000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR = 0x1000000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR = 0x2000000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR = 0x4000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_NONE_KHR = 0ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT_KHR = 
0x00008000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000ULL; static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000ULL; @@ -6832,59 +6896,33 @@ typedef VkFlags64 VkFormatFeatureFlags2; // Flag bits for VkFormatFeatureFlagBits2 typedef VkFlags64 VkFormatFeatureFlagBits2; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT = 0x00000001ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR = 0x00000001ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT = 0x00000002ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR = 0x00000002ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT_KHR = 0x00000004ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT = 0x00000010ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000010ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT_KHR = 0x00000020ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT = 0x00000040ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT_KHR = 0x00000040ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT = 0x00000080ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR = 0x00000080ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR = 0x00000100ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR = 0x00000200ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT = 0x00000400ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT_KHR = 0x00000400ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT = 0x00000800ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT_KHR = 0x00000800ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT_KHR = 0x00001000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT = 0x00002000ULL; -static const 
VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT = 0x00004000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT_KHR = 0x00004000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT = 0x00008000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT_KHR = 0x00008000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT_KHR = 0x00010000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 0x00020000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 0x00040000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = 0x00080000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = 0x00100000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = 0x00200000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT = 0x00400000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT_KHR = 0x00400000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT = 0x00800000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR = 0x00800000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT = 0x80000000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR = 0x80000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT = 0x100000000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR = 0x100000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT = 0x200000000ULL; -static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR = 0x200000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT = 0x00002000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT = 0x400000000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000ULL; static const VkFormatFeatureFlagBits2 
VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000ULL; @@ -6893,6 +6931,33 @@ static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT_EXT = 0x400000000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR = 0x00000001ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR = 0x00000002ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT_KHR = 0x00000004ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000010ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT_KHR = 0x00000020ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT_KHR = 0x00000040ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR = 0x00000080ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR = 0x00000100ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR = 0x00000200ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT_KHR = 0x00000400ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT_KHR = 0x00000800ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT_KHR = 0x00001000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT_KHR = 0x00004000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT_KHR = 0x00008000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 0x00020000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 0x00040000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = 0x00080000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = 0x00100000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = 0x00200000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT_KHR = 0x00400000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR = 0x00800000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR = 0x80000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR = 0x100000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR = 0x200000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT_KHR = 0x00010000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000ULL; static const VkFormatFeatureFlagBits2 
VK_FORMAT_FEATURE_2_LINEAR_COLOR_ATTACHMENT_BIT_NV = 0x4000000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_IMAGE_BIT_QCOM = 0x400000000ULL; static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_SAMPLED_IMAGE_BIT_QCOM = 0x800000000ULL; @@ -7656,6 +7721,731 @@ VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirements( #endif +// VK_VERSION_1_4 is a preprocessor guard. Do not pass it to API calls. +#define VK_VERSION_1_4 1 +// Vulkan 1.4 version number +#define VK_API_VERSION_1_4 VK_MAKE_API_VERSION(0, 1, 4, 0)// Patch version should always be set to 0 + +#define VK_MAX_GLOBAL_PRIORITY_SIZE 16U + +typedef enum VkPipelineRobustnessBufferBehavior { + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT = 0, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED = 1, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS = 2, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2 = 3, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2, + VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF +} VkPipelineRobustnessBufferBehavior; + +typedef enum VkPipelineRobustnessImageBehavior { + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT = 0, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED = 1, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS = 2, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2 = 3, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2, + VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF +} VkPipelineRobustnessImageBehavior; + +typedef enum VkQueueGlobalPriority { + VK_QUEUE_GLOBAL_PRIORITY_LOW = 128, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM = 256, + VK_QUEUE_GLOBAL_PRIORITY_HIGH = 512, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME = 1024, + VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = VK_QUEUE_GLOBAL_PRIORITY_HIGH, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME, + VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR = VK_QUEUE_GLOBAL_PRIORITY_LOW, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR = VK_QUEUE_GLOBAL_PRIORITY_HIGH, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR = VK_QUEUE_GLOBAL_PRIORITY_REALTIME, + VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM = 0x7FFFFFFF +} VkQueueGlobalPriority; + +typedef enum VkLineRasterizationMode { + VK_LINE_RASTERIZATION_MODE_DEFAULT = 0, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR = 1, + VK_LINE_RASTERIZATION_MODE_BRESENHAM = 2, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH = 3, + VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 
VK_LINE_RASTERIZATION_MODE_DEFAULT, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR, + VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = VK_LINE_RASTERIZATION_MODE_BRESENHAM, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH, + VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR = VK_LINE_RASTERIZATION_MODE_DEFAULT, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR = VK_LINE_RASTERIZATION_MODE_RECTANGULAR, + VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR = VK_LINE_RASTERIZATION_MODE_BRESENHAM, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH, + VK_LINE_RASTERIZATION_MODE_MAX_ENUM = 0x7FFFFFFF +} VkLineRasterizationMode; + +typedef enum VkMemoryUnmapFlagBits { + VK_MEMORY_UNMAP_RESERVE_BIT_EXT = 0x00000001, + VK_MEMORY_UNMAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryUnmapFlagBits; +typedef VkFlags VkMemoryUnmapFlags; +typedef VkFlags64 VkPipelineCreateFlags2; + +// Flag bits for VkPipelineCreateFlagBits2 +typedef VkFlags64 VkPipelineCreateFlagBits2; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT = 0x00000001ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT = 0x00000002ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DERIVATIVE_BIT = 0x00000004ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT = 0x00000010ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT = 0x00000100ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT = 0x00000200ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT = 0x08000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT = 0x40000000ULL; +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_EXECUTION_GRAPH_BIT_AMDX = 0x100000000ULL; +#endif +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x400000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR = 0x00000001ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT_KHR = 0x00000002ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DERIVATIVE_BIT_KHR = 0x00000004ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = 0x00000008ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR = 0x00000010ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV = 0x00000020ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR = 0x00000040ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR = 0x00000100ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR = 0x00000200ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT = 0x00000400ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT = 
0x00800000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR = 0x00000800ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_NV = 0x00040000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_ALLOW_MOTION_BIT_NV = 0x00100000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00200000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00400000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT = 0x01000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x02000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x04000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT_EXT = 0x08000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT = 0x40000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV = 0x10000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT = 0x20000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR = 0x80000000ULL; +static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_EXT = 0x4000000000ULL; + +typedef VkFlags64 VkBufferUsageFlags2; + +// Flag bits for VkBufferUsageFlagBits2 +typedef VkFlags64 VkBufferUsageFlagBits2; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT = 0x00000001ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_DST_BIT = 0x00000002ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT = 0x00000008ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT = 0x00000010ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT = 0x00000020ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT = 0x00000040ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT = 0x00000080ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT = 0x00000100ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT = 0x00020000ULL; +#ifdef VK_ENABLE_BETA_EXTENSIONS +static const VkBufferUsageFlagBits2 
VK_BUFFER_USAGE_2_EXECUTION_GRAPH_SCRATCH_BIT_AMDX = 0x02000000ULL; +#endif +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT_KHR = 0x00000001ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_DST_BIT_KHR = 0x00000002ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000004ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR = 0x00000010ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR = 0x00000020ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT_KHR = 0x00000040ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT_KHR = 0x00000080ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR = 0x00000100ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_RAY_TRACING_BIT_NV = 0x00000400ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_DECODE_SRC_BIT_KHR = 0x00002000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_DECODE_DST_BIT_KHR = 0x00004000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_ENCODE_DST_BIT_KHR = 0x00008000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_ENCODE_SRC_BIT_KHR = 0x00010000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT_KHR = 0x00020000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT = 0x00200000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT = 0x00400000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT = 0x04000000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT = 0x00800000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_MICROMAP_STORAGE_BIT_EXT = 0x01000000ULL; +static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT = 0x80000000ULL; + + +typedef enum VkHostImageCopyFlagBits { + VK_HOST_IMAGE_COPY_MEMCPY = 0x00000001, + VK_HOST_IMAGE_COPY_MEMCPY_EXT = VK_HOST_IMAGE_COPY_MEMCPY, + VK_HOST_IMAGE_COPY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkHostImageCopyFlagBits; +typedef VkFlags VkHostImageCopyFlags; +typedef struct VkPhysicalDeviceVulkan14Features { + VkStructureType sType; + void* pNext; + VkBool32 globalPriorityQuery; + VkBool32 shaderSubgroupRotate; + VkBool32 shaderSubgroupRotateClustered; + VkBool32 shaderFloatControls2; + VkBool32 shaderExpectAssume; + VkBool32 rectangularLines; + VkBool32 bresenhamLines; + VkBool32 smoothLines; + VkBool32 stippledRectangularLines; + VkBool32 stippledBresenhamLines; + VkBool32 stippledSmoothLines; + VkBool32 
vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; + VkBool32 indexTypeUint8; + VkBool32 dynamicRenderingLocalRead; + VkBool32 maintenance5; + VkBool32 maintenance6; + VkBool32 pipelineProtectedAccess; + VkBool32 pipelineRobustness; + VkBool32 hostImageCopy; + VkBool32 pushDescriptor; +} VkPhysicalDeviceVulkan14Features; + +typedef struct VkPhysicalDeviceVulkan14Properties { + VkStructureType sType; + void* pNext; + uint32_t lineSubPixelPrecisionBits; + uint32_t maxVertexAttribDivisor; + VkBool32 supportsNonZeroFirstInstance; + uint32_t maxPushDescriptors; + VkBool32 dynamicRenderingLocalReadDepthStencilAttachments; + VkBool32 dynamicRenderingLocalReadMultisampledAttachments; + VkBool32 earlyFragmentMultisampleCoverageAfterSampleCounting; + VkBool32 earlyFragmentSampleMaskTestBeforeSampleCounting; + VkBool32 depthStencilSwizzleOneSupport; + VkBool32 polygonModePointSize; + VkBool32 nonStrictSinglePixelWideLinesUseParallelogram; + VkBool32 nonStrictWideLinesUseParallelogram; + VkBool32 blockTexelViewCompatibleMultipleLayers; + uint32_t maxCombinedImageSamplerDescriptorCount; + VkBool32 fragmentShadingRateClampCombinerInputs; + VkPipelineRobustnessBufferBehavior defaultRobustnessStorageBuffers; + VkPipelineRobustnessBufferBehavior defaultRobustnessUniformBuffers; + VkPipelineRobustnessBufferBehavior defaultRobustnessVertexInputs; + VkPipelineRobustnessImageBehavior defaultRobustnessImages; + uint32_t copySrcLayoutCount; + VkImageLayout* pCopySrcLayouts; + uint32_t copyDstLayoutCount; + VkImageLayout* pCopyDstLayouts; + uint8_t optimalTilingLayoutUUID[VK_UUID_SIZE]; + VkBool32 identicalMemoryTypeRequirements; +} VkPhysicalDeviceVulkan14Properties; + +typedef struct VkDeviceQueueGlobalPriorityCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueueGlobalPriority globalPriority; +} VkDeviceQueueGlobalPriorityCreateInfo; + +typedef struct VkPhysicalDeviceGlobalPriorityQueryFeatures { + VkStructureType sType; + void* pNext; + VkBool32 globalPriorityQuery; +} VkPhysicalDeviceGlobalPriorityQueryFeatures; + +typedef struct VkQueueFamilyGlobalPriorityProperties { + VkStructureType sType; + void* pNext; + uint32_t priorityCount; + VkQueueGlobalPriority priorities[VK_MAX_GLOBAL_PRIORITY_SIZE]; +} VkQueueFamilyGlobalPriorityProperties; + +typedef struct VkPhysicalDeviceShaderSubgroupRotateFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupRotate; + VkBool32 shaderSubgroupRotateClustered; +} VkPhysicalDeviceShaderSubgroupRotateFeatures; + +typedef struct VkPhysicalDeviceShaderFloatControls2Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloatControls2; +} VkPhysicalDeviceShaderFloatControls2Features; + +typedef struct VkPhysicalDeviceShaderExpectAssumeFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderExpectAssume; +} VkPhysicalDeviceShaderExpectAssumeFeatures; + +typedef struct VkPhysicalDeviceLineRasterizationFeatures { + VkStructureType sType; + void* pNext; + VkBool32 rectangularLines; + VkBool32 bresenhamLines; + VkBool32 smoothLines; + VkBool32 stippledRectangularLines; + VkBool32 stippledBresenhamLines; + VkBool32 stippledSmoothLines; +} VkPhysicalDeviceLineRasterizationFeatures; + +typedef struct VkPhysicalDeviceLineRasterizationProperties { + VkStructureType sType; + void* pNext; + uint32_t lineSubPixelPrecisionBits; +} VkPhysicalDeviceLineRasterizationProperties; + +typedef struct VkPipelineRasterizationLineStateCreateInfo { + VkStructureType sType; + const void* pNext; + 
VkLineRasterizationMode lineRasterizationMode; + VkBool32 stippledLineEnable; + uint32_t lineStippleFactor; + uint16_t lineStipplePattern; +} VkPipelineRasterizationLineStateCreateInfo; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorProperties { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; + VkBool32 supportsNonZeroFirstInstance; +} VkPhysicalDeviceVertexAttributeDivisorProperties; + +typedef struct VkVertexInputBindingDivisorDescription { + uint32_t binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescription; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescription* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfo; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorFeatures { + VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; +} VkPhysicalDeviceVertexAttributeDivisorFeatures; + +typedef struct VkPhysicalDeviceIndexTypeUint8Features { + VkStructureType sType; + void* pNext; + VkBool32 indexTypeUint8; +} VkPhysicalDeviceIndexTypeUint8Features; + +typedef struct VkMemoryMapInfo { + VkStructureType sType; + const void* pNext; + VkMemoryMapFlags flags; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMemoryMapInfo; + +typedef struct VkMemoryUnmapInfo { + VkStructureType sType; + const void* pNext; + VkMemoryUnmapFlags flags; + VkDeviceMemory memory; +} VkMemoryUnmapInfo; + +typedef struct VkPhysicalDeviceMaintenance5Features { + VkStructureType sType; + void* pNext; + VkBool32 maintenance5; +} VkPhysicalDeviceMaintenance5Features; + +typedef struct VkPhysicalDeviceMaintenance5Properties { + VkStructureType sType; + void* pNext; + VkBool32 earlyFragmentMultisampleCoverageAfterSampleCounting; + VkBool32 earlyFragmentSampleMaskTestBeforeSampleCounting; + VkBool32 depthStencilSwizzleOneSupport; + VkBool32 polygonModePointSize; + VkBool32 nonStrictSinglePixelWideLinesUseParallelogram; + VkBool32 nonStrictWideLinesUseParallelogram; +} VkPhysicalDeviceMaintenance5Properties; + +typedef struct VkRenderingAreaInfo { + VkStructureType sType; + const void* pNext; + uint32_t viewMask; + uint32_t colorAttachmentCount; + const VkFormat* pColorAttachmentFormats; + VkFormat depthAttachmentFormat; + VkFormat stencilAttachmentFormat; +} VkRenderingAreaInfo; + +typedef struct VkImageSubresource2 { + VkStructureType sType; + void* pNext; + VkImageSubresource imageSubresource; +} VkImageSubresource2; + +typedef struct VkDeviceImageSubresourceInfo { + VkStructureType sType; + const void* pNext; + const VkImageCreateInfo* pCreateInfo; + const VkImageSubresource2* pSubresource; +} VkDeviceImageSubresourceInfo; + +typedef struct VkSubresourceLayout2 { + VkStructureType sType; + void* pNext; + VkSubresourceLayout subresourceLayout; +} VkSubresourceLayout2; + +typedef struct VkPipelineCreateFlags2CreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags2 flags; +} VkPipelineCreateFlags2CreateInfo; + +typedef struct VkBufferUsageFlags2CreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferUsageFlags2 usage; +} VkBufferUsageFlags2CreateInfo; + +typedef struct VkPhysicalDevicePushDescriptorProperties { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorProperties; + +typedef struct 
VkPhysicalDeviceDynamicRenderingLocalReadFeatures { + VkStructureType sType; + void* pNext; + VkBool32 dynamicRenderingLocalRead; +} VkPhysicalDeviceDynamicRenderingLocalReadFeatures; + +typedef struct VkRenderingAttachmentLocationInfo { + VkStructureType sType; + const void* pNext; + uint32_t colorAttachmentCount; + const uint32_t* pColorAttachmentLocations; +} VkRenderingAttachmentLocationInfo; + +typedef struct VkRenderingInputAttachmentIndexInfo { + VkStructureType sType; + const void* pNext; + uint32_t colorAttachmentCount; + const uint32_t* pColorAttachmentInputIndices; + const uint32_t* pDepthInputAttachmentIndex; + const uint32_t* pStencilInputAttachmentIndex; +} VkRenderingInputAttachmentIndexInfo; + +typedef struct VkPhysicalDeviceMaintenance6Features { + VkStructureType sType; + void* pNext; + VkBool32 maintenance6; +} VkPhysicalDeviceMaintenance6Features; + +typedef struct VkPhysicalDeviceMaintenance6Properties { + VkStructureType sType; + void* pNext; + VkBool32 blockTexelViewCompatibleMultipleLayers; + uint32_t maxCombinedImageSamplerDescriptorCount; + VkBool32 fragmentShadingRateClampCombinerInputs; +} VkPhysicalDeviceMaintenance6Properties; + +typedef struct VkBindMemoryStatus { + VkStructureType sType; + const void* pNext; + VkResult* pResult; +} VkBindMemoryStatus; + +typedef struct VkBindDescriptorSetsInfo { + VkStructureType sType; + const void* pNext; + VkShaderStageFlags stageFlags; + VkPipelineLayout layout; + uint32_t firstSet; + uint32_t descriptorSetCount; + const VkDescriptorSet* pDescriptorSets; + uint32_t dynamicOffsetCount; + const uint32_t* pDynamicOffsets; +} VkBindDescriptorSetsInfo; + +typedef struct VkPushConstantsInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayout layout; + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; + const void* pValues; +} VkPushConstantsInfo; + +typedef struct VkPushDescriptorSetInfo { + VkStructureType sType; + const void* pNext; + VkShaderStageFlags stageFlags; + VkPipelineLayout layout; + uint32_t set; + uint32_t descriptorWriteCount; + const VkWriteDescriptorSet* pDescriptorWrites; +} VkPushDescriptorSetInfo; + +typedef struct VkPushDescriptorSetWithTemplateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorUpdateTemplate descriptorUpdateTemplate; + VkPipelineLayout layout; + uint32_t set; + const void* pData; +} VkPushDescriptorSetWithTemplateInfo; + +typedef struct VkPhysicalDevicePipelineProtectedAccessFeatures { + VkStructureType sType; + void* pNext; + VkBool32 pipelineProtectedAccess; +} VkPhysicalDevicePipelineProtectedAccessFeatures; + +typedef struct VkPhysicalDevicePipelineRobustnessFeatures { + VkStructureType sType; + void* pNext; + VkBool32 pipelineRobustness; +} VkPhysicalDevicePipelineRobustnessFeatures; + +typedef struct VkPhysicalDevicePipelineRobustnessProperties { + VkStructureType sType; + void* pNext; + VkPipelineRobustnessBufferBehavior defaultRobustnessStorageBuffers; + VkPipelineRobustnessBufferBehavior defaultRobustnessUniformBuffers; + VkPipelineRobustnessBufferBehavior defaultRobustnessVertexInputs; + VkPipelineRobustnessImageBehavior defaultRobustnessImages; +} VkPhysicalDevicePipelineRobustnessProperties; + +typedef struct VkPipelineRobustnessCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRobustnessBufferBehavior storageBuffers; + VkPipelineRobustnessBufferBehavior uniformBuffers; + VkPipelineRobustnessBufferBehavior vertexInputs; + VkPipelineRobustnessImageBehavior images; +} VkPipelineRobustnessCreateInfo; + +typedef 
struct VkPhysicalDeviceHostImageCopyFeatures { + VkStructureType sType; + void* pNext; + VkBool32 hostImageCopy; +} VkPhysicalDeviceHostImageCopyFeatures; + +typedef struct VkPhysicalDeviceHostImageCopyProperties { + VkStructureType sType; + void* pNext; + uint32_t copySrcLayoutCount; + VkImageLayout* pCopySrcLayouts; + uint32_t copyDstLayoutCount; + VkImageLayout* pCopyDstLayouts; + uint8_t optimalTilingLayoutUUID[VK_UUID_SIZE]; + VkBool32 identicalMemoryTypeRequirements; +} VkPhysicalDeviceHostImageCopyProperties; + +typedef struct VkMemoryToImageCopy { + VkStructureType sType; + const void* pNext; + const void* pHostPointer; + uint32_t memoryRowLength; + uint32_t memoryImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkMemoryToImageCopy; + +typedef struct VkImageToMemoryCopy { + VkStructureType sType; + const void* pNext; + void* pHostPointer; + uint32_t memoryRowLength; + uint32_t memoryImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkImageToMemoryCopy; + +typedef struct VkCopyMemoryToImageInfo { + VkStructureType sType; + const void* pNext; + VkHostImageCopyFlags flags; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkMemoryToImageCopy* pRegions; +} VkCopyMemoryToImageInfo; + +typedef struct VkCopyImageToMemoryInfo { + VkStructureType sType; + const void* pNext; + VkHostImageCopyFlags flags; + VkImage srcImage; + VkImageLayout srcImageLayout; + uint32_t regionCount; + const VkImageToMemoryCopy* pRegions; +} VkCopyImageToMemoryInfo; + +typedef struct VkCopyImageToImageInfo { + VkStructureType sType; + const void* pNext; + VkHostImageCopyFlags flags; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageCopy2* pRegions; +} VkCopyImageToImageInfo; + +typedef struct VkHostImageLayoutTransitionInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkImageLayout oldLayout; + VkImageLayout newLayout; + VkImageSubresourceRange subresourceRange; +} VkHostImageLayoutTransitionInfo; + +typedef struct VkSubresourceHostMemcpySize { + VkStructureType sType; + void* pNext; + VkDeviceSize size; +} VkSubresourceHostMemcpySize; + +typedef struct VkHostImageCopyDevicePerformanceQuery { + VkStructureType sType; + void* pNext; + VkBool32 optimalDeviceAccess; + VkBool32 identicalMemoryLayout; +} VkHostImageCopyDevicePerformanceQuery; + +typedef void (VKAPI_PTR *PFN_vkCmdSetLineStipple)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory2)(VkDevice device, const VkMemoryMapInfo* pMemoryMapInfo, void** ppData); +typedef VkResult (VKAPI_PTR *PFN_vkUnmapMemory2)(VkDevice device, const VkMemoryUnmapInfo* pMemoryUnmapInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer2)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkGetRenderingAreaGranularity)(VkDevice device, const VkRenderingAreaInfo* pRenderingAreaInfo, VkExtent2D* pGranularity); +typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSubresourceLayout)(VkDevice device, const VkDeviceImageSubresourceInfo* pInfo, VkSubresourceLayout2* pLayout); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2)(VkDevice device, VkImage image, const VkImageSubresource2* pSubresource, VkSubresourceLayout2* pLayout); +typedef 
void (VKAPI_PTR *PFN_vkCmdPushDescriptorSet)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplate)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingAttachmentLocations)(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfo* pLocationInfo); +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingInputAttachmentIndices)(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets2)(VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants2)(VkCommandBuffer commandBuffer, const VkPushConstantsInfo* pPushConstantsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSet2)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplate2)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToImage)(VkDevice device, const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToMemory)(VkDevice device, const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToImage)(VkDevice device, const VkCopyImageToImageInfo* pCopyImageToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkTransitionImageLayout)(VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfo* pTransitions); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStipple( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory2( + VkDevice device, + const VkMemoryMapInfo* pMemoryMapInfo, + void** ppData); + +VKAPI_ATTR VkResult VKAPI_CALL vkUnmapMemory2( + VkDevice device, + const VkMemoryUnmapInfo* pMemoryUnmapInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer2( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkDeviceSize size, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderingAreaGranularity( + VkDevice device, + const VkRenderingAreaInfo* pRenderingAreaInfo, + VkExtent2D* pGranularity); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSubresourceLayout( + VkDevice device, + const VkDeviceImageSubresourceInfo* pInfo, + VkSubresourceLayout2* pLayout); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2( + VkDevice device, + VkImage image, + const VkImageSubresource2* pSubresource, + VkSubresourceLayout2* pLayout); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSet( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplate( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingAttachmentLocations( + VkCommandBuffer 
commandBuffer, + const VkRenderingAttachmentLocationInfo* pLocationInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingInputAttachmentIndices( + VkCommandBuffer commandBuffer, + const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets2( + VkCommandBuffer commandBuffer, + const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants2( + VkCommandBuffer commandBuffer, + const VkPushConstantsInfo* pPushConstantsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSet2( + VkCommandBuffer commandBuffer, + const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplate2( + VkCommandBuffer commandBuffer, + const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToImage( + VkDevice device, + const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToMemory( + VkDevice device, + const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToImage( + VkDevice device, + const VkCopyImageToImageInfo* pCopyImageToImageInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkTransitionImageLayout( + VkDevice device, + uint32_t transitionCount, + const VkHostImageLayoutTransitionInfo* pTransitions); +#endif + + // VK_KHR_surface is a preprocessor guard. Do not pass it to API calls. #define VK_KHR_surface 1 VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) @@ -9303,11 +10093,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( #define VK_KHR_push_descriptor 1 #define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 #define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" -typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { - VkStructureType sType; - void* pNext; - uint32_t maxPushDescriptors; -} VkPhysicalDevicePushDescriptorPropertiesKHR; +typedef VkPhysicalDevicePushDescriptorProperties VkPhysicalDevicePushDescriptorPropertiesKHR; typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); @@ -10119,39 +10905,16 @@ typedef struct VkVideoDecodeH265DpbSlotInfoKHR { // VK_KHR_global_priority is a preprocessor guard. Do not pass it to API calls. 
#define VK_KHR_global_priority 1 -#define VK_MAX_GLOBAL_PRIORITY_SIZE_KHR 16U #define VK_KHR_GLOBAL_PRIORITY_SPEC_VERSION 1 #define VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME "VK_KHR_global_priority" +#define VK_MAX_GLOBAL_PRIORITY_SIZE_KHR VK_MAX_GLOBAL_PRIORITY_SIZE +typedef VkQueueGlobalPriority VkQueueGlobalPriorityKHR; -typedef enum VkQueueGlobalPriorityKHR { - VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR = 128, - VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR = 256, - VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR = 512, - VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR = 1024, - VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR, - VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR, - VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR, - VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR, - VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_KHR = 0x7FFFFFFF -} VkQueueGlobalPriorityKHR; -typedef struct VkDeviceQueueGlobalPriorityCreateInfoKHR { - VkStructureType sType; - const void* pNext; - VkQueueGlobalPriorityKHR globalPriority; -} VkDeviceQueueGlobalPriorityCreateInfoKHR; +typedef VkDeviceQueueGlobalPriorityCreateInfo VkDeviceQueueGlobalPriorityCreateInfoKHR; -typedef struct VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 globalPriorityQuery; -} VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR; +typedef VkPhysicalDeviceGlobalPriorityQueryFeatures VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR; -typedef struct VkQueueFamilyGlobalPriorityPropertiesKHR { - VkStructureType sType; - void* pNext; - uint32_t priorityCount; - VkQueueGlobalPriorityKHR priorities[VK_MAX_GLOBAL_PRIORITY_SIZE_KHR]; -} VkQueueFamilyGlobalPriorityPropertiesKHR; +typedef VkQueueFamilyGlobalPriorityProperties VkQueueFamilyGlobalPriorityPropertiesKHR; @@ -10350,39 +11113,23 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateKHR( #define VK_KHR_dynamic_rendering_local_read 1 #define VK_KHR_DYNAMIC_RENDERING_LOCAL_READ_SPEC_VERSION 1 #define VK_KHR_DYNAMIC_RENDERING_LOCAL_READ_EXTENSION_NAME "VK_KHR_dynamic_rendering_local_read" -typedef struct VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 dynamicRenderingLocalRead; -} VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; +typedef VkPhysicalDeviceDynamicRenderingLocalReadFeatures VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; -typedef struct VkRenderingAttachmentLocationInfoKHR { - VkStructureType sType; - const void* pNext; - uint32_t colorAttachmentCount; - const uint32_t* pColorAttachmentLocations; -} VkRenderingAttachmentLocationInfoKHR; +typedef VkRenderingAttachmentLocationInfo VkRenderingAttachmentLocationInfoKHR; -typedef struct VkRenderingInputAttachmentIndexInfoKHR { - VkStructureType sType; - const void* pNext; - uint32_t colorAttachmentCount; - const uint32_t* pColorAttachmentInputIndices; - const uint32_t* pDepthInputAttachmentIndex; - const uint32_t* pStencilInputAttachmentIndex; -} VkRenderingInputAttachmentIndexInfoKHR; +typedef VkRenderingInputAttachmentIndexInfo VkRenderingInputAttachmentIndexInfoKHR; -typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingAttachmentLocationsKHR)(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfoKHR* pLocationInfo); -typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingInputAttachmentIndicesKHR)(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfoKHR* pInputAttachmentIndexInfo); +typedef void (VKAPI_PTR 
*PFN_vkCmdSetRenderingAttachmentLocationsKHR)(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfo* pLocationInfo); +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingInputAttachmentIndicesKHR)(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingAttachmentLocationsKHR( VkCommandBuffer commandBuffer, - const VkRenderingAttachmentLocationInfoKHR* pLocationInfo); + const VkRenderingAttachmentLocationInfo* pLocationInfo); VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingInputAttachmentIndicesKHR( VkCommandBuffer commandBuffer, - const VkRenderingInputAttachmentIndexInfoKHR* pInputAttachmentIndexInfo); + const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); #endif @@ -10621,40 +11368,26 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR #define VK_KHR_map_memory2 1 #define VK_KHR_MAP_MEMORY_2_SPEC_VERSION 1 #define VK_KHR_MAP_MEMORY_2_EXTENSION_NAME "VK_KHR_map_memory2" +typedef VkMemoryUnmapFlagBits VkMemoryUnmapFlagBitsKHR; -typedef enum VkMemoryUnmapFlagBitsKHR { - VK_MEMORY_UNMAP_RESERVE_BIT_EXT = 0x00000001, - VK_MEMORY_UNMAP_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF -} VkMemoryUnmapFlagBitsKHR; -typedef VkFlags VkMemoryUnmapFlagsKHR; -typedef struct VkMemoryMapInfoKHR { - VkStructureType sType; - const void* pNext; - VkMemoryMapFlags flags; - VkDeviceMemory memory; - VkDeviceSize offset; - VkDeviceSize size; -} VkMemoryMapInfoKHR; +typedef VkMemoryUnmapFlags VkMemoryUnmapFlagsKHR; -typedef struct VkMemoryUnmapInfoKHR { - VkStructureType sType; - const void* pNext; - VkMemoryUnmapFlagsKHR flags; - VkDeviceMemory memory; -} VkMemoryUnmapInfoKHR; +typedef VkMemoryMapInfo VkMemoryMapInfoKHR; -typedef VkResult (VKAPI_PTR *PFN_vkMapMemory2KHR)(VkDevice device, const VkMemoryMapInfoKHR* pMemoryMapInfo, void** ppData); -typedef VkResult (VKAPI_PTR *PFN_vkUnmapMemory2KHR)(VkDevice device, const VkMemoryUnmapInfoKHR* pMemoryUnmapInfo); +typedef VkMemoryUnmapInfo VkMemoryUnmapInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory2KHR)(VkDevice device, const VkMemoryMapInfo* pMemoryMapInfo, void** ppData); +typedef VkResult (VKAPI_PTR *PFN_vkUnmapMemory2KHR)(VkDevice device, const VkMemoryUnmapInfo* pMemoryUnmapInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory2KHR( VkDevice device, - const VkMemoryMapInfoKHR* pMemoryMapInfo, + const VkMemoryMapInfo* pMemoryMapInfo, void** ppData); VKAPI_ATTR VkResult VKAPI_CALL vkUnmapMemory2KHR( VkDevice device, - const VkMemoryUnmapInfoKHR* pMemoryUnmapInfo); + const VkMemoryUnmapInfo* pMemoryUnmapInfo); #endif @@ -11168,12 +11901,7 @@ VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirementsKHR( #define VK_KHR_shader_subgroup_rotate 1 #define VK_KHR_SHADER_SUBGROUP_ROTATE_SPEC_VERSION 2 #define VK_KHR_SHADER_SUBGROUP_ROTATE_EXTENSION_NAME "VK_KHR_shader_subgroup_rotate" -typedef struct VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 shaderSubgroupRotate; - VkBool32 shaderSubgroupRotateClustered; -} VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR; +typedef VkPhysicalDeviceShaderSubgroupRotateFeatures VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR; @@ -11193,145 +11921,34 @@ typedef struct VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR { #define VK_KHR_maintenance5 1 #define VK_KHR_MAINTENANCE_5_SPEC_VERSION 1 #define VK_KHR_MAINTENANCE_5_EXTENSION_NAME "VK_KHR_maintenance5" -typedef VkFlags64 
VkPipelineCreateFlags2KHR; +typedef VkPipelineCreateFlags2 VkPipelineCreateFlags2KHR; -// Flag bits for VkPipelineCreateFlagBits2KHR -typedef VkFlags64 VkPipelineCreateFlagBits2KHR; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR = 0x00000001ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT_KHR = 0x00000002ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DERIVATIVE_BIT_KHR = 0x00000004ULL; -#ifdef VK_ENABLE_BETA_EXTENSIONS -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_EXECUTION_GRAPH_BIT_AMDX = 0x100000000ULL; -#endif -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x400000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = 0x00000008ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR = 0x00000010ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV = 0x00000020ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR = 0x00000040ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR = 0x00000100ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR = 0x00000200ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT = 0x00000400ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT = 0x00800000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR = 0x00000800ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_NV = 0x00040000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_ALLOW_MOTION_BIT_NV = 0x00100000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00200000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00400000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT = 0x01000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x02000000ULL; -static const VkPipelineCreateFlagBits2KHR 
VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x04000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT_EXT = 0x08000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT = 0x40000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV = 0x10000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT = 0x20000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR = 0x80000000ULL; -static const VkPipelineCreateFlagBits2KHR VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_EXT = 0x4000000000ULL; +typedef VkPipelineCreateFlagBits2 VkPipelineCreateFlagBits2KHR; -typedef VkFlags64 VkBufferUsageFlags2KHR; +typedef VkBufferUsageFlags2 VkBufferUsageFlags2KHR; -// Flag bits for VkBufferUsageFlagBits2KHR -typedef VkFlags64 VkBufferUsageFlagBits2KHR; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT_KHR = 0x00000001ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_TRANSFER_DST_BIT_KHR = 0x00000002ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000004ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR = 0x00000010ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR = 0x00000020ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT_KHR = 0x00000040ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT_KHR = 0x00000080ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR = 0x00000100ULL; -#ifdef VK_ENABLE_BETA_EXTENSIONS -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_EXECUTION_GRAPH_SCRATCH_BIT_AMDX = 0x02000000ULL; -#endif -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_RAY_TRACING_BIT_NV = 0x00000400ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_VIDEO_DECODE_SRC_BIT_KHR = 0x00002000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_VIDEO_DECODE_DST_BIT_KHR = 0x00004000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_VIDEO_ENCODE_DST_BIT_KHR = 0x00008000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_VIDEO_ENCODE_SRC_BIT_KHR = 0x00010000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT_KHR = 0x00020000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT = 0x00200000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT = 0x00400000ULL; -static const 
VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT = 0x04000000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT = 0x00800000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_MICROMAP_STORAGE_BIT_EXT = 0x01000000ULL; -static const VkBufferUsageFlagBits2KHR VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT = 0x80000000ULL; +typedef VkBufferUsageFlagBits2 VkBufferUsageFlagBits2KHR; -typedef struct VkPhysicalDeviceMaintenance5FeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 maintenance5; -} VkPhysicalDeviceMaintenance5FeaturesKHR; +typedef VkPhysicalDeviceMaintenance5Features VkPhysicalDeviceMaintenance5FeaturesKHR; -typedef struct VkPhysicalDeviceMaintenance5PropertiesKHR { - VkStructureType sType; - void* pNext; - VkBool32 earlyFragmentMultisampleCoverageAfterSampleCounting; - VkBool32 earlyFragmentSampleMaskTestBeforeSampleCounting; - VkBool32 depthStencilSwizzleOneSupport; - VkBool32 polygonModePointSize; - VkBool32 nonStrictSinglePixelWideLinesUseParallelogram; - VkBool32 nonStrictWideLinesUseParallelogram; -} VkPhysicalDeviceMaintenance5PropertiesKHR; +typedef VkPhysicalDeviceMaintenance5Properties VkPhysicalDeviceMaintenance5PropertiesKHR; -typedef struct VkRenderingAreaInfoKHR { - VkStructureType sType; - const void* pNext; - uint32_t viewMask; - uint32_t colorAttachmentCount; - const VkFormat* pColorAttachmentFormats; - VkFormat depthAttachmentFormat; - VkFormat stencilAttachmentFormat; -} VkRenderingAreaInfoKHR; +typedef VkRenderingAreaInfo VkRenderingAreaInfoKHR; -typedef struct VkImageSubresource2KHR { - VkStructureType sType; - void* pNext; - VkImageSubresource imageSubresource; -} VkImageSubresource2KHR; +typedef VkDeviceImageSubresourceInfo VkDeviceImageSubresourceInfoKHR; -typedef struct VkDeviceImageSubresourceInfoKHR { - VkStructureType sType; - const void* pNext; - const VkImageCreateInfo* pCreateInfo; - const VkImageSubresource2KHR* pSubresource; -} VkDeviceImageSubresourceInfoKHR; +typedef VkImageSubresource2 VkImageSubresource2KHR; -typedef struct VkSubresourceLayout2KHR { - VkStructureType sType; - void* pNext; - VkSubresourceLayout subresourceLayout; -} VkSubresourceLayout2KHR; +typedef VkSubresourceLayout2 VkSubresourceLayout2KHR; -typedef struct VkPipelineCreateFlags2CreateInfoKHR { - VkStructureType sType; - const void* pNext; - VkPipelineCreateFlags2KHR flags; -} VkPipelineCreateFlags2CreateInfoKHR; +typedef VkPipelineCreateFlags2CreateInfo VkPipelineCreateFlags2CreateInfoKHR; -typedef struct VkBufferUsageFlags2CreateInfoKHR { - VkStructureType sType; - const void* pNext; - VkBufferUsageFlags2KHR usage; -} VkBufferUsageFlags2CreateInfoKHR; +typedef VkBufferUsageFlags2CreateInfo VkBufferUsageFlags2CreateInfoKHR; typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer2KHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size, VkIndexType indexType); -typedef void (VKAPI_PTR *PFN_vkGetRenderingAreaGranularityKHR)(VkDevice device, const VkRenderingAreaInfoKHR* pRenderingAreaInfo, VkExtent2D* pGranularity); -typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSubresourceLayoutKHR)(VkDevice device, const VkDeviceImageSubresourceInfoKHR* pInfo, VkSubresourceLayout2KHR* pLayout); -typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2KHR)(VkDevice device, VkImage image, const VkImageSubresource2KHR* pSubresource, VkSubresourceLayout2KHR* pLayout); +typedef void (VKAPI_PTR *PFN_vkGetRenderingAreaGranularityKHR)(VkDevice device, const 
VkRenderingAreaInfo* pRenderingAreaInfo, VkExtent2D* pGranularity); +typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSubresourceLayoutKHR)(VkDevice device, const VkDeviceImageSubresourceInfo* pInfo, VkSubresourceLayout2* pLayout); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2KHR)(VkDevice device, VkImage image, const VkImageSubresource2* pSubresource, VkSubresourceLayout2* pLayout); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer2KHR( @@ -11343,19 +11960,19 @@ VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer2KHR( VKAPI_ATTR void VKAPI_CALL vkGetRenderingAreaGranularityKHR( VkDevice device, - const VkRenderingAreaInfoKHR* pRenderingAreaInfo, + const VkRenderingAreaInfo* pRenderingAreaInfo, VkExtent2D* pGranularity); VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSubresourceLayoutKHR( VkDevice device, - const VkDeviceImageSubresourceInfoKHR* pInfo, - VkSubresourceLayout2KHR* pLayout); + const VkDeviceImageSubresourceInfo* pInfo, + VkSubresourceLayout2* pLayout); VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2KHR( VkDevice device, VkImage image, - const VkImageSubresource2KHR* pSubresource, - VkSubresourceLayout2KHR* pLayout); + const VkImageSubresource2* pSubresource, + VkSubresourceLayout2* pLayout); #endif @@ -11854,31 +12471,13 @@ typedef struct VkVideoInlineQueryInfoKHR { #define VK_KHR_vertex_attribute_divisor 1 #define VK_KHR_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 1 #define VK_KHR_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_KHR_vertex_attribute_divisor" -typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR { - VkStructureType sType; - void* pNext; - uint32_t maxVertexAttribDivisor; - VkBool32 supportsNonZeroFirstInstance; -} VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR; +typedef VkPhysicalDeviceVertexAttributeDivisorProperties VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR; -typedef struct VkVertexInputBindingDivisorDescriptionKHR { - uint32_t binding; - uint32_t divisor; -} VkVertexInputBindingDivisorDescriptionKHR; +typedef VkVertexInputBindingDivisorDescription VkVertexInputBindingDivisorDescriptionKHR; -typedef struct VkPipelineVertexInputDivisorStateCreateInfoKHR { - VkStructureType sType; - const void* pNext; - uint32_t vertexBindingDivisorCount; - const VkVertexInputBindingDivisorDescriptionKHR* pVertexBindingDivisors; -} VkPipelineVertexInputDivisorStateCreateInfoKHR; +typedef VkPipelineVertexInputDivisorStateCreateInfo VkPipelineVertexInputDivisorStateCreateInfoKHR; -typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 vertexAttributeInstanceRateDivisor; - VkBool32 vertexAttributeInstanceRateZeroDivisor; -} VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR; +typedef VkPhysicalDeviceVertexAttributeDivisorFeatures VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR; @@ -11892,11 +12491,7 @@ typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR { #define VK_KHR_shader_float_controls2 1 #define VK_KHR_SHADER_FLOAT_CONTROLS_2_SPEC_VERSION 1 #define VK_KHR_SHADER_FLOAT_CONTROLS_2_EXTENSION_NAME "VK_KHR_shader_float_controls2" -typedef struct VkPhysicalDeviceShaderFloatControls2FeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 shaderFloatControls2; -} VkPhysicalDeviceShaderFloatControls2FeaturesKHR; +typedef VkPhysicalDeviceShaderFloatControls2Features VkPhysicalDeviceShaderFloatControls2FeaturesKHR; @@ -11904,11 +12499,7 @@ typedef struct VkPhysicalDeviceShaderFloatControls2FeaturesKHR { #define VK_KHR_index_type_uint8 1 #define 
VK_KHR_INDEX_TYPE_UINT8_SPEC_VERSION 1 #define VK_KHR_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_KHR_index_type_uint8" -typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 indexTypeUint8; -} VkPhysicalDeviceIndexTypeUint8FeaturesKHR; +typedef VkPhysicalDeviceIndexTypeUint8Features VkPhysicalDeviceIndexTypeUint8FeaturesKHR; @@ -11916,43 +12507,13 @@ typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesKHR { #define VK_KHR_line_rasterization 1 #define VK_KHR_LINE_RASTERIZATION_SPEC_VERSION 1 #define VK_KHR_LINE_RASTERIZATION_EXTENSION_NAME "VK_KHR_line_rasterization" +typedef VkLineRasterizationMode VkLineRasterizationModeKHR; -typedef enum VkLineRasterizationModeKHR { - VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR = 0, - VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR = 1, - VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR = 2, - VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR = 3, - VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR, - VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR, - VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR, - VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR, - VK_LINE_RASTERIZATION_MODE_MAX_ENUM_KHR = 0x7FFFFFFF -} VkLineRasterizationModeKHR; -typedef struct VkPhysicalDeviceLineRasterizationFeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 rectangularLines; - VkBool32 bresenhamLines; - VkBool32 smoothLines; - VkBool32 stippledRectangularLines; - VkBool32 stippledBresenhamLines; - VkBool32 stippledSmoothLines; -} VkPhysicalDeviceLineRasterizationFeaturesKHR; +typedef VkPhysicalDeviceLineRasterizationFeatures VkPhysicalDeviceLineRasterizationFeaturesKHR; -typedef struct VkPhysicalDeviceLineRasterizationPropertiesKHR { - VkStructureType sType; - void* pNext; - uint32_t lineSubPixelPrecisionBits; -} VkPhysicalDeviceLineRasterizationPropertiesKHR; +typedef VkPhysicalDeviceLineRasterizationProperties VkPhysicalDeviceLineRasterizationPropertiesKHR; -typedef struct VkPipelineRasterizationLineStateCreateInfoKHR { - VkStructureType sType; - const void* pNext; - VkLineRasterizationModeKHR lineRasterizationMode; - VkBool32 stippledLineEnable; - uint32_t lineStippleFactor; - uint16_t lineStipplePattern; -} VkPipelineRasterizationLineStateCreateInfoKHR; +typedef VkPipelineRasterizationLineStateCreateInfo VkPipelineRasterizationLineStateCreateInfoKHR; typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleKHR)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); @@ -12008,11 +12569,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsKHR( #define VK_KHR_shader_expect_assume 1 #define VK_KHR_SHADER_EXPECT_ASSUME_SPEC_VERSION 1 #define VK_KHR_SHADER_EXPECT_ASSUME_EXTENSION_NAME "VK_KHR_shader_expect_assume" -typedef struct VkPhysicalDeviceShaderExpectAssumeFeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 shaderExpectAssume; -} VkPhysicalDeviceShaderExpectAssumeFeaturesKHR; +typedef VkPhysicalDeviceShaderExpectAssumeFeatures VkPhysicalDeviceShaderExpectAssumeFeaturesKHR; @@ -12020,66 +12577,19 @@ typedef struct VkPhysicalDeviceShaderExpectAssumeFeaturesKHR { #define VK_KHR_maintenance6 1 #define VK_KHR_MAINTENANCE_6_SPEC_VERSION 1 #define VK_KHR_MAINTENANCE_6_EXTENSION_NAME "VK_KHR_maintenance6" -typedef struct VkPhysicalDeviceMaintenance6FeaturesKHR { - VkStructureType sType; - void* pNext; - VkBool32 maintenance6; -} 
VkPhysicalDeviceMaintenance6FeaturesKHR; +typedef VkPhysicalDeviceMaintenance6Features VkPhysicalDeviceMaintenance6FeaturesKHR; -typedef struct VkPhysicalDeviceMaintenance6PropertiesKHR { - VkStructureType sType; - void* pNext; - VkBool32 blockTexelViewCompatibleMultipleLayers; - uint32_t maxCombinedImageSamplerDescriptorCount; - VkBool32 fragmentShadingRateClampCombinerInputs; -} VkPhysicalDeviceMaintenance6PropertiesKHR; +typedef VkPhysicalDeviceMaintenance6Properties VkPhysicalDeviceMaintenance6PropertiesKHR; -typedef struct VkBindMemoryStatusKHR { - VkStructureType sType; - const void* pNext; - VkResult* pResult; -} VkBindMemoryStatusKHR; +typedef VkBindMemoryStatus VkBindMemoryStatusKHR; -typedef struct VkBindDescriptorSetsInfoKHR { - VkStructureType sType; - const void* pNext; - VkShaderStageFlags stageFlags; - VkPipelineLayout layout; - uint32_t firstSet; - uint32_t descriptorSetCount; - const VkDescriptorSet* pDescriptorSets; - uint32_t dynamicOffsetCount; - const uint32_t* pDynamicOffsets; -} VkBindDescriptorSetsInfoKHR; +typedef VkBindDescriptorSetsInfo VkBindDescriptorSetsInfoKHR; -typedef struct VkPushConstantsInfoKHR { - VkStructureType sType; - const void* pNext; - VkPipelineLayout layout; - VkShaderStageFlags stageFlags; - uint32_t offset; - uint32_t size; - const void* pValues; -} VkPushConstantsInfoKHR; +typedef VkPushConstantsInfo VkPushConstantsInfoKHR; -typedef struct VkPushDescriptorSetInfoKHR { - VkStructureType sType; - const void* pNext; - VkShaderStageFlags stageFlags; - VkPipelineLayout layout; - uint32_t set; - uint32_t descriptorWriteCount; - const VkWriteDescriptorSet* pDescriptorWrites; -} VkPushDescriptorSetInfoKHR; +typedef VkPushDescriptorSetInfo VkPushDescriptorSetInfoKHR; -typedef struct VkPushDescriptorSetWithTemplateInfoKHR { - VkStructureType sType; - const void* pNext; - VkDescriptorUpdateTemplate descriptorUpdateTemplate; - VkPipelineLayout layout; - uint32_t set; - const void* pData; -} VkPushDescriptorSetWithTemplateInfoKHR; +typedef VkPushDescriptorSetWithTemplateInfo VkPushDescriptorSetWithTemplateInfoKHR; typedef struct VkSetDescriptorBufferOffsetsInfoEXT { VkStructureType sType; @@ -12100,29 +12610,29 @@ typedef struct VkBindDescriptorBufferEmbeddedSamplersInfoEXT { uint32_t set; } VkBindDescriptorBufferEmbeddedSamplersInfoEXT; -typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets2KHR)(VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfoKHR* pBindDescriptorSetsInfo); -typedef void (VKAPI_PTR *PFN_vkCmdPushConstants2KHR)(VkCommandBuffer commandBuffer, const VkPushConstantsInfoKHR* pPushConstantsInfo); -typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSet2KHR)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfoKHR* pPushDescriptorSetInfo); -typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplate2KHR)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfoKHR* pPushDescriptorSetWithTemplateInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets2KHR)(VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants2KHR)(VkCommandBuffer commandBuffer, const VkPushConstantsInfo* pPushConstantsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSet2KHR)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplate2KHR)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); typedef void 
(VKAPI_PTR *PFN_vkCmdSetDescriptorBufferOffsets2EXT)(VkCommandBuffer commandBuffer, const VkSetDescriptorBufferOffsetsInfoEXT* pSetDescriptorBufferOffsetsInfo); typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT)(VkCommandBuffer commandBuffer, const VkBindDescriptorBufferEmbeddedSamplersInfoEXT* pBindDescriptorBufferEmbeddedSamplersInfo); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets2KHR( VkCommandBuffer commandBuffer, - const VkBindDescriptorSetsInfoKHR* pBindDescriptorSetsInfo); + const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants2KHR( VkCommandBuffer commandBuffer, - const VkPushConstantsInfoKHR* pPushConstantsInfo); + const VkPushConstantsInfo* pPushConstantsInfo); VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSet2KHR( VkCommandBuffer commandBuffer, - const VkPushDescriptorSetInfoKHR* pPushDescriptorSetInfo); + const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplate2KHR( VkCommandBuffer commandBuffer, - const VkPushDescriptorSetWithTemplateInfoKHR* pPushDescriptorSetWithTemplateInfo); + const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); VKAPI_ATTR void VKAPI_CALL vkCmdSetDescriptorBufferOffsets2EXT( VkCommandBuffer commandBuffer, @@ -12971,45 +13481,15 @@ typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT { #define VK_EXT_pipeline_robustness 1 #define VK_EXT_PIPELINE_ROBUSTNESS_SPEC_VERSION 1 #define VK_EXT_PIPELINE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_pipeline_robustness" +typedef VkPipelineRobustnessBufferBehavior VkPipelineRobustnessBufferBehaviorEXT; -typedef enum VkPipelineRobustnessBufferBehaviorEXT { - VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT = 0, - VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT = 1, - VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT = 2, - VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT = 3, - VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF -} VkPipelineRobustnessBufferBehaviorEXT; +typedef VkPipelineRobustnessImageBehavior VkPipelineRobustnessImageBehaviorEXT; -typedef enum VkPipelineRobustnessImageBehaviorEXT { - VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT = 0, - VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT = 1, - VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT = 2, - VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT = 3, - VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_MAX_ENUM_EXT = 0x7FFFFFFF -} VkPipelineRobustnessImageBehaviorEXT; -typedef struct VkPhysicalDevicePipelineRobustnessFeaturesEXT { - VkStructureType sType; - void* pNext; - VkBool32 pipelineRobustness; -} VkPhysicalDevicePipelineRobustnessFeaturesEXT; +typedef VkPhysicalDevicePipelineRobustnessFeatures VkPhysicalDevicePipelineRobustnessFeaturesEXT; -typedef struct VkPhysicalDevicePipelineRobustnessPropertiesEXT { - VkStructureType sType; - void* pNext; - VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessStorageBuffers; - VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessUniformBuffers; - VkPipelineRobustnessBufferBehaviorEXT defaultRobustnessVertexInputs; - VkPipelineRobustnessImageBehaviorEXT defaultRobustnessImages; -} VkPhysicalDevicePipelineRobustnessPropertiesEXT; +typedef VkPhysicalDevicePipelineRobustnessProperties VkPhysicalDevicePipelineRobustnessPropertiesEXT; -typedef struct VkPipelineRobustnessCreateInfoEXT { - VkStructureType sType; - const void* pNext; - 
VkPipelineRobustnessBufferBehaviorEXT storageBuffers; - VkPipelineRobustnessBufferBehaviorEXT uniformBuffers; - VkPipelineRobustnessBufferBehaviorEXT vertexInputs; - VkPipelineRobustnessImageBehaviorEXT images; -} VkPipelineRobustnessCreateInfoEXT; +typedef VkPipelineRobustnessCreateInfo VkPipelineRobustnessCreateInfoEXT; @@ -14566,9 +15046,9 @@ typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT { #define VK_EXT_global_priority 1 #define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 #define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" -typedef VkQueueGlobalPriorityKHR VkQueueGlobalPriorityEXT; +typedef VkQueueGlobalPriority VkQueueGlobalPriorityEXT; -typedef VkDeviceQueueGlobalPriorityCreateInfoKHR VkDeviceQueueGlobalPriorityCreateInfoEXT; +typedef VkDeviceQueueGlobalPriorityCreateInfo VkDeviceQueueGlobalPriorityCreateInfoEXT; @@ -14727,11 +15207,11 @@ typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { uint32_t maxVertexAttribDivisor; } VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; -typedef VkVertexInputBindingDivisorDescriptionKHR VkVertexInputBindingDivisorDescriptionEXT; +typedef VkVertexInputBindingDivisorDescription VkVertexInputBindingDivisorDescriptionEXT; -typedef VkPipelineVertexInputDivisorStateCreateInfoKHR VkPipelineVertexInputDivisorStateCreateInfoEXT; +typedef VkPipelineVertexInputDivisorStateCreateInfo VkPipelineVertexInputDivisorStateCreateInfoEXT; -typedef VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; +typedef VkPhysicalDeviceVertexAttributeDivisorFeatures VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; @@ -15546,13 +16026,13 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT( #define VK_EXT_line_rasterization 1 #define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1 #define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization" -typedef VkLineRasterizationModeKHR VkLineRasterizationModeEXT; +typedef VkLineRasterizationMode VkLineRasterizationModeEXT; -typedef VkPhysicalDeviceLineRasterizationFeaturesKHR VkPhysicalDeviceLineRasterizationFeaturesEXT; +typedef VkPhysicalDeviceLineRasterizationFeatures VkPhysicalDeviceLineRasterizationFeaturesEXT; -typedef VkPhysicalDeviceLineRasterizationPropertiesKHR VkPhysicalDeviceLineRasterizationPropertiesEXT; +typedef VkPhysicalDeviceLineRasterizationProperties VkPhysicalDeviceLineRasterizationPropertiesEXT; -typedef VkPipelineRasterizationLineStateCreateInfoKHR VkPipelineRasterizationLineStateCreateInfoEXT; +typedef VkPipelineRasterizationLineStateCreateInfo VkPipelineRasterizationLineStateCreateInfoEXT; typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); @@ -15608,7 +16088,7 @@ VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT( #define VK_EXT_index_type_uint8 1 #define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1 #define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8" -typedef VkPhysicalDeviceIndexTypeUint8FeaturesKHR VkPhysicalDeviceIndexTypeUint8FeaturesEXT; +typedef VkPhysicalDeviceIndexTypeUint8Features VkPhysicalDeviceIndexTypeUint8FeaturesEXT; @@ -15701,138 +16181,63 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOpEXT( #define VK_EXT_host_image_copy 1 #define VK_EXT_HOST_IMAGE_COPY_SPEC_VERSION 1 #define VK_EXT_HOST_IMAGE_COPY_EXTENSION_NAME "VK_EXT_host_image_copy" +typedef VkHostImageCopyFlagBits VkHostImageCopyFlagBitsEXT; -typedef enum VkHostImageCopyFlagBitsEXT { - VK_HOST_IMAGE_COPY_MEMCPY_EXT = 0x00000001, 
- VK_HOST_IMAGE_COPY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF -} VkHostImageCopyFlagBitsEXT; -typedef VkFlags VkHostImageCopyFlagsEXT; -typedef struct VkPhysicalDeviceHostImageCopyFeaturesEXT { - VkStructureType sType; - void* pNext; - VkBool32 hostImageCopy; -} VkPhysicalDeviceHostImageCopyFeaturesEXT; +typedef VkHostImageCopyFlags VkHostImageCopyFlagsEXT; -typedef struct VkPhysicalDeviceHostImageCopyPropertiesEXT { - VkStructureType sType; - void* pNext; - uint32_t copySrcLayoutCount; - VkImageLayout* pCopySrcLayouts; - uint32_t copyDstLayoutCount; - VkImageLayout* pCopyDstLayouts; - uint8_t optimalTilingLayoutUUID[VK_UUID_SIZE]; - VkBool32 identicalMemoryTypeRequirements; -} VkPhysicalDeviceHostImageCopyPropertiesEXT; +typedef VkPhysicalDeviceHostImageCopyFeatures VkPhysicalDeviceHostImageCopyFeaturesEXT; -typedef struct VkMemoryToImageCopyEXT { - VkStructureType sType; - const void* pNext; - const void* pHostPointer; - uint32_t memoryRowLength; - uint32_t memoryImageHeight; - VkImageSubresourceLayers imageSubresource; - VkOffset3D imageOffset; - VkExtent3D imageExtent; -} VkMemoryToImageCopyEXT; +typedef VkPhysicalDeviceHostImageCopyProperties VkPhysicalDeviceHostImageCopyPropertiesEXT; -typedef struct VkImageToMemoryCopyEXT { - VkStructureType sType; - const void* pNext; - void* pHostPointer; - uint32_t memoryRowLength; - uint32_t memoryImageHeight; - VkImageSubresourceLayers imageSubresource; - VkOffset3D imageOffset; - VkExtent3D imageExtent; -} VkImageToMemoryCopyEXT; +typedef VkMemoryToImageCopy VkMemoryToImageCopyEXT; -typedef struct VkCopyMemoryToImageInfoEXT { - VkStructureType sType; - const void* pNext; - VkHostImageCopyFlagsEXT flags; - VkImage dstImage; - VkImageLayout dstImageLayout; - uint32_t regionCount; - const VkMemoryToImageCopyEXT* pRegions; -} VkCopyMemoryToImageInfoEXT; +typedef VkImageToMemoryCopy VkImageToMemoryCopyEXT; -typedef struct VkCopyImageToMemoryInfoEXT { - VkStructureType sType; - const void* pNext; - VkHostImageCopyFlagsEXT flags; - VkImage srcImage; - VkImageLayout srcImageLayout; - uint32_t regionCount; - const VkImageToMemoryCopyEXT* pRegions; -} VkCopyImageToMemoryInfoEXT; +typedef VkCopyMemoryToImageInfo VkCopyMemoryToImageInfoEXT; -typedef struct VkCopyImageToImageInfoEXT { - VkStructureType sType; - const void* pNext; - VkHostImageCopyFlagsEXT flags; - VkImage srcImage; - VkImageLayout srcImageLayout; - VkImage dstImage; - VkImageLayout dstImageLayout; - uint32_t regionCount; - const VkImageCopy2* pRegions; -} VkCopyImageToImageInfoEXT; +typedef VkCopyImageToMemoryInfo VkCopyImageToMemoryInfoEXT; -typedef struct VkHostImageLayoutTransitionInfoEXT { - VkStructureType sType; - const void* pNext; - VkImage image; - VkImageLayout oldLayout; - VkImageLayout newLayout; - VkImageSubresourceRange subresourceRange; -} VkHostImageLayoutTransitionInfoEXT; +typedef VkCopyImageToImageInfo VkCopyImageToImageInfoEXT; -typedef struct VkSubresourceHostMemcpySizeEXT { - VkStructureType sType; - void* pNext; - VkDeviceSize size; -} VkSubresourceHostMemcpySizeEXT; +typedef VkHostImageLayoutTransitionInfo VkHostImageLayoutTransitionInfoEXT; -typedef struct VkHostImageCopyDevicePerformanceQueryEXT { - VkStructureType sType; - void* pNext; - VkBool32 optimalDeviceAccess; - VkBool32 identicalMemoryLayout; -} VkHostImageCopyDevicePerformanceQueryEXT; +typedef VkSubresourceHostMemcpySize VkSubresourceHostMemcpySizeEXT; -typedef VkSubresourceLayout2KHR VkSubresourceLayout2EXT; +typedef VkHostImageCopyDevicePerformanceQuery VkHostImageCopyDevicePerformanceQueryEXT; -typedef 
VkImageSubresource2KHR VkImageSubresource2EXT; +typedef VkSubresourceLayout2 VkSubresourceLayout2EXT; -typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToImageEXT)(VkDevice device, const VkCopyMemoryToImageInfoEXT* pCopyMemoryToImageInfo); -typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToMemoryEXT)(VkDevice device, const VkCopyImageToMemoryInfoEXT* pCopyImageToMemoryInfo); -typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToImageEXT)(VkDevice device, const VkCopyImageToImageInfoEXT* pCopyImageToImageInfo); -typedef VkResult (VKAPI_PTR *PFN_vkTransitionImageLayoutEXT)(VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfoEXT* pTransitions); -typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2EXT)(VkDevice device, VkImage image, const VkImageSubresource2KHR* pSubresource, VkSubresourceLayout2KHR* pLayout); +typedef VkImageSubresource2 VkImageSubresource2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToImageEXT)(VkDevice device, const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToMemoryEXT)(VkDevice device, const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToImageEXT)(VkDevice device, const VkCopyImageToImageInfo* pCopyImageToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkTransitionImageLayoutEXT)(VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfo* pTransitions); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2EXT)(VkDevice device, VkImage image, const VkImageSubresource2* pSubresource, VkSubresourceLayout2* pLayout); #ifndef VK_NO_PROTOTYPES VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToImageEXT( VkDevice device, - const VkCopyMemoryToImageInfoEXT* pCopyMemoryToImageInfo); + const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToMemoryEXT( VkDevice device, - const VkCopyImageToMemoryInfoEXT* pCopyImageToMemoryInfo); + const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToImageEXT( VkDevice device, - const VkCopyImageToImageInfoEXT* pCopyImageToImageInfo); + const VkCopyImageToImageInfo* pCopyImageToImageInfo); VKAPI_ATTR VkResult VKAPI_CALL vkTransitionImageLayoutEXT( VkDevice device, uint32_t transitionCount, - const VkHostImageLayoutTransitionInfoEXT* pTransitions); + const VkHostImageLayoutTransitionInfo* pTransitions); VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2EXT( VkDevice device, VkImage image, - const VkImageSubresource2KHR* pSubresource, - VkSubresourceLayout2KHR* pLayout); + const VkImageSubresource2* pSubresource, + VkSubresourceLayout2* pLayout); #endif @@ -17700,10 +18105,10 @@ typedef struct VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT { #define VK_EXT_global_priority_query 1 #define VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION 1 #define VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME "VK_EXT_global_priority_query" -#define VK_MAX_GLOBAL_PRIORITY_SIZE_EXT VK_MAX_GLOBAL_PRIORITY_SIZE_KHR -typedef VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT; +#define VK_MAX_GLOBAL_PRIORITY_SIZE_EXT VK_MAX_GLOBAL_PRIORITY_SIZE +typedef VkPhysicalDeviceGlobalPriorityQueryFeatures VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT; -typedef VkQueueFamilyGlobalPriorityPropertiesKHR VkQueueFamilyGlobalPriorityPropertiesEXT; +typedef VkQueueFamilyGlobalPriorityProperties VkQueueFamilyGlobalPriorityPropertiesEXT; @@ -19165,11 +19570,7 @@ typedef struct 
VkPhysicalDeviceLegacyDitheringFeaturesEXT { #define VK_EXT_pipeline_protected_access 1 #define VK_EXT_PIPELINE_PROTECTED_ACCESS_SPEC_VERSION 1 #define VK_EXT_PIPELINE_PROTECTED_ACCESS_EXTENSION_NAME "VK_EXT_pipeline_protected_access" -typedef struct VkPhysicalDevicePipelineProtectedAccessFeaturesEXT { - VkStructureType sType; - void* pNext; - VkBool32 pipelineProtectedAccess; -} VkPhysicalDevicePipelineProtectedAccessFeaturesEXT; +typedef VkPhysicalDevicePipelineProtectedAccessFeatures VkPhysicalDevicePipelineProtectedAccessFeaturesEXT; diff --git a/third_party/vulkan/vulkan_enums.hpp b/third_party/vulkan/vulkan_enums.hpp index ee1fdac..c7de355 100644 --- a/third_party/vulkan/vulkan_enums.hpp +++ b/third_party/vulkan/vulkan_enums.hpp @@ -258,6 +258,9 @@ namespace VULKAN_HPP_NAMESPACE ePipelineCompileRequired = VK_PIPELINE_COMPILE_REQUIRED, eErrorPipelineCompileRequiredEXT = VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT, ePipelineCompileRequiredEXT = VK_PIPELINE_COMPILE_REQUIRED_EXT, + eErrorNotPermitted = VK_ERROR_NOT_PERMITTED, + eErrorNotPermittedEXT = VK_ERROR_NOT_PERMITTED_EXT, + eErrorNotPermittedKHR = VK_ERROR_NOT_PERMITTED_KHR, eErrorSurfaceLostKHR = VK_ERROR_SURFACE_LOST_KHR, eErrorNativeWindowInUseKHR = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR, eSuboptimalKHR = VK_SUBOPTIMAL_KHR, @@ -272,8 +275,6 @@ namespace VULKAN_HPP_NAMESPACE eErrorVideoProfileCodecNotSupportedKHR = VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR, eErrorVideoStdVersionNotSupportedKHR = VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR, eErrorInvalidDrmFormatModifierPlaneLayoutEXT = VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT, - eErrorNotPermittedKHR = VK_ERROR_NOT_PERMITTED_KHR, - eErrorNotPermittedEXT = VK_ERROR_NOT_PERMITTED_EXT, #if defined( VK_USE_PLATFORM_WIN32_KHR ) eErrorFullScreenExclusiveModeLostEXT = VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT, #endif /*VK_USE_PLATFORM_WIN32_KHR*/ @@ -668,6 +669,115 @@ namespace VULKAN_HPP_NAMESPACE eDeviceBufferMemoryRequirementsKHR = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR, eDeviceImageMemoryRequirements = VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS, eDeviceImageMemoryRequirementsKHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR, + ePhysicalDeviceVulkan14Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_FEATURES, + ePhysicalDeviceVulkan14Properties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_PROPERTIES, + eDeviceQueueGlobalPriorityCreateInfo = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO, + eDeviceQueueGlobalPriorityCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT, + eDeviceQueueGlobalPriorityCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR, + ePhysicalDeviceGlobalPriorityQueryFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES, + ePhysicalDeviceGlobalPriorityQueryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT, + ePhysicalDeviceGlobalPriorityQueryFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR, + eQueueFamilyGlobalPriorityProperties = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES, + eQueueFamilyGlobalPriorityPropertiesEXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT, + eQueueFamilyGlobalPriorityPropertiesKHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR, + ePhysicalDeviceShaderSubgroupRotateFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES, + ePhysicalDeviceShaderSubgroupRotateFeaturesKHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR, + ePhysicalDeviceShaderFloatControls2Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES, + ePhysicalDeviceShaderFloatControls2FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR, + ePhysicalDeviceShaderExpectAssumeFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES, + ePhysicalDeviceShaderExpectAssumeFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR, + ePhysicalDeviceLineRasterizationFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES, + ePhysicalDeviceLineRasterizationFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT, + ePhysicalDeviceLineRasterizationFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR, + ePipelineRasterizationLineStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO, + ePipelineRasterizationLineStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT, + ePipelineRasterizationLineStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR, + ePhysicalDeviceLineRasterizationProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES, + ePhysicalDeviceLineRasterizationPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT, + ePhysicalDeviceLineRasterizationPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR, + ePhysicalDeviceVertexAttributeDivisorProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES, + ePhysicalDeviceVertexAttributeDivisorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR, + ePipelineVertexInputDivisorStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO, + ePipelineVertexInputDivisorStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT, + ePipelineVertexInputDivisorStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR, + ePhysicalDeviceVertexAttributeDivisorFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES, + ePhysicalDeviceVertexAttributeDivisorFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT, + ePhysicalDeviceVertexAttributeDivisorFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR, + ePhysicalDeviceIndexTypeUint8Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES, + ePhysicalDeviceIndexTypeUint8FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT, + ePhysicalDeviceIndexTypeUint8FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR, + eMemoryMapInfo = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO, + eMemoryMapInfoKHR = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR, + eMemoryUnmapInfo = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO, + eMemoryUnmapInfoKHR = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR, + ePhysicalDeviceMaintenance5Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES, + ePhysicalDeviceMaintenance5FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR, + ePhysicalDeviceMaintenance5Properties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES, + ePhysicalDeviceMaintenance5PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR, + eRenderingAreaInfo = 
VK_STRUCTURE_TYPE_RENDERING_AREA_INFO, + eRenderingAreaInfoKHR = VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR, + eDeviceImageSubresourceInfo = VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO, + eDeviceImageSubresourceInfoKHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR, + eSubresourceLayout2 = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2, + eSubresourceLayout2EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT, + eSubresourceLayout2KHR = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR, + eImageSubresource2 = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2, + eImageSubresource2EXT = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT, + eImageSubresource2KHR = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR, + ePipelineCreateFlags2CreateInfo = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO, + ePipelineCreateFlags2CreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR, + eBufferUsageFlags2CreateInfo = VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO, + eBufferUsageFlags2CreateInfoKHR = VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR, + ePhysicalDevicePushDescriptorProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES, + ePhysicalDevicePushDescriptorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR, + ePhysicalDeviceDynamicRenderingLocalReadFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES, + ePhysicalDeviceDynamicRenderingLocalReadFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR, + eRenderingAttachmentLocationInfo = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO, + eRenderingAttachmentLocationInfoKHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR, + eRenderingInputAttachmentIndexInfo = VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO, + eRenderingInputAttachmentIndexInfoKHR = VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR, + ePhysicalDeviceMaintenance6Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES, + ePhysicalDeviceMaintenance6FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR, + ePhysicalDeviceMaintenance6Properties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES, + ePhysicalDeviceMaintenance6PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR, + eBindMemoryStatus = VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS, + eBindMemoryStatusKHR = VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR, + eBindDescriptorSetsInfo = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO, + eBindDescriptorSetsInfoKHR = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR, + ePushConstantsInfo = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO, + ePushConstantsInfoKHR = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR, + ePushDescriptorSetInfo = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO, + ePushDescriptorSetInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR, + ePushDescriptorSetWithTemplateInfo = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO, + ePushDescriptorSetWithTemplateInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR, + ePhysicalDevicePipelineProtectedAccessFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES, + ePhysicalDevicePipelineProtectedAccessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT, + ePipelineRobustnessCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO, + ePipelineRobustnessCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT, + 
ePhysicalDevicePipelineRobustnessFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES, + ePhysicalDevicePipelineRobustnessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT, + ePhysicalDevicePipelineRobustnessProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES, + ePhysicalDevicePipelineRobustnessPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT, + ePhysicalDeviceHostImageCopyFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES, + ePhysicalDeviceHostImageCopyFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT, + ePhysicalDeviceHostImageCopyProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES, + ePhysicalDeviceHostImageCopyPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT, + eMemoryToImageCopy = VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY, + eMemoryToImageCopyEXT = VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT, + eImageToMemoryCopy = VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY, + eImageToMemoryCopyEXT = VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY_EXT, + eCopyImageToMemoryInfo = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO, + eCopyImageToMemoryInfoEXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO_EXT, + eCopyMemoryToImageInfo = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO, + eCopyMemoryToImageInfoEXT = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT, + eHostImageLayoutTransitionInfo = VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO, + eHostImageLayoutTransitionInfoEXT = VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT, + eCopyImageToImageInfo = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO, + eCopyImageToImageInfoEXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO_EXT, + eSubresourceHostMemcpySize = VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE, + eSubresourceHostMemcpySizeEXT = VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE_EXT, + eHostImageCopyDevicePerformanceQuery = VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY, + eHostImageCopyDevicePerformanceQueryEXT = VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT, eSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, ePresentInfoKHR = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, eDeviceGroupPresentCapabilitiesKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR, @@ -782,11 +892,8 @@ namespace VULKAN_HPP_NAMESPACE #if defined( VK_USE_PLATFORM_VI_NN ) eViSurfaceCreateInfoNN = VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN, #endif /*VK_USE_PLATFORM_VI_NN*/ - eImageViewAstcDecodeModeEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT, - ePhysicalDeviceAstcDecodeFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT, - ePipelineRobustnessCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT, - ePhysicalDevicePipelineRobustnessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT, - ePhysicalDevicePipelineRobustnessPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT, + eImageViewAstcDecodeModeEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT, + ePhysicalDeviceAstcDecodeFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT, #if defined( VK_USE_PLATFORM_WIN32_KHR ) eImportMemoryWin32HandleInfoKHR = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, eExportMemoryWin32HandleInfoKHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR, @@ -805,7 +912,6 @@ namespace VULKAN_HPP_NAMESPACE #endif 
/*VK_USE_PLATFORM_WIN32_KHR*/ eImportSemaphoreFdInfoKHR = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, eSemaphoreGetFdInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, - ePhysicalDevicePushDescriptorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR, eCommandBufferInheritanceConditionalRenderingInfoEXT = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT, ePhysicalDeviceConditionalRenderingFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT, eConditionalRenderingBeginInfoEXT = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT, @@ -956,12 +1062,6 @@ namespace VULKAN_HPP_NAMESPACE eVideoDecodeH265ProfileInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_INFO_KHR, eVideoDecodeH265PictureInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_KHR, eVideoDecodeH265DpbSlotInfoKHR = VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_KHR, - eDeviceQueueGlobalPriorityCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR, - eDeviceQueueGlobalPriorityCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT, - ePhysicalDeviceGlobalPriorityQueryFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR, - ePhysicalDeviceGlobalPriorityQueryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT, - eQueueFamilyGlobalPriorityPropertiesKHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR, - eQueueFamilyGlobalPriorityPropertiesEXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT, eDeviceMemoryOverallocationCreateInfoAMD = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD, ePhysicalDeviceVertexAttributeDivisorPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT, #if defined( VK_USE_PLATFORM_GGP ) @@ -1005,9 +1105,6 @@ namespace VULKAN_HPP_NAMESPACE eRenderingFragmentShadingRateAttachmentInfoKHR = VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR, ePhysicalDeviceShaderCoreProperties2AMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD, ePhysicalDeviceCoherentMemoryFeaturesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD, - ePhysicalDeviceDynamicRenderingLocalReadFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR, - eRenderingAttachmentLocationInfoKHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR, - eRenderingInputAttachmentIndexInfoKHR = VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR, ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT, ePhysicalDeviceShaderQuadControlFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_QUAD_CONTROL_FEATURES_KHR, ePhysicalDeviceMemoryBudgetPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT, @@ -1046,18 +1143,6 @@ namespace VULKAN_HPP_NAMESPACE ePipelineExecutableInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR, ePipelineExecutableStatisticKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR, ePipelineExecutableInternalRepresentationKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR, - ePhysicalDeviceHostImageCopyFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT, - ePhysicalDeviceHostImageCopyPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT, - eMemoryToImageCopyEXT = 
VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT, - eImageToMemoryCopyEXT = VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY_EXT, - eCopyImageToMemoryInfoEXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO_EXT, - eCopyMemoryToImageInfoEXT = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT, - eHostImageLayoutTransitionInfoEXT = VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT, - eCopyImageToImageInfoEXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO_EXT, - eSubresourceHostMemcpySizeEXT = VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE_EXT, - eHostImageCopyDevicePerformanceQueryEXT = VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT, - eMemoryMapInfoKHR = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR, - eMemoryUnmapInfoKHR = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR, ePhysicalDeviceMapMemoryPlacedFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT, ePhysicalDeviceMapMemoryPlacedPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_PROPERTIES_EXT, eMemoryMapPlacedInfoEXT = VK_STRUCTURE_TYPE_MEMORY_MAP_PLACED_INFO_EXT, @@ -1259,7 +1344,6 @@ namespace VULKAN_HPP_NAMESPACE eSamplerBorderColorComponentMappingCreateInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT, ePhysicalDevicePageableDeviceLocalMemoryFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT, ePhysicalDeviceShaderCorePropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM, - ePhysicalDeviceShaderSubgroupRotateFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR, eDeviceQueueShaderCoreControlCreateInfoARM = VK_STRUCTURE_TYPE_DEVICE_QUEUE_SHADER_CORE_CONTROL_CREATE_INFO_ARM, ePhysicalDeviceSchedulingControlsFeaturesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM, ePhysicalDeviceSchedulingControlsPropertiesARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_PROPERTIES_ARM, @@ -1316,22 +1400,11 @@ namespace VULKAN_HPP_NAMESPACE eOpticalFlowExecuteInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_EXECUTE_INFO_NV, eOpticalFlowSessionCreatePrivateDataInfoNV = VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_PRIVATE_DATA_INFO_NV, ePhysicalDeviceLegacyDitheringFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT, - ePhysicalDevicePipelineProtectedAccessFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT, #if defined( VK_USE_PLATFORM_ANDROID_KHR ) ePhysicalDeviceExternalFormatResolveFeaturesANDROID = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID, ePhysicalDeviceExternalFormatResolvePropertiesANDROID = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_PROPERTIES_ANDROID, eAndroidHardwareBufferFormatResolvePropertiesANDROID = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_RESOLVE_PROPERTIES_ANDROID, #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - ePhysicalDeviceMaintenance5FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR, - ePhysicalDeviceMaintenance5PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR, - eRenderingAreaInfoKHR = VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR, - eDeviceImageSubresourceInfoKHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR, - eSubresourceLayout2KHR = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR, - eSubresourceLayout2EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT, - eImageSubresource2KHR = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR, - eImageSubresource2EXT = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT, - 
ePipelineCreateFlags2CreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR, - eBufferUsageFlags2CreateInfoKHR = VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR, ePhysicalDeviceAntiLagFeaturesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ANTI_LAG_FEATURES_AMD, eAntiLagDataAMD = VK_STRUCTURE_TYPE_ANTI_LAG_DATA_AMD, eAntiLagPresentationInfoAMD = VK_STRUCTURE_TYPE_ANTI_LAG_PRESENTATION_INFO_AMD, @@ -1415,12 +1488,6 @@ namespace VULKAN_HPP_NAMESPACE eSamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_YCBCR_DEGAMMA_CREATE_INFO_QCOM, ePhysicalDeviceCubicClampFeaturesQCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM, ePhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT, - ePhysicalDeviceVertexAttributeDivisorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR, - ePipelineVertexInputDivisorStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR, - ePipelineVertexInputDivisorStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT, - ePhysicalDeviceVertexAttributeDivisorFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR, - ePhysicalDeviceVertexAttributeDivisorFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT, - ePhysicalDeviceShaderFloatControls2FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR, #if defined( VK_USE_PLATFORM_SCREEN_QNX ) eScreenBufferPropertiesQNX = VK_STRUCTURE_TYPE_SCREEN_BUFFER_PROPERTIES_QNX, eScreenBufferFormatPropertiesQNX = VK_STRUCTURE_TYPE_SCREEN_BUFFER_FORMAT_PROPERTIES_QNX, @@ -1429,24 +1496,8 @@ namespace VULKAN_HPP_NAMESPACE ePhysicalDeviceExternalMemoryScreenBufferFeaturesQNX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX, #endif /*VK_USE_PLATFORM_SCREEN_QNX*/ ePhysicalDeviceLayeredDriverPropertiesMSFT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_DRIVER_PROPERTIES_MSFT, - ePhysicalDeviceIndexTypeUint8FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR, - ePhysicalDeviceIndexTypeUint8FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT, - ePhysicalDeviceLineRasterizationFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR, - ePhysicalDeviceLineRasterizationFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT, - ePipelineRasterizationLineStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR, - ePipelineRasterizationLineStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT, - ePhysicalDeviceLineRasterizationPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR, - ePhysicalDeviceLineRasterizationPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT, eCalibratedTimestampInfoKHR = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR, eCalibratedTimestampInfoEXT = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, - ePhysicalDeviceShaderExpectAssumeFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR, - ePhysicalDeviceMaintenance6FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR, - ePhysicalDeviceMaintenance6PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR, 
- eBindMemoryStatusKHR = VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR, - eBindDescriptorSetsInfoKHR = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR, - ePushConstantsInfoKHR = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR, - ePushDescriptorSetInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR, - ePushDescriptorSetWithTemplateInfoKHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR, eSetDescriptorBufferOffsetsInfoEXT = VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT, eBindDescriptorBufferEmbeddedSamplersInfoEXT = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT, ePhysicalDeviceDescriptorPoolOverallocationFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV, @@ -1877,6 +1928,10 @@ namespace VULKAN_HPP_NAMESPACE eAstc12x10SfloatBlockEXT = VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT, eAstc12x12SfloatBlock = VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK, eAstc12x12SfloatBlockEXT = VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT, + eA1B5G5R5UnormPack16 = VK_FORMAT_A1B5G5R5_UNORM_PACK16, + eA1B5G5R5UnormPack16KHR = VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR, + eA8Unorm = VK_FORMAT_A8_UNORM, + eA8UnormKHR = VK_FORMAT_A8_UNORM_KHR, ePvrtc12BppUnormBlockIMG = VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG, ePvrtc14BppUnormBlockIMG = VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG, ePvrtc22BppUnormBlockIMG = VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG, @@ -1886,9 +1941,7 @@ namespace VULKAN_HPP_NAMESPACE ePvrtc22BppSrgbBlockIMG = VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG, ePvrtc24BppSrgbBlockIMG = VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG, eR16G16Sfixed5NV = VK_FORMAT_R16G16_SFIXED5_NV, - eR16G16S105NV = VK_FORMAT_R16G16_S10_5_NV, - eA1B5G5R5UnormPack16KHR = VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR, - eA8UnormKHR = VK_FORMAT_A8_UNORM_KHR + eR16G16S105NV = VK_FORMAT_R16G16_S10_5_NV }; enum class FormatFeatureFlagBits : VkFormatFeatureFlags @@ -2031,13 +2084,14 @@ namespace VULKAN_HPP_NAMESPACE eDepthStencilAttachment = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, eTransientAttachment = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, eInputAttachment = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, + eHostTransfer = VK_IMAGE_USAGE_HOST_TRANSFER_BIT, + eHostTransferEXT = VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT, eVideoDecodeDstKHR = VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR, eVideoDecodeSrcKHR = VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR, eVideoDecodeDpbKHR = VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR, eFragmentDensityMapEXT = VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, eFragmentShadingRateAttachmentKHR = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, eShadingRateImageNV = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, - eHostTransferEXT = VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT, eVideoEncodeDstKHR = VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR, eVideoEncodeSrcKHR = VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR, eVideoEncodeDpbKHR = VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR, @@ -2058,9 +2112,9 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR ImageUsageFlags allFlags = ImageUsageFlagBits::eTransferSrc | ImageUsageFlagBits::eTransferDst | ImageUsageFlagBits::eSampled | ImageUsageFlagBits::eStorage | ImageUsageFlagBits::eColorAttachment | ImageUsageFlagBits::eDepthStencilAttachment | ImageUsageFlagBits::eTransientAttachment | - ImageUsageFlagBits::eInputAttachment | ImageUsageFlagBits::eVideoDecodeDstKHR | ImageUsageFlagBits::eVideoDecodeSrcKHR | - ImageUsageFlagBits::eVideoDecodeDpbKHR | ImageUsageFlagBits::eFragmentDensityMapEXT | ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR | - 
ImageUsageFlagBits::eHostTransferEXT | ImageUsageFlagBits::eVideoEncodeDstKHR | ImageUsageFlagBits::eVideoEncodeSrcKHR | + ImageUsageFlagBits::eInputAttachment | ImageUsageFlagBits::eHostTransfer | ImageUsageFlagBits::eVideoDecodeDstKHR | + ImageUsageFlagBits::eVideoDecodeSrcKHR | ImageUsageFlagBits::eVideoDecodeDpbKHR | ImageUsageFlagBits::eFragmentDensityMapEXT | + ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR | ImageUsageFlagBits::eVideoEncodeDstKHR | ImageUsageFlagBits::eVideoEncodeSrcKHR | ImageUsageFlagBits::eVideoEncodeDpbKHR | ImageUsageFlagBits::eAttachmentFeedbackLoopEXT | ImageUsageFlagBits::eInvocationMaskHUAWEI | ImageUsageFlagBits::eSampleWeightQCOM | ImageUsageFlagBits::eSampleBlockMatchQCOM | ImageUsageFlagBits::eVideoEncodeQuantizationDeltaMapKHR | ImageUsageFlagBits::eVideoEncodeEmphasisMapKHR; @@ -2607,6 +2661,8 @@ namespace VULKAN_HPP_NAMESPACE eReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR, eAttachmentOptimal = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL, eAttachmentOptimalKHR = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR, + eRenderingLocalRead = VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ, + eRenderingLocalReadKHR = VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR, ePresentSrcKHR = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, eVideoDecodeDstKHR = VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR, eVideoDecodeSrcKHR = VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR, @@ -2615,7 +2671,6 @@ namespace VULKAN_HPP_NAMESPACE eFragmentDensityMapOptimalEXT = VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, eFragmentShadingRateAttachmentOptimalKHR = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR, eShadingRateOptimalNV = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, - eRenderingLocalReadKHR = VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR, eVideoEncodeDstKHR = VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR, eVideoEncodeSrcKHR = VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR, eVideoEncodeDpbKHR = VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR, @@ -2858,6 +2913,9 @@ namespace VULKAN_HPP_NAMESPACE eDepthBiasEnableEXT = VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT, ePrimitiveRestartEnable = VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE, ePrimitiveRestartEnableEXT = VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT, + eLineStipple = VK_DYNAMIC_STATE_LINE_STIPPLE, + eLineStippleEXT = VK_DYNAMIC_STATE_LINE_STIPPLE_EXT, + eLineStippleKHR = VK_DYNAMIC_STATE_LINE_STIPPLE_KHR, eViewportWScalingNV = VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, eDiscardRectangleEXT = VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, eDiscardRectangleEnableEXT = VK_DYNAMIC_STATE_DISCARD_RECTANGLE_ENABLE_EXT, @@ -2905,8 +2963,6 @@ namespace VULKAN_HPP_NAMESPACE eRepresentativeFragmentTestEnableNV = VK_DYNAMIC_STATE_REPRESENTATIVE_FRAGMENT_TEST_ENABLE_NV, eCoverageReductionModeNV = VK_DYNAMIC_STATE_COVERAGE_REDUCTION_MODE_NV, eAttachmentFeedbackLoopEnableEXT = VK_DYNAMIC_STATE_ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT, - eLineStippleKHR = VK_DYNAMIC_STATE_LINE_STIPPLE_KHR, - eLineStippleEXT = VK_DYNAMIC_STATE_LINE_STIPPLE_EXT, eDepthClampRangeEXT = VK_DYNAMIC_STATE_DEPTH_CLAMP_RANGE_EXT }; @@ -2949,6 +3005,10 @@ namespace VULKAN_HPP_NAMESPACE eFailOnPipelineCompileRequiredEXT = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT, eEarlyReturnOnFailure = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT, eEarlyReturnOnFailureEXT = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT, + eNoProtectedAccess = VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT, + eNoProtectedAccessEXT = VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT, + eProtectedAccessOnly = VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT, + 
eProtectedAccessOnlyEXT = VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT, eRayTracingNoNullAnyHitShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR, eRayTracingNoNullClosestHitShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR, eRayTracingNoNullMissShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR, @@ -2973,10 +3033,8 @@ namespace VULKAN_HPP_NAMESPACE eDepthStencilAttachmentFeedbackLoopEXT = VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT, eRayTracingOpacityMicromapEXT = VK_PIPELINE_CREATE_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT, #if defined( VK_ENABLE_BETA_EXTENSIONS ) - eRayTracingDisplacementMicromapNV = VK_PIPELINE_CREATE_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV, + eRayTracingDisplacementMicromapNV = VK_PIPELINE_CREATE_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV #endif /*VK_ENABLE_BETA_EXTENSIONS*/ - eNoProtectedAccessEXT = VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT, - eProtectedAccessOnlyEXT = VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT }; using PipelineCreateFlags = Flags<PipelineCreateFlagBits>; @@ -2988,21 +3046,21 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR PipelineCreateFlags allFlags = PipelineCreateFlagBits::eDisableOptimization | PipelineCreateFlagBits::eAllowDerivatives | PipelineCreateFlagBits::eDerivative | PipelineCreateFlagBits::eViewIndexFromDeviceIndex | PipelineCreateFlagBits::eDispatchBase | PipelineCreateFlagBits::eFailOnPipelineCompileRequired | - PipelineCreateFlagBits::eEarlyReturnOnFailure | PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR | - PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR | PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR | - PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR | PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR | - PipelineCreateFlagBits::eRayTracingSkipAabbsKHR | PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR | - PipelineCreateFlagBits::eDeferCompileNV | PipelineCreateFlagBits::eRenderingFragmentDensityMapAttachmentEXT | - PipelineCreateFlagBits::eRenderingFragmentShadingRateAttachmentKHR | PipelineCreateFlagBits::eCaptureStatisticsKHR | - PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR | PipelineCreateFlagBits::eIndirectBindableNV | PipelineCreateFlagBits::eLibraryKHR | - PipelineCreateFlagBits::eDescriptorBufferEXT | PipelineCreateFlagBits::eRetainLinkTimeOptimizationInfoEXT | + PipelineCreateFlagBits::eEarlyReturnOnFailure | PipelineCreateFlagBits::eNoProtectedAccess | PipelineCreateFlagBits::eProtectedAccessOnly | + PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR | PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR | + PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR | PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR | + PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR | PipelineCreateFlagBits::eRayTracingSkipAabbsKHR | + PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR | PipelineCreateFlagBits::eDeferCompileNV | + PipelineCreateFlagBits::eRenderingFragmentDensityMapAttachmentEXT | PipelineCreateFlagBits::eRenderingFragmentShadingRateAttachmentKHR | + PipelineCreateFlagBits::eCaptureStatisticsKHR | PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR | PipelineCreateFlagBits::eIndirectBindableNV | + PipelineCreateFlagBits::eLibraryKHR | PipelineCreateFlagBits::eDescriptorBufferEXT | PipelineCreateFlagBits::eRetainLinkTimeOptimizationInfoEXT |
PipelineCreateFlagBits::eLinkTimeOptimizationEXT | PipelineCreateFlagBits::eRayTracingAllowMotionNV | PipelineCreateFlagBits::eColorAttachmentFeedbackLoopEXT | PipelineCreateFlagBits::eDepthStencilAttachmentFeedbackLoopEXT | PipelineCreateFlagBits::eRayTracingOpacityMicromapEXT #if defined( VK_ENABLE_BETA_EXTENSIONS ) | PipelineCreateFlagBits::eRayTracingDisplacementMicromapNV #endif /*VK_ENABLE_BETA_EXTENSIONS*/ - | PipelineCreateFlagBits::eNoProtectedAccessEXT | PipelineCreateFlagBits::eProtectedAccessOnlyEXT; + ; }; enum class PipelineShaderStageCreateFlagBits : VkPipelineShaderStageCreateFlags @@ -3330,6 +3388,7 @@ namespace VULKAN_HPP_NAMESPACE { eUpdateAfterBindPool = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, eUpdateAfterBindPoolEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT, + ePushDescriptor = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT, ePushDescriptorKHR = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, eDescriptorBufferEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_DESCRIPTOR_BUFFER_BIT_EXT, eEmbeddedImmutableSamplersEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT, @@ -3346,7 +3405,7 @@ namespace VULKAN_HPP_NAMESPACE { static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; static VULKAN_HPP_CONST_OR_CONSTEXPR DescriptorSetLayoutCreateFlags allFlags = - DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool | DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR | + DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool | DescriptorSetLayoutCreateFlagBits::ePushDescriptor | DescriptorSetLayoutCreateFlagBits::eDescriptorBufferEXT | DescriptorSetLayoutCreateFlagBits::eEmbeddedImmutableSamplersEXT | DescriptorSetLayoutCreateFlagBits::eIndirectBindableNV | DescriptorSetLayoutCreateFlagBits::eHostOnlyPoolEXT | DescriptorSetLayoutCreateFlagBits::ePerStageNV; @@ -3463,8 +3522,9 @@ namespace VULKAN_HPP_NAMESPACE eLoad = VK_ATTACHMENT_LOAD_OP_LOAD, eClear = VK_ATTACHMENT_LOAD_OP_CLEAR, eDontCare = VK_ATTACHMENT_LOAD_OP_DONT_CARE, - eNoneKHR = VK_ATTACHMENT_LOAD_OP_NONE_KHR, - eNoneEXT = VK_ATTACHMENT_LOAD_OP_NONE_EXT + eNone = VK_ATTACHMENT_LOAD_OP_NONE, + eNoneEXT = VK_ATTACHMENT_LOAD_OP_NONE_EXT, + eNoneKHR = VK_ATTACHMENT_LOAD_OP_NONE_KHR }; enum class AttachmentStoreOp @@ -3652,10 +3712,11 @@ namespace VULKAN_HPP_NAMESPACE { eUint16 = VK_INDEX_TYPE_UINT16, eUint32 = VK_INDEX_TYPE_UINT32, - eNoneKHR = VK_INDEX_TYPE_NONE_KHR, - eNoneNV = VK_INDEX_TYPE_NONE_NV, + eUint8 = VK_INDEX_TYPE_UINT8, + eUint8EXT = VK_INDEX_TYPE_UINT8_EXT, eUint8KHR = VK_INDEX_TYPE_UINT8_KHR, - eUint8EXT = VK_INDEX_TYPE_UINT8_EXT + eNoneKHR = VK_INDEX_TYPE_NONE_KHR, + eNoneNV = VK_INDEX_TYPE_NONE_NV }; //========================= @@ -3694,11 +3755,11 @@ namespace VULKAN_HPP_NAMESPACE template <> struct IndexTypeValue<uint8_t> { - static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint8KHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint8; }; template <> - struct CppType<IndexType, IndexType::eUint8KHR> + struct CppType<IndexType, IndexType::eUint8> { using Type = uint8_t; }; @@ -3741,9 +3802,11 @@ namespace VULKAN_HPP_NAMESPACE eShuffleRelative = VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT, eClustered = VK_SUBGROUP_FEATURE_CLUSTERED_BIT, eQuad = VK_SUBGROUP_FEATURE_QUAD_BIT, - ePartitionedNV = VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV, + eRotate = VK_SUBGROUP_FEATURE_ROTATE_BIT, eRotateKHR = VK_SUBGROUP_FEATURE_ROTATE_BIT_KHR, - eRotateClusteredKHR = VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT_KHR + eRotateClustered = VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT, +
eRotateClusteredKHR = VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT_KHR, + ePartitionedNV = VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV }; using SubgroupFeatureFlags = Flags<SubgroupFeatureFlagBits>; @@ -3755,7 +3818,7 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR SubgroupFeatureFlags allFlags = SubgroupFeatureFlagBits::eBasic | SubgroupFeatureFlagBits::eVote | SubgroupFeatureFlagBits::eArithmetic | SubgroupFeatureFlagBits::eBallot | SubgroupFeatureFlagBits::eShuffle | SubgroupFeatureFlagBits::eShuffleRelative | SubgroupFeatureFlagBits::eClustered | SubgroupFeatureFlagBits::eQuad | - SubgroupFeatureFlagBits::ePartitionedNV | SubgroupFeatureFlagBits::eRotateKHR | SubgroupFeatureFlagBits::eRotateClusteredKHR; + SubgroupFeatureFlagBits::eRotate | SubgroupFeatureFlagBits::eRotateClustered | SubgroupFeatureFlagBits::ePartitionedNV; }; enum class PeerMemoryFeatureFlagBits : VkPeerMemoryFeatureFlags @@ -3851,8 +3914,8 @@ namespace VULKAN_HPP_NAMESPACE enum class DescriptorUpdateTemplateType { - eDescriptorSet = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, - ePushDescriptorsKHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR + eDescriptorSet = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + ePushDescriptors = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS }; using DescriptorUpdateTemplateTypeKHR = DescriptorUpdateTemplateType; @@ -4451,8 +4514,6 @@ namespace VULKAN_HPP_NAMESPACE eBlitSrc = VK_FORMAT_FEATURE_2_BLIT_SRC_BIT, eBlitDst = VK_FORMAT_FEATURE_2_BLIT_DST_BIT, eSampledImageFilterLinear = VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT, - eSampledImageFilterCubic = VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT, - eSampledImageFilterCubicEXT = VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT, eTransferSrc = VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT, eTransferDst = VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT, eSampledImageFilterMinmax = VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT, @@ -4467,12 +4528,15 @@ namespace VULKAN_HPP_NAMESPACE eStorageReadWithoutFormat = VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT, eStorageWriteWithoutFormat = VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT, eSampledImageDepthComparison = VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT, + eSampledImageFilterCubic = VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT, + eSampledImageFilterCubicEXT = VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT, + eHostImageTransfer = VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT, + eHostImageTransferEXT = VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT_EXT, eVideoDecodeOutputKHR = VK_FORMAT_FEATURE_2_VIDEO_DECODE_OUTPUT_BIT_KHR, eVideoDecodeDpbKHR = VK_FORMAT_FEATURE_2_VIDEO_DECODE_DPB_BIT_KHR, eAccelerationStructureVertexBufferKHR = VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR, eFragmentDensityMapEXT = VK_FORMAT_FEATURE_2_FRAGMENT_DENSITY_MAP_BIT_EXT, eFragmentShadingRateAttachmentKHR = VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, - eHostImageTransferEXT = VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT_EXT, eVideoEncodeInputKHR = VK_FORMAT_FEATURE_2_VIDEO_ENCODE_INPUT_BIT_KHR, eVideoEncodeDpbKHR = VK_FORMAT_FEATURE_2_VIDEO_ENCODE_DPB_BIT_KHR, eLinearColorAttachmentNV = VK_FORMAT_FEATURE_2_LINEAR_COLOR_ATTACHMENT_BIT_NV, @@ -4500,19 +4564,230 @@ namespace VULKAN_HPP_NAMESPACE FormatFeatureFlagBits2::eUniformTexelBuffer | FormatFeatureFlagBits2::eStorageTexelBuffer | FormatFeatureFlagBits2::eStorageTexelBufferAtomic | FormatFeatureFlagBits2::eVertexBuffer | FormatFeatureFlagBits2::eColorAttachment |
FormatFeatureFlagBits2::eColorAttachmentBlend | FormatFeatureFlagBits2::eDepthStencilAttachment | FormatFeatureFlagBits2::eBlitSrc | FormatFeatureFlagBits2::eBlitDst | - FormatFeatureFlagBits2::eSampledImageFilterLinear | FormatFeatureFlagBits2::eSampledImageFilterCubic | FormatFeatureFlagBits2::eTransferSrc | - FormatFeatureFlagBits2::eTransferDst | FormatFeatureFlagBits2::eSampledImageFilterMinmax | FormatFeatureFlagBits2::eMidpointChromaSamples | + FormatFeatureFlagBits2::eSampledImageFilterLinear | FormatFeatureFlagBits2::eTransferSrc | FormatFeatureFlagBits2::eTransferDst | + FormatFeatureFlagBits2::eSampledImageFilterMinmax | FormatFeatureFlagBits2::eMidpointChromaSamples | FormatFeatureFlagBits2::eSampledImageYcbcrConversionLinearFilter | FormatFeatureFlagBits2::eSampledImageYcbcrConversionSeparateReconstructionFilter | FormatFeatureFlagBits2::eSampledImageYcbcrConversionChromaReconstructionExplicit | FormatFeatureFlagBits2::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable | FormatFeatureFlagBits2::eDisjoint | FormatFeatureFlagBits2::eCositedChromaSamples | FormatFeatureFlagBits2::eStorageReadWithoutFormat | FormatFeatureFlagBits2::eStorageWriteWithoutFormat | - FormatFeatureFlagBits2::eSampledImageDepthComparison | FormatFeatureFlagBits2::eVideoDecodeOutputKHR | FormatFeatureFlagBits2::eVideoDecodeDpbKHR | + FormatFeatureFlagBits2::eSampledImageDepthComparison | FormatFeatureFlagBits2::eSampledImageFilterCubic | FormatFeatureFlagBits2::eHostImageTransfer | + FormatFeatureFlagBits2::eVideoDecodeOutputKHR | FormatFeatureFlagBits2::eVideoDecodeDpbKHR | FormatFeatureFlagBits2::eAccelerationStructureVertexBufferKHR | FormatFeatureFlagBits2::eFragmentDensityMapEXT | - FormatFeatureFlagBits2::eFragmentShadingRateAttachmentKHR | FormatFeatureFlagBits2::eHostImageTransferEXT | FormatFeatureFlagBits2::eVideoEncodeInputKHR | - FormatFeatureFlagBits2::eVideoEncodeDpbKHR | FormatFeatureFlagBits2::eLinearColorAttachmentNV | FormatFeatureFlagBits2::eWeightImageQCOM | - FormatFeatureFlagBits2::eWeightSampledImageQCOM | FormatFeatureFlagBits2::eBlockMatchingQCOM | FormatFeatureFlagBits2::eBoxFilterSampledQCOM | - FormatFeatureFlagBits2::eOpticalFlowImageNV | FormatFeatureFlagBits2::eOpticalFlowVectorNV | FormatFeatureFlagBits2::eOpticalFlowCostNV | - FormatFeatureFlagBits2::eVideoEncodeQuantizationDeltaMapKHR | FormatFeatureFlagBits2::eVideoEncodeEmphasisMapKHR; + FormatFeatureFlagBits2::eFragmentShadingRateAttachmentKHR | FormatFeatureFlagBits2::eVideoEncodeInputKHR | FormatFeatureFlagBits2::eVideoEncodeDpbKHR | + FormatFeatureFlagBits2::eLinearColorAttachmentNV | FormatFeatureFlagBits2::eWeightImageQCOM | FormatFeatureFlagBits2::eWeightSampledImageQCOM | + FormatFeatureFlagBits2::eBlockMatchingQCOM | FormatFeatureFlagBits2::eBoxFilterSampledQCOM | FormatFeatureFlagBits2::eOpticalFlowImageNV | + FormatFeatureFlagBits2::eOpticalFlowVectorNV | FormatFeatureFlagBits2::eOpticalFlowCostNV | FormatFeatureFlagBits2::eVideoEncodeQuantizationDeltaMapKHR | + FormatFeatureFlagBits2::eVideoEncodeEmphasisMapKHR; + }; + + //=== VK_VERSION_1_4 === + + enum class QueueGlobalPriority + { + eLow = VK_QUEUE_GLOBAL_PRIORITY_LOW, + eLowKHR = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR, + eMedium = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM, + eMediumKHR = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR, + eHigh = VK_QUEUE_GLOBAL_PRIORITY_HIGH, + eHighKHR = VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR, + eRealtime = VK_QUEUE_GLOBAL_PRIORITY_REALTIME, + eRealtimeKHR = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR + }; + using QueueGlobalPriorityEXT = 
QueueGlobalPriority; + using QueueGlobalPriorityKHR = QueueGlobalPriority; + + enum class LineRasterizationMode + { + eDefault = VK_LINE_RASTERIZATION_MODE_DEFAULT, + eDefaultKHR = VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR, + eRectangular = VK_LINE_RASTERIZATION_MODE_RECTANGULAR, + eRectangularKHR = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR, + eBresenham = VK_LINE_RASTERIZATION_MODE_BRESENHAM, + eBresenhamKHR = VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR, + eRectangularSmooth = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH, + eRectangularSmoothKHR = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR + }; + using LineRasterizationModeEXT = LineRasterizationMode; + using LineRasterizationModeKHR = LineRasterizationMode; + + enum class MemoryUnmapFlagBits : VkMemoryUnmapFlags + { + eReserveEXT = VK_MEMORY_UNMAP_RESERVE_BIT_EXT + }; + using MemoryUnmapFlagBitsKHR = MemoryUnmapFlagBits; + + using MemoryUnmapFlags = Flags<MemoryUnmapFlagBits>; + using MemoryUnmapFlagsKHR = MemoryUnmapFlags; + + template <> + struct FlagTraits<MemoryUnmapFlagBits> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR MemoryUnmapFlags allFlags = MemoryUnmapFlagBits::eReserveEXT; + }; + + enum class PipelineCreateFlagBits2 : VkPipelineCreateFlags2 + { + eDisableOptimization = VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT, + eAllowDerivatives = VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT, + eDerivative = VK_PIPELINE_CREATE_2_DERIVATIVE_BIT, + eViewIndexFromDeviceIndex = VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + eDispatchBase = VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT, + eFailOnPipelineCompileRequired = VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT, + eEarlyReturnOnFailure = VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT, + eNoProtectedAccess = VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT, + eNoProtectedAccessEXT = VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT_EXT, + eProtectedAccessOnly = VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT, + eProtectedAccessOnlyEXT = VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT, +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + eExecutionGraphAMDX = VK_PIPELINE_CREATE_2_EXECUTION_GRAPH_BIT_AMDX, +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + eEnableLegacyDitheringEXT = VK_PIPELINE_CREATE_2_ENABLE_LEGACY_DITHERING_BIT_EXT, + eDeferCompileNV = VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV, + eCaptureStatisticsKHR = VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR, + eCaptureInternalRepresentationsKHR = VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR, + eLinkTimeOptimizationEXT = VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT, + eRetainLinkTimeOptimizationInfoEXT = VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT, + eLibraryKHR = VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR, + eRayTracingSkipTrianglesKHR = VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR, + eRayTracingSkipAabbsKHR = VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR, + eRayTracingNoNullAnyHitShadersKHR = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR, + eRayTracingNoNullClosestHitShadersKHR = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR, + eRayTracingNoNullMissShadersKHR = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR, + eRayTracingNoNullIntersectionShadersKHR = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR, + eRayTracingShaderGroupHandleCaptureReplayKHR = VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR, + eIndirectBindableNV =
VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_NV, + eRayTracingAllowMotionNV = VK_PIPELINE_CREATE_2_RAY_TRACING_ALLOW_MOTION_BIT_NV, + eRenderingFragmentShadingRateAttachmentKHR = VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + eRenderingFragmentDensityMapAttachmentEXT = VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT, + eRayTracingOpacityMicromapEXT = VK_PIPELINE_CREATE_2_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT, + eColorAttachmentFeedbackLoopEXT = VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT, + eDepthStencilAttachmentFeedbackLoopEXT = VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT, + eRayTracingDisplacementMicromapNV = VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV, + eDescriptorBufferEXT = VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT, + eCaptureDataKHR = VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR, + eIndirectBindableEXT = VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_EXT + }; + using PipelineCreateFlagBits2KHR = PipelineCreateFlagBits2; + + using PipelineCreateFlags2 = Flags<PipelineCreateFlagBits2>; + using PipelineCreateFlags2KHR = PipelineCreateFlags2; + + template <> + struct FlagTraits<PipelineCreateFlagBits2> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR PipelineCreateFlags2 allFlags = + PipelineCreateFlagBits2::eDisableOptimization | PipelineCreateFlagBits2::eAllowDerivatives | PipelineCreateFlagBits2::eDerivative | + PipelineCreateFlagBits2::eViewIndexFromDeviceIndex | PipelineCreateFlagBits2::eDispatchBase | PipelineCreateFlagBits2::eFailOnPipelineCompileRequired | + PipelineCreateFlagBits2::eEarlyReturnOnFailure | PipelineCreateFlagBits2::eNoProtectedAccess | PipelineCreateFlagBits2::eProtectedAccessOnly +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + | PipelineCreateFlagBits2::eExecutionGraphAMDX +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + | PipelineCreateFlagBits2::eEnableLegacyDitheringEXT | PipelineCreateFlagBits2::eDeferCompileNV | PipelineCreateFlagBits2::eCaptureStatisticsKHR | + PipelineCreateFlagBits2::eCaptureInternalRepresentationsKHR | PipelineCreateFlagBits2::eLinkTimeOptimizationEXT | + PipelineCreateFlagBits2::eRetainLinkTimeOptimizationInfoEXT | PipelineCreateFlagBits2::eLibraryKHR | + PipelineCreateFlagBits2::eRayTracingSkipTrianglesKHR | PipelineCreateFlagBits2::eRayTracingSkipAabbsKHR | + PipelineCreateFlagBits2::eRayTracingNoNullAnyHitShadersKHR | PipelineCreateFlagBits2::eRayTracingNoNullClosestHitShadersKHR | + PipelineCreateFlagBits2::eRayTracingNoNullMissShadersKHR | PipelineCreateFlagBits2::eRayTracingNoNullIntersectionShadersKHR | + PipelineCreateFlagBits2::eRayTracingShaderGroupHandleCaptureReplayKHR | PipelineCreateFlagBits2::eIndirectBindableNV | + PipelineCreateFlagBits2::eRayTracingAllowMotionNV | PipelineCreateFlagBits2::eRenderingFragmentShadingRateAttachmentKHR | + PipelineCreateFlagBits2::eRenderingFragmentDensityMapAttachmentEXT | PipelineCreateFlagBits2::eRayTracingOpacityMicromapEXT | + PipelineCreateFlagBits2::eColorAttachmentFeedbackLoopEXT | PipelineCreateFlagBits2::eDepthStencilAttachmentFeedbackLoopEXT | + PipelineCreateFlagBits2::eRayTracingDisplacementMicromapNV | PipelineCreateFlagBits2::eDescriptorBufferEXT | PipelineCreateFlagBits2::eCaptureDataKHR | + PipelineCreateFlagBits2::eIndirectBindableEXT; + }; + + enum class BufferUsageFlagBits2 : VkBufferUsageFlags2 + { + eTransferSrc = VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT, + eTransferDst = VK_BUFFER_USAGE_2_TRANSFER_DST_BIT, + eUniformTexelBuffer = VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT, +
eStorageTexelBuffer = VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT, + eUniformBuffer = VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT, + eStorageBuffer = VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT, + eIndexBuffer = VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT, + eVertexBuffer = VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT, + eIndirectBuffer = VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT, + eShaderDeviceAddress = VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT, +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + eExecutionGraphScratchAMDX = VK_BUFFER_USAGE_2_EXECUTION_GRAPH_SCRATCH_BIT_AMDX, +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + eConditionalRenderingEXT = VK_BUFFER_USAGE_2_CONDITIONAL_RENDERING_BIT_EXT, + eShaderBindingTableKHR = VK_BUFFER_USAGE_2_SHADER_BINDING_TABLE_BIT_KHR, + eRayTracingNV = VK_BUFFER_USAGE_2_RAY_TRACING_BIT_NV, + eTransformFeedbackBufferEXT = VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT, + eTransformFeedbackCounterBufferEXT = VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT, + eVideoDecodeSrcKHR = VK_BUFFER_USAGE_2_VIDEO_DECODE_SRC_BIT_KHR, + eVideoDecodeDstKHR = VK_BUFFER_USAGE_2_VIDEO_DECODE_DST_BIT_KHR, + eVideoEncodeDstKHR = VK_BUFFER_USAGE_2_VIDEO_ENCODE_DST_BIT_KHR, + eVideoEncodeSrcKHR = VK_BUFFER_USAGE_2_VIDEO_ENCODE_SRC_BIT_KHR, + eAccelerationStructureBuildInputReadOnlyKHR = VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR, + eAccelerationStructureStorageKHR = VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR, + eSamplerDescriptorBufferEXT = VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT, + eResourceDescriptorBufferEXT = VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT, + ePushDescriptorsDescriptorBufferEXT = VK_BUFFER_USAGE_2_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT, + eMicromapBuildInputReadOnlyEXT = VK_BUFFER_USAGE_2_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT, + eMicromapStorageEXT = VK_BUFFER_USAGE_2_MICROMAP_STORAGE_BIT_EXT, + ePreprocessBufferEXT = VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT + }; + using BufferUsageFlagBits2KHR = BufferUsageFlagBits2; + + using BufferUsageFlags2 = Flags; + using BufferUsageFlags2KHR = BufferUsageFlags2; + + template <> + struct FlagTraits + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR BufferUsageFlags2 allFlags = + BufferUsageFlagBits2::eTransferSrc | BufferUsageFlagBits2::eTransferDst | BufferUsageFlagBits2::eUniformTexelBuffer | + BufferUsageFlagBits2::eStorageTexelBuffer | BufferUsageFlagBits2::eUniformBuffer | BufferUsageFlagBits2::eStorageBuffer | + BufferUsageFlagBits2::eIndexBuffer | BufferUsageFlagBits2::eVertexBuffer | BufferUsageFlagBits2::eIndirectBuffer | + BufferUsageFlagBits2::eShaderDeviceAddress +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + | BufferUsageFlagBits2::eExecutionGraphScratchAMDX +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + | BufferUsageFlagBits2::eConditionalRenderingEXT | BufferUsageFlagBits2::eShaderBindingTableKHR | BufferUsageFlagBits2::eTransformFeedbackBufferEXT | + BufferUsageFlagBits2::eTransformFeedbackCounterBufferEXT | BufferUsageFlagBits2::eVideoDecodeSrcKHR | BufferUsageFlagBits2::eVideoDecodeDstKHR | + BufferUsageFlagBits2::eVideoEncodeDstKHR | BufferUsageFlagBits2::eVideoEncodeSrcKHR | BufferUsageFlagBits2::eAccelerationStructureBuildInputReadOnlyKHR | + BufferUsageFlagBits2::eAccelerationStructureStorageKHR | BufferUsageFlagBits2::eSamplerDescriptorBufferEXT | + BufferUsageFlagBits2::eResourceDescriptorBufferEXT | BufferUsageFlagBits2::ePushDescriptorsDescriptorBufferEXT | + BufferUsageFlagBits2::eMicromapBuildInputReadOnlyEXT 
| BufferUsageFlagBits2::eMicromapStorageEXT | BufferUsageFlagBits2::ePreprocessBufferEXT; + }; + + enum class PipelineRobustnessBufferBehavior + { + eDeviceDefault = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT, + eDisabled = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED, + eRobustBufferAccess = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS, + eRobustBufferAccess2 = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2 + }; + using PipelineRobustnessBufferBehaviorEXT = PipelineRobustnessBufferBehavior; + + enum class PipelineRobustnessImageBehavior + { + eDeviceDefault = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT, + eDisabled = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED, + eRobustImageAccess = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS, + eRobustImageAccess2 = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2 + }; + using PipelineRobustnessImageBehaviorEXT = PipelineRobustnessImageBehavior; + + enum class HostImageCopyFlagBits : VkHostImageCopyFlags + { + eMemcpy = VK_HOST_IMAGE_COPY_MEMCPY + }; + using HostImageCopyFlagBitsEXT = HostImageCopyFlagBits; + + using HostImageCopyFlags = Flags; + using HostImageCopyFlagsEXT = HostImageCopyFlags; + + template <> + struct FlagTraits + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR HostImageCopyFlags allFlags = HostImageCopyFlagBits::eMemcpy; }; //=== VK_KHR_surface === @@ -5415,24 +5690,6 @@ namespace VULKAN_HPP_NAMESPACE }; #endif /*VK_USE_PLATFORM_VI_NN*/ - //=== VK_EXT_pipeline_robustness === - - enum class PipelineRobustnessBufferBehaviorEXT - { - eDeviceDefault = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT, - eDisabled = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT, - eRobustBufferAccess = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT, - eRobustBufferAccess2 = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT - }; - - enum class PipelineRobustnessImageBehaviorEXT - { - eDeviceDefault = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT, - eDisabled = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT, - eRobustImageAccess = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT, - eRobustImageAccess2 = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT - }; - //=== VK_EXT_conditional_rendering === enum class ConditionalRenderingFlagBitsEXT : VkConditionalRenderingFlagsEXT @@ -6014,17 +6271,6 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR PipelineCompilerControlFlagsAMD allFlags = {}; }; - //=== VK_KHR_global_priority === - - enum class QueueGlobalPriorityKHR - { - eLow = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR, - eMedium = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR, - eHigh = VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR, - eRealtime = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR - }; - using QueueGlobalPriorityEXT = QueueGlobalPriorityKHR; - //=== VK_AMD_memory_overallocation_behavior === enum class MemoryOverallocationBehaviorAMD @@ -6216,38 +6462,6 @@ namespace VULKAN_HPP_NAMESPACE eFloat64 = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR }; - //=== VK_EXT_host_image_copy === - - enum class HostImageCopyFlagBitsEXT : VkHostImageCopyFlagsEXT - { - eMemcpy = VK_HOST_IMAGE_COPY_MEMCPY_EXT - }; - - using HostImageCopyFlagsEXT = Flags; - - template <> - struct FlagTraits - { - static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; - static VULKAN_HPP_CONST_OR_CONSTEXPR HostImageCopyFlagsEXT allFlags = HostImageCopyFlagBitsEXT::eMemcpy; - 
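// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored header or of this diff. The hunk
// above promotes several EXT/KHR enum and flag types to unsuffixed Vulkan 1.4
// core names and keeps the suffixed names as aliases of the same types. The
// snippet below only illustrates that both spellings interoperate; it assumes
// the default VULKAN_HPP_NAMESPACE (vk), a vulkan.hpp new enough to define
// these types, and an illustrative function name.
#include <vulkan/vulkan.hpp>

inline void flagPromotionSketch()
{
  // Core (unsuffixed) spellings added by the hunk above.
  vk::BufferUsageFlags2 usage =
    vk::BufferUsageFlagBits2::eVertexBuffer | vk::BufferUsageFlagBits2::eTransferDst;
  vk::PipelineCreateFlags2 pipelineFlags = vk::PipelineCreateFlagBits2::eDisableOptimization;
  vk::HostImageCopyFlags hostCopyFlags = vk::HostImageCopyFlagBits::eMemcpy;

  // The suffixed aliases name the very same types, so pre-1.4 code keeps compiling.
  vk::BufferUsageFlags2KHR legacyUsage = usage;
  vk::HostImageCopyFlagsEXT legacyHostCopyFlags = hostCopyFlags;
  (void) pipelineFlags;
  (void) legacyUsage;
  (void) legacyHostCopyFlags;
}
// ---------------------------------------------------------------------------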
}; - - //=== VK_KHR_map_memory2 === - - enum class MemoryUnmapFlagBitsKHR : VkMemoryUnmapFlagsKHR - { - eReserveEXT = VK_MEMORY_UNMAP_RESERVE_BIT_EXT - }; - - using MemoryUnmapFlagsKHR = Flags; - - template <> - struct FlagTraits - { - static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; - static VULKAN_HPP_CONST_OR_CONSTEXPR MemoryUnmapFlagsKHR allFlags = MemoryUnmapFlagBitsKHR::eReserveEXT; - }; - //=== VK_EXT_surface_maintenance1 === enum class PresentScalingFlagBitsEXT : VkPresentScalingFlagsEXT @@ -7074,134 +7288,6 @@ namespace VULKAN_HPP_NAMESPACE static VULKAN_HPP_CONST_OR_CONSTEXPR OpticalFlowExecuteFlagsNV allFlags = OpticalFlowExecuteFlagBitsNV::eDisableTemporalHints; }; - //=== VK_KHR_maintenance5 === - - enum class PipelineCreateFlagBits2KHR : VkPipelineCreateFlags2KHR - { - eDisableOptimization = VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR, - eAllowDerivatives = VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT_KHR, - eDerivative = VK_PIPELINE_CREATE_2_DERIVATIVE_BIT_KHR, -#if defined( VK_ENABLE_BETA_EXTENSIONS ) - eExecutionGraphAMDX = VK_PIPELINE_CREATE_2_EXECUTION_GRAPH_BIT_AMDX, -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - eEnableLegacyDitheringEXT = VK_PIPELINE_CREATE_2_ENABLE_LEGACY_DITHERING_BIT_EXT, - eViewIndexFromDeviceIndex = VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR, - eDispatchBase = VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR, - eDeferCompileNV = VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV, - eCaptureStatistics = VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR, - eCaptureInternalRepresentations = VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR, - eFailOnPipelineCompileRequired = VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR, - eEarlyReturnOnFailure = VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR, - eLinkTimeOptimizationEXT = VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT, - eRetainLinkTimeOptimizationInfoEXT = VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT, - eLibrary = VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR, - eRayTracingSkipTriangles = VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR, - eRayTracingSkipAabbs = VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR, - eRayTracingNoNullAnyHitShaders = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR, - eRayTracingNoNullClosestHitShaders = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR, - eRayTracingNoNullMissShaders = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR, - eRayTracingNoNullIntersectionShaders = VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR, - eRayTracingShaderGroupHandleCaptureReplay = VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR, - eIndirectBindableNV = VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_NV, - eRayTracingAllowMotionNV = VK_PIPELINE_CREATE_2_RAY_TRACING_ALLOW_MOTION_BIT_NV, - eRenderingFragmentShadingRateAttachment = VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, - eRenderingFragmentDensityMapAttachmentEXT = VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT, - eRayTracingOpacityMicromapEXT = VK_PIPELINE_CREATE_2_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT, - eColorAttachmentFeedbackLoopEXT = VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT, - eDepthStencilAttachmentFeedbackLoopEXT = VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT, - eNoProtectedAccessEXT = VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT_EXT, - eProtectedAccessOnlyEXT = 
VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT, - eRayTracingDisplacementMicromapNV = VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV, - eDescriptorBufferEXT = VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT, - eCaptureData = VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR, - eIndirectBindableEXT = VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_EXT - }; - - using PipelineCreateFlags2KHR = Flags; - - template <> - struct FlagTraits - { - static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; - static VULKAN_HPP_CONST_OR_CONSTEXPR PipelineCreateFlags2KHR allFlags = - PipelineCreateFlagBits2KHR::eDisableOptimization | PipelineCreateFlagBits2KHR::eAllowDerivatives | PipelineCreateFlagBits2KHR::eDerivative -#if defined( VK_ENABLE_BETA_EXTENSIONS ) - | PipelineCreateFlagBits2KHR::eExecutionGraphAMDX -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - | PipelineCreateFlagBits2KHR::eEnableLegacyDitheringEXT | PipelineCreateFlagBits2KHR::eViewIndexFromDeviceIndex | - PipelineCreateFlagBits2KHR::eDispatchBase | PipelineCreateFlagBits2KHR::eDeferCompileNV | PipelineCreateFlagBits2KHR::eCaptureStatistics | - PipelineCreateFlagBits2KHR::eCaptureInternalRepresentations | PipelineCreateFlagBits2KHR::eFailOnPipelineCompileRequired | - PipelineCreateFlagBits2KHR::eEarlyReturnOnFailure | PipelineCreateFlagBits2KHR::eLinkTimeOptimizationEXT | - PipelineCreateFlagBits2KHR::eRetainLinkTimeOptimizationInfoEXT | PipelineCreateFlagBits2KHR::eLibrary | - PipelineCreateFlagBits2KHR::eRayTracingSkipTriangles | PipelineCreateFlagBits2KHR::eRayTracingSkipAabbs | - PipelineCreateFlagBits2KHR::eRayTracingNoNullAnyHitShaders | PipelineCreateFlagBits2KHR::eRayTracingNoNullClosestHitShaders | - PipelineCreateFlagBits2KHR::eRayTracingNoNullMissShaders | PipelineCreateFlagBits2KHR::eRayTracingNoNullIntersectionShaders | - PipelineCreateFlagBits2KHR::eRayTracingShaderGroupHandleCaptureReplay | PipelineCreateFlagBits2KHR::eIndirectBindableNV | - PipelineCreateFlagBits2KHR::eRayTracingAllowMotionNV | PipelineCreateFlagBits2KHR::eRenderingFragmentShadingRateAttachment | - PipelineCreateFlagBits2KHR::eRenderingFragmentDensityMapAttachmentEXT | PipelineCreateFlagBits2KHR::eRayTracingOpacityMicromapEXT | - PipelineCreateFlagBits2KHR::eColorAttachmentFeedbackLoopEXT | PipelineCreateFlagBits2KHR::eDepthStencilAttachmentFeedbackLoopEXT | - PipelineCreateFlagBits2KHR::eNoProtectedAccessEXT | PipelineCreateFlagBits2KHR::eProtectedAccessOnlyEXT | - PipelineCreateFlagBits2KHR::eRayTracingDisplacementMicromapNV | PipelineCreateFlagBits2KHR::eDescriptorBufferEXT | - PipelineCreateFlagBits2KHR::eCaptureData | PipelineCreateFlagBits2KHR::eIndirectBindableEXT; - }; - - enum class BufferUsageFlagBits2KHR : VkBufferUsageFlags2KHR - { - eTransferSrc = VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT_KHR, - eTransferDst = VK_BUFFER_USAGE_2_TRANSFER_DST_BIT_KHR, - eUniformTexelBuffer = VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR, - eStorageTexelBuffer = VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR, - eUniformBuffer = VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR, - eStorageBuffer = VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR, - eIndexBuffer = VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT_KHR, - eVertexBuffer = VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT_KHR, - eIndirectBuffer = VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR, -#if defined( VK_ENABLE_BETA_EXTENSIONS ) - eExecutionGraphScratchAMDX = VK_BUFFER_USAGE_2_EXECUTION_GRAPH_SCRATCH_BIT_AMDX, -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - eConditionalRenderingEXT = VK_BUFFER_USAGE_2_CONDITIONAL_RENDERING_BIT_EXT, - eShaderBindingTable = 
VK_BUFFER_USAGE_2_SHADER_BINDING_TABLE_BIT_KHR, - eRayTracingNV = VK_BUFFER_USAGE_2_RAY_TRACING_BIT_NV, - eTransformFeedbackBufferEXT = VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT, - eTransformFeedbackCounterBufferEXT = VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT, - eVideoDecodeSrc = VK_BUFFER_USAGE_2_VIDEO_DECODE_SRC_BIT_KHR, - eVideoDecodeDst = VK_BUFFER_USAGE_2_VIDEO_DECODE_DST_BIT_KHR, - eVideoEncodeDst = VK_BUFFER_USAGE_2_VIDEO_ENCODE_DST_BIT_KHR, - eVideoEncodeSrc = VK_BUFFER_USAGE_2_VIDEO_ENCODE_SRC_BIT_KHR, - eShaderDeviceAddress = VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT_KHR, - eAccelerationStructureBuildInputReadOnly = VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR, - eAccelerationStructureStorage = VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR, - eSamplerDescriptorBufferEXT = VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT, - eResourceDescriptorBufferEXT = VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT, - ePushDescriptorsDescriptorBufferEXT = VK_BUFFER_USAGE_2_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT, - eMicromapBuildInputReadOnlyEXT = VK_BUFFER_USAGE_2_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT, - eMicromapStorageEXT = VK_BUFFER_USAGE_2_MICROMAP_STORAGE_BIT_EXT, - ePreprocessBufferEXT = VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT - }; - - using BufferUsageFlags2KHR = Flags; - - template <> - struct FlagTraits - { - static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; - static VULKAN_HPP_CONST_OR_CONSTEXPR BufferUsageFlags2KHR allFlags = - BufferUsageFlagBits2KHR::eTransferSrc | BufferUsageFlagBits2KHR::eTransferDst | BufferUsageFlagBits2KHR::eUniformTexelBuffer | - BufferUsageFlagBits2KHR::eStorageTexelBuffer | BufferUsageFlagBits2KHR::eUniformBuffer | BufferUsageFlagBits2KHR::eStorageBuffer | - BufferUsageFlagBits2KHR::eIndexBuffer | BufferUsageFlagBits2KHR::eVertexBuffer | BufferUsageFlagBits2KHR::eIndirectBuffer -#if defined( VK_ENABLE_BETA_EXTENSIONS ) - | BufferUsageFlagBits2KHR::eExecutionGraphScratchAMDX -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - | BufferUsageFlagBits2KHR::eConditionalRenderingEXT | BufferUsageFlagBits2KHR::eShaderBindingTable | - BufferUsageFlagBits2KHR::eTransformFeedbackBufferEXT | BufferUsageFlagBits2KHR::eTransformFeedbackCounterBufferEXT | - BufferUsageFlagBits2KHR::eVideoDecodeSrc | BufferUsageFlagBits2KHR::eVideoDecodeDst | BufferUsageFlagBits2KHR::eVideoEncodeDst | - BufferUsageFlagBits2KHR::eVideoEncodeSrc | BufferUsageFlagBits2KHR::eShaderDeviceAddress | - BufferUsageFlagBits2KHR::eAccelerationStructureBuildInputReadOnly | BufferUsageFlagBits2KHR::eAccelerationStructureStorage | - BufferUsageFlagBits2KHR::eSamplerDescriptorBufferEXT | BufferUsageFlagBits2KHR::eResourceDescriptorBufferEXT | - BufferUsageFlagBits2KHR::ePushDescriptorsDescriptorBufferEXT | BufferUsageFlagBits2KHR::eMicromapBuildInputReadOnlyEXT | - BufferUsageFlagBits2KHR::eMicromapStorageEXT | BufferUsageFlagBits2KHR::ePreprocessBufferEXT; - }; - //=== VK_AMD_anti_lag === enum class AntiLagModeAMD @@ -7509,17 +7595,6 @@ namespace VULKAN_HPP_NAMESPACE eD3D12 = VK_LAYERED_DRIVER_UNDERLYING_API_D3D12_MSFT }; - //=== VK_KHR_line_rasterization === - - enum class LineRasterizationModeKHR - { - eDefault = VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR, - eRectangular = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR, - eBresenham = VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR, - eRectangularSmooth = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR - }; - using LineRasterizationModeEXT = LineRasterizationModeKHR; - //=== 
VK_KHR_calibrated_timestamps === enum class TimeDomainKHR diff --git a/third_party/vulkan/vulkan_extension_inspection.hpp b/third_party/vulkan/vulkan_extension_inspection.hpp index bcd9d11..4829143 100644 --- a/third_party/vulkan/vulkan_extension_inspection.hpp +++ b/third_party/vulkan/vulkan_extension_inspection.hpp @@ -1517,7 +1517,8 @@ namespace VULKAN_HPP_NAMESPACE { { "VK_VERSION_1_0", { { "VK_KHR_map_memory2", - } } } } }, + } } }, + { "VK_VERSION_1_4", { {} } } } }, { "VK_EXT_shader_atomic_float2", { { "VK_VERSION_1_0", { { @@ -2267,7 +2268,8 @@ namespace VULKAN_HPP_NAMESPACE { { "VK_VERSION_1_0", { { "VK_KHR_maintenance6", - } } } } }, + } } }, + { "VK_VERSION_1_4", { {} } } } }, { "VK_QCOM_image_processing2", { { "VK_VERSION_1_0", { { @@ -2408,7 +2410,7 @@ namespace VULKAN_HPP_NAMESPACE std::string const & extension ) { #if !defined( NDEBUG ) - static std::set versions = { "VK_VERSION_1_0", "VK_VERSION_1_1", "VK_VERSION_1_2", "VK_VERSION_1_3" }; + static std::set versions = { "VK_VERSION_1_0", "VK_VERSION_1_1", "VK_VERSION_1_2", "VK_VERSION_1_3", "VK_VERSION_1_4" }; assert( versions.find( version ) != versions.end() ); #endif static std::vector> noDependencies; @@ -2454,12 +2456,14 @@ namespace VULKAN_HPP_NAMESPACE { "VK_KHR_device_group", "VK_VERSION_1_1" }, { "VK_KHR_shader_draw_parameters", "VK_VERSION_1_1" }, { "VK_EXT_texture_compression_astc_hdr", "VK_VERSION_1_3" }, + { "VK_EXT_pipeline_robustness", "VK_VERSION_1_4" }, { "VK_KHR_maintenance1", "VK_VERSION_1_1" }, { "VK_KHR_device_group_creation", "VK_VERSION_1_1" }, { "VK_KHR_external_memory_capabilities", "VK_VERSION_1_1" }, { "VK_KHR_external_memory", "VK_VERSION_1_1" }, { "VK_KHR_external_semaphore_capabilities", "VK_VERSION_1_1" }, { "VK_KHR_external_semaphore", "VK_VERSION_1_1" }, + { "VK_KHR_push_descriptor", "VK_VERSION_1_4" }, { "VK_KHR_shader_float16_int8", "VK_VERSION_1_2" }, { "VK_KHR_16bit_storage", "VK_VERSION_1_1" }, { "VK_KHR_descriptor_update_template", "VK_VERSION_1_1" }, @@ -2487,6 +2491,7 @@ namespace VULKAN_HPP_NAMESPACE { "VK_KHR_8bit_storage", "VK_VERSION_1_2" }, { "VK_KHR_shader_atomic_int64", "VK_VERSION_1_2" }, { "VK_EXT_calibrated_timestamps", "VK_KHR_calibrated_timestamps" }, + { "VK_KHR_global_priority", "VK_VERSION_1_4" }, { "VK_EXT_vertex_attribute_divisor", "VK_KHR_vertex_attribute_divisor" }, { "VK_EXT_pipeline_creation_feedback", "VK_VERSION_1_3" }, { "VK_KHR_driver_properties", "VK_VERSION_1_2" }, @@ -2499,6 +2504,7 @@ namespace VULKAN_HPP_NAMESPACE { "VK_KHR_shader_terminate_invocation", "VK_VERSION_1_3" }, { "VK_EXT_scalar_block_layout", "VK_VERSION_1_2" }, { "VK_EXT_subgroup_size_control", "VK_VERSION_1_3" }, + { "VK_KHR_dynamic_rendering_local_read", "VK_VERSION_1_4" }, { "VK_KHR_spirv_1_4", "VK_VERSION_1_2" }, { "VK_KHR_separate_depth_stencil_layouts", "VK_VERSION_1_2" }, { "VK_EXT_tooling_info", "VK_VERSION_1_3" }, @@ -2509,6 +2515,8 @@ namespace VULKAN_HPP_NAMESPACE { "VK_EXT_host_query_reset", "VK_VERSION_1_2" }, { "VK_EXT_index_type_uint8", "VK_KHR_index_type_uint8" }, { "VK_EXT_extended_dynamic_state", "VK_VERSION_1_3" }, + { "VK_EXT_host_image_copy", "VK_VERSION_1_4" }, + { "VK_KHR_map_memory2", "VK_VERSION_1_4" }, { "VK_EXT_shader_demote_to_helper_invocation", "VK_VERSION_1_3" }, { "VK_KHR_shader_integer_dot_product", "VK_VERSION_1_3" }, { "VK_EXT_texel_buffer_alignment", "VK_VERSION_1_3" }, @@ -2527,7 +2535,17 @@ namespace VULKAN_HPP_NAMESPACE { "VK_EXT_extended_dynamic_state2", "VK_VERSION_1_3" }, { "VK_EXT_global_priority_query", "VK_KHR_global_priority" }, { 
"VK_EXT_load_store_op_none", "VK_KHR_load_store_op_none" }, - { "VK_KHR_maintenance4", "VK_VERSION_1_3" } + { "VK_KHR_maintenance4", "VK_VERSION_1_3" }, + { "VK_KHR_shader_subgroup_rotate", "VK_VERSION_1_4" }, + { "VK_EXT_pipeline_protected_access", "VK_VERSION_1_4" }, + { "VK_KHR_maintenance5", "VK_VERSION_1_4" }, + { "VK_KHR_vertex_attribute_divisor", "VK_VERSION_1_4" }, + { "VK_KHR_load_store_op_none", "VK_VERSION_1_4" }, + { "VK_KHR_shader_float_controls2", "VK_VERSION_1_4" }, + { "VK_KHR_index_type_uint8", "VK_VERSION_1_4" }, + { "VK_KHR_line_rasterization", "VK_VERSION_1_4" }, + { "VK_KHR_shader_expect_assume", "VK_VERSION_1_4" }, + { "VK_KHR_maintenance6", "VK_VERSION_1_4" } }; return promotedExtensions; } @@ -2664,6 +2682,10 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_VERSION_1_3"; } + if ( extension == "VK_EXT_pipeline_robustness" ) + { + return "VK_VERSION_1_4"; + } if ( extension == "VK_KHR_maintenance1" ) { return "VK_VERSION_1_1"; @@ -2688,6 +2710,10 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_VERSION_1_1"; } + if ( extension == "VK_KHR_push_descriptor" ) + { + return "VK_VERSION_1_4"; + } if ( extension == "VK_KHR_shader_float16_int8" ) { return "VK_VERSION_1_2"; @@ -2796,6 +2822,10 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_KHR_calibrated_timestamps"; } + if ( extension == "VK_KHR_global_priority" ) + { + return "VK_VERSION_1_4"; + } if ( extension == "VK_EXT_vertex_attribute_divisor" ) { return "VK_KHR_vertex_attribute_divisor"; @@ -2844,6 +2874,10 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_VERSION_1_3"; } + if ( extension == "VK_KHR_dynamic_rendering_local_read" ) + { + return "VK_VERSION_1_4"; + } if ( extension == "VK_KHR_spirv_1_4" ) { return "VK_VERSION_1_2"; @@ -2884,6 +2918,14 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_VERSION_1_3"; } + if ( extension == "VK_EXT_host_image_copy" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_map_memory2" ) + { + return "VK_VERSION_1_4"; + } if ( extension == "VK_EXT_shader_demote_to_helper_invocation" ) { return "VK_VERSION_1_3"; @@ -2960,6 +3002,46 @@ namespace VULKAN_HPP_NAMESPACE { return "VK_VERSION_1_3"; } + if ( extension == "VK_KHR_shader_subgroup_rotate" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_EXT_pipeline_protected_access" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_maintenance5" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_vertex_attribute_divisor" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_load_store_op_none" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_shader_float_controls2" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_index_type_uint8" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_line_rasterization" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_shader_expect_assume" ) + { + return "VK_VERSION_1_4"; + } + if ( extension == "VK_KHR_maintenance6" ) + { + return "VK_VERSION_1_4"; + } return ""; } @@ -3255,30 +3337,31 @@ namespace VULKAN_HPP_NAMESPACE #endif /*VK_USE_PLATFORM_WIN32_KHR*/ ( extension == "VK_KHR_get_physical_device_properties2" ) || ( extension == "VK_KHR_device_group" ) || ( extension == "VK_KHR_shader_draw_parameters" ) || ( extension == "VK_EXT_texture_compression_astc_hdr" ) || - ( extension == "VK_KHR_maintenance1" ) || ( extension == "VK_KHR_device_group_creation" ) || + ( extension == "VK_EXT_pipeline_robustness" ) || ( extension == "VK_KHR_maintenance1" ) || ( extension == "VK_KHR_device_group_creation" ) 
|| ( extension == "VK_KHR_external_memory_capabilities" ) || ( extension == "VK_KHR_external_memory" ) || ( extension == "VK_KHR_external_semaphore_capabilities" ) || ( extension == "VK_KHR_external_semaphore" ) || - ( extension == "VK_KHR_shader_float16_int8" ) || ( extension == "VK_KHR_16bit_storage" ) || ( extension == "VK_KHR_descriptor_update_template" ) || - ( extension == "VK_KHR_imageless_framebuffer" ) || ( extension == "VK_KHR_create_renderpass2" ) || - ( extension == "VK_KHR_external_fence_capabilities" ) || ( extension == "VK_KHR_external_fence" ) || ( extension == "VK_KHR_maintenance2" ) || - ( extension == "VK_KHR_variable_pointers" ) || ( extension == "VK_KHR_dedicated_allocation" ) || ( extension == "VK_EXT_sampler_filter_minmax" ) || - ( extension == "VK_KHR_storage_buffer_storage_class" ) || ( extension == "VK_EXT_inline_uniform_block" ) || - ( extension == "VK_KHR_relaxed_block_layout" ) || ( extension == "VK_KHR_get_memory_requirements2" ) || - ( extension == "VK_KHR_image_format_list" ) || ( extension == "VK_KHR_sampler_ycbcr_conversion" ) || ( extension == "VK_KHR_bind_memory2" ) || - ( extension == "VK_EXT_descriptor_indexing" ) || ( extension == "VK_EXT_shader_viewport_index_layer" ) || ( extension == "VK_KHR_maintenance3" ) || - ( extension == "VK_KHR_draw_indirect_count" ) || ( extension == "VK_EXT_global_priority" ) || - ( extension == "VK_KHR_shader_subgroup_extended_types" ) || ( extension == "VK_KHR_8bit_storage" ) || - ( extension == "VK_KHR_shader_atomic_int64" ) || ( extension == "VK_EXT_calibrated_timestamps" ) || + ( extension == "VK_KHR_push_descriptor" ) || ( extension == "VK_KHR_shader_float16_int8" ) || ( extension == "VK_KHR_16bit_storage" ) || + ( extension == "VK_KHR_descriptor_update_template" ) || ( extension == "VK_KHR_imageless_framebuffer" ) || + ( extension == "VK_KHR_create_renderpass2" ) || ( extension == "VK_KHR_external_fence_capabilities" ) || ( extension == "VK_KHR_external_fence" ) || + ( extension == "VK_KHR_maintenance2" ) || ( extension == "VK_KHR_variable_pointers" ) || ( extension == "VK_KHR_dedicated_allocation" ) || + ( extension == "VK_EXT_sampler_filter_minmax" ) || ( extension == "VK_KHR_storage_buffer_storage_class" ) || + ( extension == "VK_EXT_inline_uniform_block" ) || ( extension == "VK_KHR_relaxed_block_layout" ) || + ( extension == "VK_KHR_get_memory_requirements2" ) || ( extension == "VK_KHR_image_format_list" ) || + ( extension == "VK_KHR_sampler_ycbcr_conversion" ) || ( extension == "VK_KHR_bind_memory2" ) || ( extension == "VK_EXT_descriptor_indexing" ) || + ( extension == "VK_EXT_shader_viewport_index_layer" ) || ( extension == "VK_KHR_maintenance3" ) || ( extension == "VK_KHR_draw_indirect_count" ) || + ( extension == "VK_EXT_global_priority" ) || ( extension == "VK_KHR_shader_subgroup_extended_types" ) || ( extension == "VK_KHR_8bit_storage" ) || + ( extension == "VK_KHR_shader_atomic_int64" ) || ( extension == "VK_EXT_calibrated_timestamps" ) || ( extension == "VK_KHR_global_priority" ) || ( extension == "VK_EXT_vertex_attribute_divisor" ) || ( extension == "VK_EXT_pipeline_creation_feedback" ) || ( extension == "VK_KHR_driver_properties" ) || ( extension == "VK_KHR_shader_float_controls" ) || ( extension == "VK_KHR_depth_stencil_resolve" ) || ( extension == "VK_NV_compute_shader_derivatives" ) || ( extension == "VK_NV_fragment_shader_barycentric" ) || ( extension == "VK_KHR_timeline_semaphore" ) || ( extension == "VK_KHR_vulkan_memory_model" ) || ( extension == "VK_KHR_shader_terminate_invocation" ) || ( 
extension == "VK_EXT_scalar_block_layout" ) || - ( extension == "VK_EXT_subgroup_size_control" ) || ( extension == "VK_KHR_spirv_1_4" ) || ( extension == "VK_KHR_separate_depth_stencil_layouts" ) || - ( extension == "VK_EXT_tooling_info" ) || ( extension == "VK_EXT_separate_stencil_usage" ) || - ( extension == "VK_KHR_uniform_buffer_standard_layout" ) || ( extension == "VK_KHR_buffer_device_address" ) || - ( extension == "VK_EXT_line_rasterization" ) || ( extension == "VK_EXT_host_query_reset" ) || ( extension == "VK_EXT_index_type_uint8" ) || - ( extension == "VK_EXT_extended_dynamic_state" ) || ( extension == "VK_EXT_shader_demote_to_helper_invocation" ) || + ( extension == "VK_EXT_subgroup_size_control" ) || ( extension == "VK_KHR_dynamic_rendering_local_read" ) || ( extension == "VK_KHR_spirv_1_4" ) || + ( extension == "VK_KHR_separate_depth_stencil_layouts" ) || ( extension == "VK_EXT_tooling_info" ) || + ( extension == "VK_EXT_separate_stencil_usage" ) || ( extension == "VK_KHR_uniform_buffer_standard_layout" ) || + ( extension == "VK_KHR_buffer_device_address" ) || ( extension == "VK_EXT_line_rasterization" ) || ( extension == "VK_EXT_host_query_reset" ) || + ( extension == "VK_EXT_index_type_uint8" ) || ( extension == "VK_EXT_extended_dynamic_state" ) || ( extension == "VK_EXT_host_image_copy" ) || + ( extension == "VK_KHR_map_memory2" ) || ( extension == "VK_EXT_shader_demote_to_helper_invocation" ) || ( extension == "VK_KHR_shader_integer_dot_product" ) || ( extension == "VK_EXT_texel_buffer_alignment" ) || ( extension == "VK_KHR_shader_non_semantic_info" ) || ( extension == "VK_EXT_private_data" ) || ( extension == "VK_EXT_pipeline_creation_cache_control" ) || ( extension == "VK_KHR_synchronization2" ) || @@ -3286,7 +3369,11 @@ namespace VULKAN_HPP_NAMESPACE ( extension == "VK_EXT_image_robustness" ) || ( extension == "VK_KHR_copy_commands2" ) || ( extension == "VK_EXT_4444_formats" ) || ( extension == "VK_ARM_rasterization_order_attachment_access" ) || ( extension == "VK_VALVE_mutable_descriptor_type" ) || ( extension == "VK_KHR_format_feature_flags2" ) || ( extension == "VK_EXT_extended_dynamic_state2" ) || - ( extension == "VK_EXT_global_priority_query" ) || ( extension == "VK_EXT_load_store_op_none" ) || ( extension == "VK_KHR_maintenance4" ); + ( extension == "VK_EXT_global_priority_query" ) || ( extension == "VK_EXT_load_store_op_none" ) || ( extension == "VK_KHR_maintenance4" ) || + ( extension == "VK_KHR_shader_subgroup_rotate" ) || ( extension == "VK_EXT_pipeline_protected_access" ) || ( extension == "VK_KHR_maintenance5" ) || + ( extension == "VK_KHR_vertex_attribute_divisor" ) || ( extension == "VK_KHR_load_store_op_none" ) || + ( extension == "VK_KHR_shader_float_controls2" ) || ( extension == "VK_KHR_index_type_uint8" ) || ( extension == "VK_KHR_line_rasterization" ) || + ( extension == "VK_KHR_shader_expect_assume" ) || ( extension == "VK_KHR_maintenance6" ); } } // namespace VULKAN_HPP_NAMESPACE diff --git a/third_party/vulkan/vulkan_format_traits.hpp b/third_party/vulkan/vulkan_format_traits.hpp index 25790fd..75d9d80 100644 --- a/third_party/vulkan/vulkan_format_traits.hpp +++ b/third_party/vulkan/vulkan_format_traits.hpp @@ -354,6 +354,8 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::eAstc10x10SfloatBlock: return 16; case VULKAN_HPP_NAMESPACE::Format::eAstc12x10SfloatBlock: return 16; case VULKAN_HPP_NAMESPACE::Format::eAstc12x12SfloatBlock: return 16; + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: return 2; + case 
VULKAN_HPP_NAMESPACE::Format::eA8Unorm: return 1; case VULKAN_HPP_NAMESPACE::Format::ePvrtc12BppUnormBlockIMG: return 8; case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppUnormBlockIMG: return 8; case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppUnormBlockIMG: return 8; @@ -363,8 +365,6 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return 8; case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return 8; case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return 4; - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return 2; - case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return 1; default: VULKAN_HPP_ASSERT( false ); return 0; } @@ -613,6 +613,8 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::eAstc10x10SfloatBlock: return "ASTC_10x10"; case VULKAN_HPP_NAMESPACE::Format::eAstc12x10SfloatBlock: return "ASTC_12x10"; case VULKAN_HPP_NAMESPACE::Format::eAstc12x12SfloatBlock: return "ASTC_12x12"; + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: return "16-bit"; + case VULKAN_HPP_NAMESPACE::Format::eA8Unorm: return "8-bit alpha"; case VULKAN_HPP_NAMESPACE::Format::ePvrtc12BppUnormBlockIMG: return "PVRTC1_2BPP"; case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppUnormBlockIMG: return "PVRTC1_4BPP"; case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppUnormBlockIMG: return "PVRTC2_2BPP"; @@ -622,8 +624,6 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return "PVRTC2_2BPP"; case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return "PVRTC2_4BPP"; case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return "32-bit"; - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return "16-bit"; - case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return "8-bit alpha"; default: VULKAN_HPP_ASSERT( false ); return ""; } @@ -2005,14 +2005,7 @@ namespace VULKAN_HPP_NAMESPACE case 3: return 4; default: VULKAN_HPP_ASSERT( false ); return 0; } - case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: - switch ( component ) - { - case 0: return 16; - case 1: return 16; - default: VULKAN_HPP_ASSERT( false ); return 0; - } - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: switch ( component ) { case 0: return 1; @@ -2021,12 +2014,19 @@ namespace VULKAN_HPP_NAMESPACE case 3: return 5; default: VULKAN_HPP_ASSERT( false ); return 0; } - case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: + case VULKAN_HPP_NAMESPACE::Format::eA8Unorm: switch ( component ) { case 0: return 8; default: VULKAN_HPP_ASSERT( false ); return 0; } + case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: + switch ( component ) + { + case 0: return 16; + case 1: return 16; + default: VULKAN_HPP_ASSERT( false ); return 0; + } default: return 0; } @@ -2275,6 +2275,8 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::eAstc10x10SfloatBlock: return 4; case VULKAN_HPP_NAMESPACE::Format::eAstc12x10SfloatBlock: return 4; case VULKAN_HPP_NAMESPACE::Format::eAstc12x12SfloatBlock: return 4; + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: return 4; + case VULKAN_HPP_NAMESPACE::Format::eA8Unorm: return 1; case VULKAN_HPP_NAMESPACE::Format::ePvrtc12BppUnormBlockIMG: return 4; case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppUnormBlockIMG: return 4; case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppUnormBlockIMG: return 4; @@ -2284,8 +2286,6 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return 4; 
case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return 4; case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return 2; - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return 4; - case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return 1; default: return 0; } @@ -4227,6 +4227,21 @@ namespace VULKAN_HPP_NAMESPACE case 3: return "A"; default: VULKAN_HPP_ASSERT( false ); return ""; } + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: + switch ( component ) + { + case 0: return "A"; + case 1: return "B"; + case 2: return "G"; + case 3: return "R"; + default: VULKAN_HPP_ASSERT( false ); return ""; + } + case VULKAN_HPP_NAMESPACE::Format::eA8Unorm: + switch ( component ) + { + case 0: return "A"; + default: VULKAN_HPP_ASSERT( false ); return ""; + } case VULKAN_HPP_NAMESPACE::Format::ePvrtc12BppUnormBlockIMG: switch ( component ) { @@ -4306,21 +4321,6 @@ namespace VULKAN_HPP_NAMESPACE case 1: return "G"; default: VULKAN_HPP_ASSERT( false ); return ""; } - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: - switch ( component ) - { - case 0: return "A"; - case 1: return "B"; - case 2: return "G"; - case 3: return "R"; - default: VULKAN_HPP_ASSERT( false ); return ""; - } - case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: - switch ( component ) - { - case 0: return "A"; - default: VULKAN_HPP_ASSERT( false ); return ""; - } default: return ""; } @@ -6262,6 +6262,21 @@ namespace VULKAN_HPP_NAMESPACE case 3: return "SFLOAT"; default: VULKAN_HPP_ASSERT( false ); return ""; } + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: + switch ( component ) + { + case 0: return "UNORM"; + case 1: return "UNORM"; + case 2: return "UNORM"; + case 3: return "UNORM"; + default: VULKAN_HPP_ASSERT( false ); return ""; + } + case VULKAN_HPP_NAMESPACE::Format::eA8Unorm: + switch ( component ) + { + case 0: return "UNORM"; + default: VULKAN_HPP_ASSERT( false ); return ""; + } case VULKAN_HPP_NAMESPACE::Format::ePvrtc12BppUnormBlockIMG: switch ( component ) { @@ -6341,21 +6356,6 @@ namespace VULKAN_HPP_NAMESPACE case 1: return "SFIXED5"; default: VULKAN_HPP_ASSERT( false ); return ""; } - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: - switch ( component ) - { - case 0: return "UNORM"; - case 1: return "UNORM"; - case 2: return "UNORM"; - case 3: return "UNORM"; - default: VULKAN_HPP_ASSERT( false ); return ""; - } - case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: - switch ( component ) - { - case 0: return "UNORM"; - default: VULKAN_HPP_ASSERT( false ); return ""; - } default: return ""; } @@ -6796,7 +6796,7 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::eG12X4B12X4R12X42Plane444Unorm3Pack16: return 16; case VULKAN_HPP_NAMESPACE::Format::eA4R4G4B4UnormPack16: return 16; case VULKAN_HPP_NAMESPACE::Format::eA4B4G4R4UnormPack16: return 16; - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return 16; + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: return 16; default: return 0; } @@ -7649,6 +7649,8 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::eAstc10x10SfloatBlock: return 100; case VULKAN_HPP_NAMESPACE::Format::eAstc12x10SfloatBlock: return 120; case VULKAN_HPP_NAMESPACE::Format::eAstc12x12SfloatBlock: return 144; + case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16: return 1; + case VULKAN_HPP_NAMESPACE::Format::eA8Unorm: return 1; case VULKAN_HPP_NAMESPACE::Format::ePvrtc12BppUnormBlockIMG: return 1; case VULKAN_HPP_NAMESPACE::Format::ePvrtc14BppUnormBlockIMG: return 1; case 
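// ---------------------------------------------------------------------------
// Editorial sketch, not part of the vendored header or of this diff. The
// vulkan_format_traits.hpp hunk above teaches the format helpers about the
// formats promoted to core in 1.4 (eA1B5G5R5UnormPack16 and eA8Unorm, formerly
// *KHR). A few lookups, assuming the free helpers defined in that header
// (blockSize, componentCount, componentName, packed):
#include <vulkan/vulkan_format_traits.hpp>
#include <cassert>
#include <string>

inline void formatTraitsSketch()
{
  assert( vk::blockSize( vk::Format::eA8Unorm ) == 1 );                   // one byte per texel
  assert( vk::componentCount( vk::Format::eA1B5G5R5UnormPack16 ) == 4 );  // A, B, G, R
  assert( vk::componentName( vk::Format::eA8Unorm, 0 ) == std::string( "A" ) );
  assert( vk::packed( vk::Format::eA1B5G5R5UnormPack16 ) == 16 );         // 16-bit packed format
}
// ---------------------------------------------------------------------------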
VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppUnormBlockIMG: return 1; @@ -7658,8 +7660,6 @@ namespace VULKAN_HPP_NAMESPACE case VULKAN_HPP_NAMESPACE::Format::ePvrtc22BppSrgbBlockIMG: return 1; case VULKAN_HPP_NAMESPACE::Format::ePvrtc24BppSrgbBlockIMG: return 1; case VULKAN_HPP_NAMESPACE::Format::eR16G16Sfixed5NV: return 1; - case VULKAN_HPP_NAMESPACE::Format::eA1B5G5R5UnormPack16KHR: return 1; - case VULKAN_HPP_NAMESPACE::Format::eA8UnormKHR: return 1; default: VULKAN_HPP_ASSERT( false ); return 0; } diff --git a/third_party/vulkan/vulkan_funcs.hpp b/third_party/vulkan/vulkan_funcs.hpp index 14e59f8..f7ecce5 100644 --- a/third_party/vulkan/vulkan_funcs.hpp +++ b/third_party/vulkan/vulkan_funcs.hpp @@ -8451,6 +8451,547 @@ namespace VULKAN_HPP_NAMESPACE } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_VERSION_1_4 === + + template + VULKAN_HPP_INLINE void CommandBuffer::setLineStipple( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdSetLineStipple( static_cast( m_commandBuffer ), lineStippleFactor, lineStipplePattern ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mapMemory2( const VULKAN_HPP_NAMESPACE::MemoryMapInfo * pMemoryMapInfo, + void ** ppData, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkMapMemory2( static_cast( m_device ), reinterpret_cast( pMemoryMapInfo ), ppData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType::type Device::mapMemory2( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkMapMemory2 && "Function requires or " ); +# endif + + void * pData; + VULKAN_HPP_NAMESPACE::Result result = + static_cast( d.vkMapMemory2( m_device, reinterpret_cast( &memoryMapInfo ), &pData ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pData ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::unmapMemory2( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo * pMemoryUnmapInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkUnmapMemory2( static_cast( m_device ), reinterpret_cast( pMemoryUnmapInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::unmapMemory2( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkUnmapMemory2 && "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::Result result = + static_cast( d.vkUnmapMemory2( m_device, reinterpret_cast( &memoryUnmapInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); + } +#endif /* 
VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer2( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + VULKAN_HPP_NAMESPACE::IndexType indexType, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdBindIndexBuffer2( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ), + static_cast( size ), + static_cast( indexType ) ); + } + + template + VULKAN_HPP_INLINE void Device::getRenderingAreaGranularity( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo * pRenderingAreaInfo, + VULKAN_HPP_NAMESPACE::Extent2D * pGranularity, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkGetRenderingAreaGranularity( + static_cast( m_device ), reinterpret_cast( pRenderingAreaInfo ), reinterpret_cast( pGranularity ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Extent2D + Device::getRenderingAreaGranularity( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkGetRenderingAreaGranularity && "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::Extent2D granularity; + d.vkGetRenderingAreaGranularity( + m_device, reinterpret_cast( &renderingAreaInfo ), reinterpret_cast( &granularity ) ); + + return granularity; + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void Device::getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo * pInfo, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkGetDeviceImageSubresourceLayout( + static_cast( m_device ), reinterpret_cast( pInfo ), reinterpret_cast( pLayout ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 + Device::getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkGetDeviceImageSubresourceLayout && + "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; + d.vkGetDeviceImageSubresourceLayout( + m_device, reinterpret_cast( &info ), reinterpret_cast( &layout ) ); + + return layout; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain + Device::getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkGetDeviceImageSubresourceLayout && + "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::StructureChain structureChain; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); + d.vkGetDeviceImageSubresourceLayout( + m_device, reinterpret_cast( 
&info ), reinterpret_cast( &layout ) ); + + return structureChain; + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void Device::getImageSubresourceLayout2( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkGetImageSubresourceLayout2( static_cast( m_device ), + static_cast( image ), + reinterpret_cast( pSubresource ), + reinterpret_cast( pLayout ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 Device::getImageSubresourceLayout2( + VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( + d.vkGetImageSubresourceLayout2 && + "Function requires or or or " ); +# endif + + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; + d.vkGetImageSubresourceLayout2( m_device, + static_cast( image ), + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); + + return layout; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain Device::getImageSubresourceLayout2( + VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( + d.vkGetImageSubresourceLayout2 && + "Function requires or or or " ); +# endif + + VULKAN_HPP_NAMESPACE::StructureChain structureChain; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); + d.vkGetImageSubresourceLayout2( m_device, + static_cast( image ), + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); + + return structureChain; + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VULKAN_HPP_NAMESPACE::WriteDescriptorSet * pDescriptorWrites, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdPushDescriptorSet( static_cast( m_commandBuffer ), + static_cast( pipelineBindPoint ), + static_cast( layout ), + set, + descriptorWriteCount, + reinterpret_cast( pDescriptorWrites ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void + CommandBuffer::pushDescriptorSet( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + VULKAN_HPP_NAMESPACE::ArrayProxy const & descriptorWrites, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSet && "Function requires or " ); +# endif + + d.vkCmdPushDescriptorSet( m_commandBuffer, + static_cast( pipelineBindPoint ), + static_cast( layout ), + set, + descriptorWrites.size(), + 
reinterpret_cast( descriptorWrites.data() ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + const void * pData, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdPushDescriptorSetWithTemplate( static_cast( m_commandBuffer ), + static_cast( descriptorUpdateTemplate ), + static_cast( layout ), + set, + pData ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + DataType const & data, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( + d.vkCmdPushDescriptorSetWithTemplate && + "Function requires or or " ); +# endif + + d.vkCmdPushDescriptorSetWithTemplate( m_commandBuffer, + static_cast( descriptorUpdateTemplate ), + static_cast( layout ), + set, + reinterpret_cast( &data ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void CommandBuffer::setRenderingAttachmentLocations( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo * pLocationInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdSetRenderingAttachmentLocations( static_cast( m_commandBuffer ), + reinterpret_cast( pLocationInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setRenderingAttachmentLocations( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCmdSetRenderingAttachmentLocations && + "Function requires or " ); +# endif + + d.vkCmdSetRenderingAttachmentLocations( m_commandBuffer, reinterpret_cast( &locationInfo ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void + CommandBuffer::setRenderingInputAttachmentIndices( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo * pInputAttachmentIndexInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdSetRenderingInputAttachmentIndices( static_cast( m_commandBuffer ), + reinterpret_cast( pInputAttachmentIndexInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void + CommandBuffer::setRenderingInputAttachmentIndices( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCmdSetRenderingInputAttachmentIndices && + "Function requires or " ); +# endif + + d.vkCmdSetRenderingInputAttachmentIndices( m_commandBuffer, reinterpret_cast( &inputAttachmentIndexInfo ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + 
VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets2( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo * pBindDescriptorSetsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdBindDescriptorSets2( static_cast( m_commandBuffer ), + reinterpret_cast( pBindDescriptorSetsInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets2( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCmdBindDescriptorSets2 && "Function requires or " ); +# endif + + d.vkCmdBindDescriptorSets2( m_commandBuffer, reinterpret_cast( &bindDescriptorSetsInfo ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void CommandBuffer::pushConstants2( const VULKAN_HPP_NAMESPACE::PushConstantsInfo * pPushConstantsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdPushConstants2( static_cast( m_commandBuffer ), reinterpret_cast( pPushConstantsInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushConstants2( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCmdPushConstants2 && "Function requires or " ); +# endif + + d.vkCmdPushConstants2( m_commandBuffer, reinterpret_cast( &pushConstantsInfo ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo * pPushDescriptorSetInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdPushDescriptorSet2( static_cast( m_commandBuffer ), reinterpret_cast( pPushDescriptorSetInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSet2 && "Function requires or " ); +# endif + + d.vkCmdPushDescriptorSet2( m_commandBuffer, reinterpret_cast( &pushDescriptorSetInfo ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_INLINE void + CommandBuffer::pushDescriptorSetWithTemplate2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo * pPushDescriptorSetWithTemplateInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + d.vkCmdPushDescriptorSetWithTemplate2( static_cast( m_commandBuffer ), + reinterpret_cast( pPushDescriptorSetWithTemplateInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void + CommandBuffer::pushDescriptorSetWithTemplate2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo, + 
Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSetWithTemplate2 && + "Function requires or " ); +# endif + + d.vkCmdPushDescriptorSetWithTemplate2( m_commandBuffer, + reinterpret_cast( &pushDescriptorSetWithTemplateInfo ) ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToImage( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo * pCopyMemoryToImageInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( + d.vkCopyMemoryToImage( static_cast( m_device ), reinterpret_cast( pCopyMemoryToImageInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type + Device::copyMemoryToImage( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCopyMemoryToImage && "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::Result result = static_cast( + d.vkCopyMemoryToImage( m_device, reinterpret_cast( ©MemoryToImageInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImage" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyImageToMemory( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo * pCopyImageToMemoryInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( + d.vkCopyImageToMemory( static_cast( m_device ), reinterpret_cast( pCopyImageToMemoryInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type + Device::copyImageToMemory( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCopyImageToMemory && "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::Result result = static_cast( + d.vkCopyImageToMemory( m_device, reinterpret_cast( ©ImageToMemoryInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemory" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyImageToImage( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo * pCopyImageToImageInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( + d.vkCopyImageToImage( static_cast( m_device ), reinterpret_cast( pCopyImageToImageInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type + Device::copyImageToImage( const 
VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkCopyImageToImage && "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::Result result = + static_cast( d.vkCopyImageToImage( m_device, reinterpret_cast( ©ImageToImageInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImage" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::transitionImageLayout( uint32_t transitionCount, + const VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfo * pTransitions, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); + return static_cast( d.vkTransitionImageLayout( + static_cast( m_device ), transitionCount, reinterpret_cast( pTransitions ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type + Device::transitionImageLayout( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions, + Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); +# if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) + VULKAN_HPP_ASSERT( d.vkTransitionImageLayout && "Function requires or " ); +# endif + + VULKAN_HPP_NAMESPACE::Result result = static_cast( + d.vkTransitionImageLayout( m_device, transitions.size(), reinterpret_cast( transitions.data() ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayout" ); + + return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); + } +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_KHR_surface === template @@ -13201,7 +13742,7 @@ namespace VULKAN_HPP_NAMESPACE { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSetKHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSetKHR && "Function requires or " ); # endif d.vkCmdPushDescriptorSetKHR( m_commandBuffer, @@ -13238,8 +13779,9 @@ namespace VULKAN_HPP_NAMESPACE { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSetWithTemplateKHR && - "Function requires or " ); + VULKAN_HPP_ASSERT( + d.vkCmdPushDescriptorSetWithTemplateKHR && + "Function requires or or " ); # endif d.vkCmdPushDescriptorSetWithTemplateKHR( m_commandBuffer, @@ -19817,53 +20359,52 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === template - VULKAN_HPP_INLINE void CommandBuffer::setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR * pLocationInfo, + VULKAN_HPP_INLINE void CommandBuffer::setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo * pLocationInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); d.vkCmdSetRenderingAttachmentLocationsKHR( static_cast( m_commandBuffer ), - reinterpret_cast( pLocationInfo ) ); + reinterpret_cast( pLocationInfo ) ); } #ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_INLINE void CommandBuffer::setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR & locationInfo, + VULKAN_HPP_INLINE void CommandBuffer::setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) VULKAN_HPP_ASSERT( d.vkCmdSetRenderingAttachmentLocationsKHR && - "Function requires " ); + "Function requires or " ); # endif - d.vkCmdSetRenderingAttachmentLocationsKHR( m_commandBuffer, reinterpret_cast( &locationInfo ) ); + d.vkCmdSetRenderingAttachmentLocationsKHR( m_commandBuffer, reinterpret_cast( &locationInfo ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template VULKAN_HPP_INLINE void - CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR * pInputAttachmentIndexInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo * pInputAttachmentIndexInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); d.vkCmdSetRenderingInputAttachmentIndicesKHR( static_cast( m_commandBuffer ), - reinterpret_cast( pInputAttachmentIndexInfo ) ); + reinterpret_cast( pInputAttachmentIndexInfo ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_INLINE void - CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + CommandBuffer::setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) VULKAN_HPP_ASSERT( d.vkCmdSetRenderingInputAttachmentIndicesKHR && - "Function requires " ); + "Function requires or " ); # endif - d.vkCmdSetRenderingInputAttachmentIndicesKHR( m_commandBuffer, - reinterpret_cast( &inputAttachmentIndexInfo ) ); + d.vkCmdSetRenderingInputAttachmentIndicesKHR( m_commandBuffer, reinterpret_cast( &inputAttachmentIndexInfo ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -21215,26 +21756,26 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_host_image_copy === template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT * pCopyMemoryToImageInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo * pCopyMemoryToImageInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); return static_cast( - d.vkCopyMemoryToImageEXT( static_cast( m_device ), reinterpret_cast( pCopyMemoryToImageInfo ) ) ); + d.vkCopyMemoryToImageEXT( static_cast( m_device ), reinterpret_cast( pCopyMemoryToImageInfo ) ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type - Device::copyMemoryToImageEXT( const 
VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT & copyMemoryToImageInfo, Dispatch const & d ) const + Device::copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo, Dispatch const & d ) const { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCopyMemoryToImageEXT && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCopyMemoryToImageEXT && "Function requires or " ); # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( - d.vkCopyMemoryToImageEXT( m_device, reinterpret_cast( ©MemoryToImageInfo ) ) ); + d.vkCopyMemoryToImageEXT( m_device, reinterpret_cast( ©MemoryToImageInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImageEXT" ); return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); @@ -21242,26 +21783,26 @@ namespace VULKAN_HPP_NAMESPACE #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT * pCopyImageToMemoryInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo * pCopyImageToMemoryInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); return static_cast( - d.vkCopyImageToMemoryEXT( static_cast( m_device ), reinterpret_cast( pCopyImageToMemoryInfo ) ) ); + d.vkCopyImageToMemoryEXT( static_cast( m_device ), reinterpret_cast( pCopyImageToMemoryInfo ) ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type - Device::copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT & copyImageToMemoryInfo, Dispatch const & d ) const + Device::copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo, Dispatch const & d ) const { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCopyImageToMemoryEXT && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCopyImageToMemoryEXT && "Function requires or " ); # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( - d.vkCopyImageToMemoryEXT( m_device, reinterpret_cast( ©ImageToMemoryInfo ) ) ); + d.vkCopyImageToMemoryEXT( m_device, reinterpret_cast( ©ImageToMemoryInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemoryEXT" ); return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); @@ -21269,26 +21810,26 @@ namespace VULKAN_HPP_NAMESPACE #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT * pCopyImageToImageInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo * pCopyImageToImageInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); return static_cast( - d.vkCopyImageToImageEXT( static_cast( m_device ), reinterpret_cast( pCopyImageToImageInfo ) ) ); + d.vkCopyImageToImageEXT( static_cast( m_device 
), reinterpret_cast( pCopyImageToImageInfo ) ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type - Device::copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT & copyImageToImageInfo, Dispatch const & d ) const + Device::copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo, Dispatch const & d ) const { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCopyImageToImageEXT && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCopyImageToImageEXT && "Function requires or " ); # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( - d.vkCopyImageToImageEXT( m_device, reinterpret_cast( ©ImageToImageInfo ) ) ); + d.vkCopyImageToImageEXT( m_device, reinterpret_cast( ©ImageToImageInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImageEXT" ); return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); @@ -21296,28 +21837,28 @@ namespace VULKAN_HPP_NAMESPACE #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::transitionImageLayoutEXT( uint32_t transitionCount, - const VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfoEXT * pTransitions, + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::transitionImageLayoutEXT( uint32_t transitionCount, + const VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfo * pTransitions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); return static_cast( d.vkTransitionImageLayoutEXT( - static_cast( m_device ), transitionCount, reinterpret_cast( pTransitions ) ) ); + static_cast( m_device ), transitionCount, reinterpret_cast( pTransitions ) ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type - Device::transitionImageLayoutEXT( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions, - Dispatch const & d ) const + Device::transitionImageLayoutEXT( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions, + Dispatch const & d ) const { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkTransitionImageLayoutEXT && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkTransitionImageLayoutEXT && "Function requires or " ); # endif VULKAN_HPP_NAMESPACE::Result result = static_cast( - d.vkTransitionImageLayoutEXT( m_device, transitions.size(), reinterpret_cast( transitions.data() ) ) ); + d.vkTransitionImageLayoutEXT( m_device, transitions.size(), reinterpret_cast( transitions.data() ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayoutEXT" ); return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); @@ -21325,56 +21866,56 @@ namespace VULKAN_HPP_NAMESPACE #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_INLINE void Device::getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource, - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR * pLayout, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void Device::getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, + const 
VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); d.vkGetImageSubresourceLayout2EXT( static_cast( m_device ), static_cast( image ), - reinterpret_cast( pSubresource ), - reinterpret_cast( pLayout ) ); + reinterpret_cast( pSubresource ), + reinterpret_cast( pLayout ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR Device::getImageSubresourceLayout2EXT( - VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 Device::getImageSubresourceLayout2EXT( + VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) VULKAN_HPP_ASSERT( d.vkGetImageSubresourceLayout2EXT && - "Function requires or or " ); + "Function requires or or or " ); # endif - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR layout; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; d.vkGetImageSubresourceLayout2EXT( m_device, static_cast( image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return layout; } template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain Device::getImageSubresourceLayout2EXT( - VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) VULKAN_HPP_ASSERT( d.vkGetImageSubresourceLayout2EXT && - "Function requires or or " ); + "Function requires or or or " ); # endif VULKAN_HPP_NAMESPACE::StructureChain structureChain; - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR & layout = structureChain.template get(); + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); d.vkGetImageSubresourceLayout2EXT( m_device, static_cast( image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return structureChain; } @@ -21383,28 +21924,27 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_map_memory2 === template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR * pMemoryMapInfo, - void ** ppData, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfo * pMemoryMapInfo, + void ** ppData, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); - return static_cast( - d.vkMapMemory2KHR( static_cast( m_device ), reinterpret_cast( pMemoryMapInfo ), ppData ) ); + return static_cast( d.vkMapMemory2KHR( static_cast( m_device ), reinterpret_cast( pMemoryMapInfo ), ppData ) ); } #ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType::type - Device::mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR & memoryMapInfo, Dispatch const & d ) const + Device::mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo, Dispatch const & d ) const { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkMapMemory2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkMapMemory2KHR && "Function requires or " ); # endif void * pData; VULKAN_HPP_NAMESPACE::Result result = - static_cast( d.vkMapMemory2KHR( m_device, reinterpret_cast( &memoryMapInfo ), &pData ) ); + static_cast( d.vkMapMemory2KHR( m_device, reinterpret_cast( &memoryMapInfo ), &pData ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2KHR" ); return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result, std::move( pData ) ); @@ -21412,25 +21952,25 @@ namespace VULKAN_HPP_NAMESPACE #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR * pMemoryUnmapInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo * pMemoryUnmapInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); - return static_cast( d.vkUnmapMemory2KHR( static_cast( m_device ), reinterpret_cast( pMemoryUnmapInfo ) ) ); + return static_cast( d.vkUnmapMemory2KHR( static_cast( m_device ), reinterpret_cast( pMemoryUnmapInfo ) ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_INLINE typename ResultValueType::type Device::unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR & memoryUnmapInfo, - Dispatch const & d ) const + VULKAN_HPP_INLINE typename ResultValueType::type Device::unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo, + Dispatch const & d ) const { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkUnmapMemory2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkUnmapMemory2KHR && "Function requires or " ); # endif VULKAN_HPP_NAMESPACE::Result result = - static_cast( d.vkUnmapMemory2KHR( m_device, reinterpret_cast( &memoryUnmapInfo ) ) ); + static_cast( d.vkUnmapMemory2KHR( m_device, reinterpret_cast( &memoryUnmapInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2KHR" ); return VULKAN_HPP_NAMESPACE::detail::createResultValueType( result ); @@ -25967,131 +26507,131 @@ namespace VULKAN_HPP_NAMESPACE } template - VULKAN_HPP_INLINE void Device::getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR * pRenderingAreaInfo, - VULKAN_HPP_NAMESPACE::Extent2D * pGranularity, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void Device::getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo * pRenderingAreaInfo, + VULKAN_HPP_NAMESPACE::Extent2D * pGranularity, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); - d.vkGetRenderingAreaGranularityKHR( static_cast( m_device ), - reinterpret_cast( 
pRenderingAreaInfo ), - reinterpret_cast( pGranularity ) ); + d.vkGetRenderingAreaGranularityKHR( + static_cast( m_device ), reinterpret_cast( pRenderingAreaInfo ), reinterpret_cast( pGranularity ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Extent2D - Device::getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR & renderingAreaInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + Device::getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkGetRenderingAreaGranularityKHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkGetRenderingAreaGranularityKHR && "Function requires or " ); # endif VULKAN_HPP_NAMESPACE::Extent2D granularity; d.vkGetRenderingAreaGranularityKHR( - m_device, reinterpret_cast( &renderingAreaInfo ), reinterpret_cast( &granularity ) ); + m_device, reinterpret_cast( &renderingAreaInfo ), reinterpret_cast( &granularity ) ); return granularity; } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_INLINE void Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR * pInfo, - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR * pLayout, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo * pInfo, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); - d.vkGetDeviceImageSubresourceLayoutKHR( static_cast( m_device ), - reinterpret_cast( pInfo ), - reinterpret_cast( pLayout ) ); + d.vkGetDeviceImageSubresourceLayoutKHR( + static_cast( m_device ), reinterpret_cast( pInfo ), reinterpret_cast( pLayout ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 + Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkGetDeviceImageSubresourceLayoutKHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkGetDeviceImageSubresourceLayoutKHR && + "Function requires or " ); # endif - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR layout; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; d.vkGetDeviceImageSubresourceLayoutKHR( - m_device, reinterpret_cast( &info ), reinterpret_cast( &layout ) ); + m_device, reinterpret_cast( &info ), reinterpret_cast( &layout ) ); return layout; } template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain - Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkGetDeviceImageSubresourceLayoutKHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkGetDeviceImageSubresourceLayoutKHR && + "Function requires or " ); # endif VULKAN_HPP_NAMESPACE::StructureChain structureChain; - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR & layout = structureChain.template get(); + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); d.vkGetDeviceImageSubresourceLayoutKHR( - m_device, reinterpret_cast( &info ), reinterpret_cast( &layout ) ); + m_device, reinterpret_cast( &info ), reinterpret_cast( &layout ) ); return structureChain; } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_INLINE void Device::getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource, - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR * pLayout, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void Device::getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); d.vkGetImageSubresourceLayout2KHR( static_cast( m_device ), static_cast( image ), - reinterpret_cast( pSubresource ), - reinterpret_cast( pLayout ) ); + reinterpret_cast( pSubresource ), + reinterpret_cast( pLayout ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR Device::getImageSubresourceLayout2KHR( - VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 Device::getImageSubresourceLayout2KHR( + VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) VULKAN_HPP_ASSERT( d.vkGetImageSubresourceLayout2KHR && - "Function requires or or " ); + "Function requires or or or " ); # endif - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR layout; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; d.vkGetImageSubresourceLayout2KHR( m_device, static_cast( image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return layout; } template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain Device::getImageSubresourceLayout2KHR( - VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) VULKAN_HPP_ASSERT( d.vkGetImageSubresourceLayout2KHR && - "Function requires or or " ); + "Function requires or or or " ); # endif VULKAN_HPP_NAMESPACE::StructureChain structureChain; - 
VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR & layout = structureChain.template get(); + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); d.vkGetImageSubresourceLayout2KHR( m_device, static_cast( image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return structureChain; } @@ -27599,96 +28139,97 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance6 === template - VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR * pBindDescriptorSetsInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo * pBindDescriptorSetsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); d.vkCmdBindDescriptorSets2KHR( static_cast( m_commandBuffer ), - reinterpret_cast( pBindDescriptorSetsInfo ) ); + reinterpret_cast( pBindDescriptorSetsInfo ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR & bindDescriptorSetsInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCmdBindDescriptorSets2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCmdBindDescriptorSets2KHR && "Function requires or " ); # endif - d.vkCmdBindDescriptorSets2KHR( m_commandBuffer, reinterpret_cast( &bindDescriptorSetsInfo ) ); + d.vkCmdBindDescriptorSets2KHR( m_commandBuffer, reinterpret_cast( &bindDescriptorSetsInfo ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_INLINE void CommandBuffer::pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR * pPushConstantsInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void CommandBuffer::pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfo * pPushConstantsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); - d.vkCmdPushConstants2KHR( static_cast( m_commandBuffer ), reinterpret_cast( pPushConstantsInfo ) ); + d.vkCmdPushConstants2KHR( static_cast( m_commandBuffer ), reinterpret_cast( pPushConstantsInfo ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_INLINE void CommandBuffer::pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR & pushConstantsInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void CommandBuffer::pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCmdPushConstants2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCmdPushConstants2KHR && "Function requires or " ); # endif - d.vkCmdPushConstants2KHR( m_commandBuffer, reinterpret_cast( &pushConstantsInfo ) ); + d.vkCmdPushConstants2KHR( m_commandBuffer, reinterpret_cast( 
&pushConstantsInfo ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR * pPushDescriptorSetInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo * pPushDescriptorSetInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); d.vkCmdPushDescriptorSet2KHR( static_cast( m_commandBuffer ), - reinterpret_cast( pPushDescriptorSetInfo ) ); + reinterpret_cast( pPushDescriptorSetInfo ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR & pushDescriptorSetInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSet2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSet2KHR && "Function requires or " ); # endif - d.vkCmdPushDescriptorSet2KHR( m_commandBuffer, reinterpret_cast( &pushDescriptorSetInfo ) ); + d.vkCmdPushDescriptorSet2KHR( m_commandBuffer, reinterpret_cast( &pushDescriptorSetInfo ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template VULKAN_HPP_INLINE void - CommandBuffer::pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR * pPushDescriptorSetWithTemplateInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + CommandBuffer::pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo * pPushDescriptorSetWithTemplateInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); d.vkCmdPushDescriptorSetWithTemplate2KHR( static_cast( m_commandBuffer ), - reinterpret_cast( pPushDescriptorSetWithTemplateInfo ) ); + reinterpret_cast( pPushDescriptorSetWithTemplateInfo ) ); } #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_INLINE void - CommandBuffer::pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR & pushDescriptorSetWithTemplateInfo, - Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + CommandBuffer::pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo, + Dispatch const & d ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( d.getVkHeaderVersion() == VK_HEADER_VERSION ); # if ( VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 ) - VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSetWithTemplate2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( d.vkCmdPushDescriptorSetWithTemplate2KHR && + "Function requires or " ); # endif d.vkCmdPushDescriptorSetWithTemplate2KHR( m_commandBuffer, - reinterpret_cast( &pushDescriptorSetWithTemplateInfo ) ); + reinterpret_cast( &pushDescriptorSetWithTemplateInfo ) ); } #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ diff --git a/third_party/vulkan/vulkan_handles.hpp b/third_party/vulkan/vulkan_handles.hpp index d911ead..617126a 100644 --- a/third_party/vulkan/vulkan_handles.hpp +++ 
b/third_party/vulkan/vulkan_handles.hpp
@@ -474,6 +474,120 @@ namespace VULKAN_HPP_NAMESPACE
   struct DeviceImageMemoryRequirements;
   using DeviceImageMemoryRequirementsKHR = DeviceImageMemoryRequirements;

+  //=== VK_VERSION_1_4 ===
+  struct PhysicalDeviceVulkan14Features;
+  struct PhysicalDeviceVulkan14Properties;
+  struct DeviceQueueGlobalPriorityCreateInfo;
+  using DeviceQueueGlobalPriorityCreateInfoEXT = DeviceQueueGlobalPriorityCreateInfo;
+  using DeviceQueueGlobalPriorityCreateInfoKHR = DeviceQueueGlobalPriorityCreateInfo;
+  struct PhysicalDeviceGlobalPriorityQueryFeatures;
+  using PhysicalDeviceGlobalPriorityQueryFeaturesEXT = PhysicalDeviceGlobalPriorityQueryFeatures;
+  using PhysicalDeviceGlobalPriorityQueryFeaturesKHR = PhysicalDeviceGlobalPriorityQueryFeatures;
+  struct QueueFamilyGlobalPriorityProperties;
+  using QueueFamilyGlobalPriorityPropertiesEXT = QueueFamilyGlobalPriorityProperties;
+  using QueueFamilyGlobalPriorityPropertiesKHR = QueueFamilyGlobalPriorityProperties;
+  struct PhysicalDeviceShaderSubgroupRotateFeatures;
+  using PhysicalDeviceShaderSubgroupRotateFeaturesKHR = PhysicalDeviceShaderSubgroupRotateFeatures;
+  struct PhysicalDeviceShaderFloatControls2Features;
+  using PhysicalDeviceShaderFloatControls2FeaturesKHR = PhysicalDeviceShaderFloatControls2Features;
+  struct PhysicalDeviceShaderExpectAssumeFeatures;
+  using PhysicalDeviceShaderExpectAssumeFeaturesKHR = PhysicalDeviceShaderExpectAssumeFeatures;
+  struct PhysicalDeviceLineRasterizationFeatures;
+  using PhysicalDeviceLineRasterizationFeaturesEXT = PhysicalDeviceLineRasterizationFeatures;
+  using PhysicalDeviceLineRasterizationFeaturesKHR = PhysicalDeviceLineRasterizationFeatures;
+  struct PhysicalDeviceLineRasterizationProperties;
+  using PhysicalDeviceLineRasterizationPropertiesEXT = PhysicalDeviceLineRasterizationProperties;
+  using PhysicalDeviceLineRasterizationPropertiesKHR = PhysicalDeviceLineRasterizationProperties;
+  struct PipelineRasterizationLineStateCreateInfo;
+  using PipelineRasterizationLineStateCreateInfoEXT = PipelineRasterizationLineStateCreateInfo;
+  using PipelineRasterizationLineStateCreateInfoKHR = PipelineRasterizationLineStateCreateInfo;
+  struct PhysicalDeviceVertexAttributeDivisorProperties;
+  using PhysicalDeviceVertexAttributeDivisorPropertiesKHR = PhysicalDeviceVertexAttributeDivisorProperties;
+  struct VertexInputBindingDivisorDescription;
+  using VertexInputBindingDivisorDescriptionEXT = VertexInputBindingDivisorDescription;
+  using VertexInputBindingDivisorDescriptionKHR = VertexInputBindingDivisorDescription;
+  struct PipelineVertexInputDivisorStateCreateInfo;
+  using PipelineVertexInputDivisorStateCreateInfoEXT = PipelineVertexInputDivisorStateCreateInfo;
+  using PipelineVertexInputDivisorStateCreateInfoKHR = PipelineVertexInputDivisorStateCreateInfo;
+  struct PhysicalDeviceVertexAttributeDivisorFeatures;
+  using PhysicalDeviceVertexAttributeDivisorFeaturesEXT = PhysicalDeviceVertexAttributeDivisorFeatures;
+  using PhysicalDeviceVertexAttributeDivisorFeaturesKHR = PhysicalDeviceVertexAttributeDivisorFeatures;
+  struct PhysicalDeviceIndexTypeUint8Features;
+  using PhysicalDeviceIndexTypeUint8FeaturesEXT = PhysicalDeviceIndexTypeUint8Features;
+  using PhysicalDeviceIndexTypeUint8FeaturesKHR = PhysicalDeviceIndexTypeUint8Features;
+  struct MemoryMapInfo;
+  using MemoryMapInfoKHR = MemoryMapInfo;
+  struct MemoryUnmapInfo;
+  using MemoryUnmapInfoKHR = MemoryUnmapInfo;
+  struct PhysicalDeviceMaintenance5Features;
+  using PhysicalDeviceMaintenance5FeaturesKHR =
PhysicalDeviceMaintenance5Features; + struct PhysicalDeviceMaintenance5Properties; + using PhysicalDeviceMaintenance5PropertiesKHR = PhysicalDeviceMaintenance5Properties; + struct RenderingAreaInfo; + using RenderingAreaInfoKHR = RenderingAreaInfo; + struct DeviceImageSubresourceInfo; + using DeviceImageSubresourceInfoKHR = DeviceImageSubresourceInfo; + struct ImageSubresource2; + using ImageSubresource2EXT = ImageSubresource2; + using ImageSubresource2KHR = ImageSubresource2; + struct SubresourceLayout2; + using SubresourceLayout2EXT = SubresourceLayout2; + using SubresourceLayout2KHR = SubresourceLayout2; + struct PipelineCreateFlags2CreateInfo; + using PipelineCreateFlags2CreateInfoKHR = PipelineCreateFlags2CreateInfo; + struct BufferUsageFlags2CreateInfo; + using BufferUsageFlags2CreateInfoKHR = BufferUsageFlags2CreateInfo; + struct PhysicalDevicePushDescriptorProperties; + using PhysicalDevicePushDescriptorPropertiesKHR = PhysicalDevicePushDescriptorProperties; + struct PhysicalDeviceDynamicRenderingLocalReadFeatures; + using PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR = PhysicalDeviceDynamicRenderingLocalReadFeatures; + struct RenderingAttachmentLocationInfo; + using RenderingAttachmentLocationInfoKHR = RenderingAttachmentLocationInfo; + struct RenderingInputAttachmentIndexInfo; + using RenderingInputAttachmentIndexInfoKHR = RenderingInputAttachmentIndexInfo; + struct PhysicalDeviceMaintenance6Features; + using PhysicalDeviceMaintenance6FeaturesKHR = PhysicalDeviceMaintenance6Features; + struct PhysicalDeviceMaintenance6Properties; + using PhysicalDeviceMaintenance6PropertiesKHR = PhysicalDeviceMaintenance6Properties; + struct BindMemoryStatus; + using BindMemoryStatusKHR = BindMemoryStatus; + struct BindDescriptorSetsInfo; + using BindDescriptorSetsInfoKHR = BindDescriptorSetsInfo; + struct PushConstantsInfo; + using PushConstantsInfoKHR = PushConstantsInfo; + struct PushDescriptorSetInfo; + using PushDescriptorSetInfoKHR = PushDescriptorSetInfo; + struct PushDescriptorSetWithTemplateInfo; + using PushDescriptorSetWithTemplateInfoKHR = PushDescriptorSetWithTemplateInfo; + struct PhysicalDevicePipelineProtectedAccessFeatures; + using PhysicalDevicePipelineProtectedAccessFeaturesEXT = PhysicalDevicePipelineProtectedAccessFeatures; + struct PhysicalDevicePipelineRobustnessFeatures; + using PhysicalDevicePipelineRobustnessFeaturesEXT = PhysicalDevicePipelineRobustnessFeatures; + struct PhysicalDevicePipelineRobustnessProperties; + using PhysicalDevicePipelineRobustnessPropertiesEXT = PhysicalDevicePipelineRobustnessProperties; + struct PipelineRobustnessCreateInfo; + using PipelineRobustnessCreateInfoEXT = PipelineRobustnessCreateInfo; + struct PhysicalDeviceHostImageCopyFeatures; + using PhysicalDeviceHostImageCopyFeaturesEXT = PhysicalDeviceHostImageCopyFeatures; + struct PhysicalDeviceHostImageCopyProperties; + using PhysicalDeviceHostImageCopyPropertiesEXT = PhysicalDeviceHostImageCopyProperties; + struct MemoryToImageCopy; + using MemoryToImageCopyEXT = MemoryToImageCopy; + struct ImageToMemoryCopy; + using ImageToMemoryCopyEXT = ImageToMemoryCopy; + struct CopyMemoryToImageInfo; + using CopyMemoryToImageInfoEXT = CopyMemoryToImageInfo; + struct CopyImageToMemoryInfo; + using CopyImageToMemoryInfoEXT = CopyImageToMemoryInfo; + struct CopyImageToImageInfo; + using CopyImageToImageInfoEXT = CopyImageToImageInfo; + struct HostImageLayoutTransitionInfo; + using HostImageLayoutTransitionInfoEXT = HostImageLayoutTransitionInfo; + struct SubresourceHostMemcpySize; + using 
SubresourceHostMemcpySizeEXT = SubresourceHostMemcpySize; + struct HostImageCopyDevicePerformanceQuery; + using HostImageCopyDevicePerformanceQueryEXT = HostImageCopyDevicePerformanceQuery; + //=== VK_KHR_surface === struct SurfaceCapabilitiesKHR; struct SurfaceFormatKHR; @@ -669,11 +783,6 @@ namespace VULKAN_HPP_NAMESPACE struct ImageViewASTCDecodeModeEXT; struct PhysicalDeviceASTCDecodeFeaturesEXT; - //=== VK_EXT_pipeline_robustness === - struct PhysicalDevicePipelineRobustnessFeaturesEXT; - struct PhysicalDevicePipelineRobustnessPropertiesEXT; - struct PipelineRobustnessCreateInfoEXT; - #if defined( VK_USE_PLATFORM_WIN32_KHR ) //=== VK_KHR_external_memory_win32 === struct ImportMemoryWin32HandleInfoKHR; @@ -704,9 +813,6 @@ namespace VULKAN_HPP_NAMESPACE struct ImportSemaphoreFdInfoKHR; struct SemaphoreGetFdInfoKHR; - //=== VK_KHR_push_descriptor === - struct PhysicalDevicePushDescriptorPropertiesKHR; - //=== VK_EXT_conditional_rendering === struct ConditionalRenderingBeginInfoEXT; struct PhysicalDeviceConditionalRenderingFeaturesEXT; @@ -980,14 +1086,6 @@ namespace VULKAN_HPP_NAMESPACE struct VideoDecodeH265PictureInfoKHR; struct VideoDecodeH265DpbSlotInfoKHR; - //=== VK_KHR_global_priority === - struct DeviceQueueGlobalPriorityCreateInfoKHR; - using DeviceQueueGlobalPriorityCreateInfoEXT = DeviceQueueGlobalPriorityCreateInfoKHR; - struct PhysicalDeviceGlobalPriorityQueryFeaturesKHR; - using PhysicalDeviceGlobalPriorityQueryFeaturesEXT = PhysicalDeviceGlobalPriorityQueryFeaturesKHR; - struct QueueFamilyGlobalPriorityPropertiesKHR; - using QueueFamilyGlobalPriorityPropertiesEXT = QueueFamilyGlobalPriorityPropertiesKHR; - //=== VK_AMD_memory_overallocation_behavior === struct DeviceMemoryOverallocationCreateInfoAMD; @@ -1068,11 +1166,6 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_AMD_device_coherent_memory === struct PhysicalDeviceCoherentMemoryFeaturesAMD; - //=== VK_KHR_dynamic_rendering_local_read === - struct PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; - struct RenderingAttachmentLocationInfoKHR; - struct RenderingInputAttachmentIndexInfoKHR; - //=== VK_EXT_shader_image_atomic_int64 === struct PhysicalDeviceShaderImageAtomicInt64FeaturesEXT; @@ -1150,22 +1243,6 @@ namespace VULKAN_HPP_NAMESPACE struct PipelineExecutableStatisticKHR; struct PipelineExecutableInternalRepresentationKHR; - //=== VK_EXT_host_image_copy === - struct PhysicalDeviceHostImageCopyFeaturesEXT; - struct PhysicalDeviceHostImageCopyPropertiesEXT; - struct MemoryToImageCopyEXT; - struct ImageToMemoryCopyEXT; - struct CopyMemoryToImageInfoEXT; - struct CopyImageToMemoryInfoEXT; - struct CopyImageToImageInfoEXT; - struct HostImageLayoutTransitionInfoEXT; - struct SubresourceHostMemcpySizeEXT; - struct HostImageCopyDevicePerformanceQueryEXT; - - //=== VK_KHR_map_memory2 === - struct MemoryMapInfoKHR; - struct MemoryUnmapInfoKHR; - //=== VK_EXT_map_memory_placed === struct PhysicalDeviceMapMemoryPlacedFeaturesEXT; struct PhysicalDeviceMapMemoryPlacedPropertiesEXT; @@ -1527,9 +1604,6 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_ARM_shader_core_properties === struct PhysicalDeviceShaderCorePropertiesARM; - //=== VK_KHR_shader_subgroup_rotate === - struct PhysicalDeviceShaderSubgroupRotateFeaturesKHR; - //=== VK_ARM_scheduling_controls === struct DeviceQueueShaderCoreControlCreateInfoARM; struct PhysicalDeviceSchedulingControlsFeaturesARM; @@ -1640,9 +1714,6 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_legacy_dithering === struct PhysicalDeviceLegacyDitheringFeaturesEXT; - //=== VK_EXT_pipeline_protected_access === - 
struct PhysicalDevicePipelineProtectedAccessFeaturesEXT; - #if defined( VK_USE_PLATFORM_ANDROID_KHR ) //=== VK_ANDROID_external_format_resolve === struct PhysicalDeviceExternalFormatResolveFeaturesANDROID; @@ -1650,18 +1721,6 @@ namespace VULKAN_HPP_NAMESPACE struct AndroidHardwareBufferFormatResolvePropertiesANDROID; #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - //=== VK_KHR_maintenance5 === - struct PhysicalDeviceMaintenance5FeaturesKHR; - struct PhysicalDeviceMaintenance5PropertiesKHR; - struct RenderingAreaInfoKHR; - struct DeviceImageSubresourceInfoKHR; - struct ImageSubresource2KHR; - using ImageSubresource2EXT = ImageSubresource2KHR; - struct SubresourceLayout2KHR; - using SubresourceLayout2EXT = SubresourceLayout2KHR; - struct PipelineCreateFlags2CreateInfoKHR; - struct BufferUsageFlags2CreateInfoKHR; - //=== VK_AMD_anti_lag === struct PhysicalDeviceAntiLagFeaturesAMD; struct AntiLagDataAMD; @@ -1808,18 +1867,6 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_attachment_feedback_loop_dynamic_state === struct PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT; - //=== VK_KHR_vertex_attribute_divisor === - struct PhysicalDeviceVertexAttributeDivisorPropertiesKHR; - struct VertexInputBindingDivisorDescriptionKHR; - using VertexInputBindingDivisorDescriptionEXT = VertexInputBindingDivisorDescriptionKHR; - struct PipelineVertexInputDivisorStateCreateInfoKHR; - using PipelineVertexInputDivisorStateCreateInfoEXT = PipelineVertexInputDivisorStateCreateInfoKHR; - struct PhysicalDeviceVertexAttributeDivisorFeaturesKHR; - using PhysicalDeviceVertexAttributeDivisorFeaturesEXT = PhysicalDeviceVertexAttributeDivisorFeaturesKHR; - - //=== VK_KHR_shader_float_controls2 === - struct PhysicalDeviceShaderFloatControls2FeaturesKHR; - #if defined( VK_USE_PLATFORM_SCREEN_QNX ) //=== VK_QNX_external_memory_screen_buffer === struct ScreenBufferPropertiesQNX; @@ -1832,33 +1879,11 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_MSFT_layered_driver === struct PhysicalDeviceLayeredDriverPropertiesMSFT; - //=== VK_KHR_index_type_uint8 === - struct PhysicalDeviceIndexTypeUint8FeaturesKHR; - using PhysicalDeviceIndexTypeUint8FeaturesEXT = PhysicalDeviceIndexTypeUint8FeaturesKHR; - - //=== VK_KHR_line_rasterization === - struct PhysicalDeviceLineRasterizationFeaturesKHR; - using PhysicalDeviceLineRasterizationFeaturesEXT = PhysicalDeviceLineRasterizationFeaturesKHR; - struct PhysicalDeviceLineRasterizationPropertiesKHR; - using PhysicalDeviceLineRasterizationPropertiesEXT = PhysicalDeviceLineRasterizationPropertiesKHR; - struct PipelineRasterizationLineStateCreateInfoKHR; - using PipelineRasterizationLineStateCreateInfoEXT = PipelineRasterizationLineStateCreateInfoKHR; - //=== VK_KHR_calibrated_timestamps === struct CalibratedTimestampInfoKHR; using CalibratedTimestampInfoEXT = CalibratedTimestampInfoKHR; - //=== VK_KHR_shader_expect_assume === - struct PhysicalDeviceShaderExpectAssumeFeaturesKHR; - //=== VK_KHR_maintenance6 === - struct PhysicalDeviceMaintenance6FeaturesKHR; - struct PhysicalDeviceMaintenance6PropertiesKHR; - struct BindMemoryStatusKHR; - struct BindDescriptorSetsInfoKHR; - struct PushConstantsInfoKHR; - struct PushDescriptorSetInfoKHR; - struct PushDescriptorSetWithTemplateInfoKHR; struct SetDescriptorBufferOffsetsInfoEXT; struct BindDescriptorBufferEmbeddedSamplersInfoEXT; @@ -2602,25 +2627,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( SurfaceKHR const & ) const = default; -#else - bool operator==( SurfaceKHR const & rhs 
) const VULKAN_HPP_NOEXCEPT - { - return m_surfaceKHR == rhs.m_surfaceKHR; - } - - bool operator!=( SurfaceKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_surfaceKHR != rhs.m_surfaceKHR; - } - - bool operator<( SurfaceKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_surfaceKHR < rhs.m_surfaceKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSurfaceKHR() const VULKAN_HPP_NOEXCEPT { return m_surfaceKHR; @@ -2718,25 +2724,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DebugReportCallbackEXT const & ) const = default; -#else - bool operator==( DebugReportCallbackEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_debugReportCallbackEXT == rhs.m_debugReportCallbackEXT; - } - - bool operator!=( DebugReportCallbackEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_debugReportCallbackEXT != rhs.m_debugReportCallbackEXT; - } - - bool operator<( DebugReportCallbackEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_debugReportCallbackEXT < rhs.m_debugReportCallbackEXT; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDebugReportCallbackEXT() const VULKAN_HPP_NOEXCEPT { return m_debugReportCallbackEXT; @@ -2834,25 +2821,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DebugUtilsMessengerEXT const & ) const = default; -#else - bool operator==( DebugUtilsMessengerEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_debugUtilsMessengerEXT == rhs.m_debugUtilsMessengerEXT; - } - - bool operator!=( DebugUtilsMessengerEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_debugUtilsMessengerEXT != rhs.m_debugUtilsMessengerEXT; - } - - bool operator<( DebugUtilsMessengerEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_debugUtilsMessengerEXT < rhs.m_debugUtilsMessengerEXT; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDebugUtilsMessengerEXT() const VULKAN_HPP_NOEXCEPT { return m_debugUtilsMessengerEXT; @@ -2938,25 +2906,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DisplayKHR const & ) const = default; -#else - bool operator==( DisplayKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_displayKHR == rhs.m_displayKHR; - } - - bool operator!=( DisplayKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_displayKHR != rhs.m_displayKHR; - } - - bool operator<( DisplayKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_displayKHR < rhs.m_displayKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayKHR() const VULKAN_HPP_NOEXCEPT { return m_displayKHR; @@ -3048,25 +2997,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( SwapchainKHR const & ) const = default; -#else - bool operator==( SwapchainKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_swapchainKHR == rhs.m_swapchainKHR; - } - - bool operator!=( SwapchainKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_swapchainKHR != rhs.m_swapchainKHR; - } - - bool operator<( SwapchainKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_swapchainKHR < rhs.m_swapchainKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSwapchainKHR() const VULKAN_HPP_NOEXCEPT { return m_swapchainKHR; @@ -3158,25 +3088,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Semaphore const & ) 
const = default; -#else - bool operator==( Semaphore const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_semaphore == rhs.m_semaphore; - } - - bool operator!=( Semaphore const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_semaphore != rhs.m_semaphore; - } - - bool operator<( Semaphore const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_semaphore < rhs.m_semaphore; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSemaphore() const VULKAN_HPP_NOEXCEPT { return m_semaphore; @@ -3268,25 +3179,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Fence const & ) const = default; -#else - bool operator==( Fence const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_fence == rhs.m_fence; - } - - bool operator!=( Fence const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_fence != rhs.m_fence; - } - - bool operator<( Fence const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_fence < rhs.m_fence; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFence() const VULKAN_HPP_NOEXCEPT { return m_fence; @@ -3384,25 +3276,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PerformanceConfigurationINTEL const & ) const = default; -#else - bool operator==( PerformanceConfigurationINTEL const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_performanceConfigurationINTEL == rhs.m_performanceConfigurationINTEL; - } - - bool operator!=( PerformanceConfigurationINTEL const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_performanceConfigurationINTEL != rhs.m_performanceConfigurationINTEL; - } - - bool operator<( PerformanceConfigurationINTEL const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_performanceConfigurationINTEL < rhs.m_performanceConfigurationINTEL; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPerformanceConfigurationINTEL() const VULKAN_HPP_NOEXCEPT { return m_performanceConfigurationINTEL; @@ -3488,25 +3361,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( QueryPool const & ) const = default; -#else - bool operator==( QueryPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_queryPool == rhs.m_queryPool; - } - - bool operator!=( QueryPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_queryPool != rhs.m_queryPool; - } - - bool operator<( QueryPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_queryPool < rhs.m_queryPool; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueryPool() const VULKAN_HPP_NOEXCEPT { return m_queryPool; @@ -3598,25 +3452,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Buffer const & ) const = default; -#else - bool operator==( Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_buffer == rhs.m_buffer; - } - - bool operator!=( Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_buffer != rhs.m_buffer; - } - - bool operator<( Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_buffer < rhs.m_buffer; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBuffer() const VULKAN_HPP_NOEXCEPT { return m_buffer; @@ -3708,25 +3543,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PipelineLayout const & ) const = default; -#else - bool operator==( PipelineLayout const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineLayout == 
rhs.m_pipelineLayout; - } - - bool operator!=( PipelineLayout const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineLayout != rhs.m_pipelineLayout; - } - - bool operator<( PipelineLayout const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineLayout < rhs.m_pipelineLayout; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineLayout() const VULKAN_HPP_NOEXCEPT { return m_pipelineLayout; @@ -3818,25 +3634,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DescriptorSet const & ) const = default; -#else - bool operator==( DescriptorSet const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorSet == rhs.m_descriptorSet; - } - - bool operator!=( DescriptorSet const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorSet != rhs.m_descriptorSet; - } - - bool operator<( DescriptorSet const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorSet < rhs.m_descriptorSet; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSet() const VULKAN_HPP_NOEXCEPT { return m_descriptorSet; @@ -3928,25 +3725,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( ImageView const & ) const = default; -#else - bool operator==( ImageView const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_imageView == rhs.m_imageView; - } - - bool operator!=( ImageView const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_imageView != rhs.m_imageView; - } - - bool operator<( ImageView const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_imageView < rhs.m_imageView; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImageView() const VULKAN_HPP_NOEXCEPT { return m_imageView; @@ -4038,25 +3816,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Pipeline const & ) const = default; -#else - bool operator==( Pipeline const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipeline == rhs.m_pipeline; - } - - bool operator!=( Pipeline const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipeline != rhs.m_pipeline; - } - - bool operator<( Pipeline const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipeline < rhs.m_pipeline; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipeline() const VULKAN_HPP_NOEXCEPT { return m_pipeline; @@ -4148,25 +3907,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( ShaderEXT const & ) const = default; -#else - bool operator==( ShaderEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_shaderEXT == rhs.m_shaderEXT; - } - - bool operator!=( ShaderEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_shaderEXT != rhs.m_shaderEXT; - } - - bool operator<( ShaderEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_shaderEXT < rhs.m_shaderEXT; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkShaderEXT() const VULKAN_HPP_NOEXCEPT { return m_shaderEXT; @@ -4252,25 +3992,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Image const & ) const = default; -#else - bool operator==( Image const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_image == rhs.m_image; - } - - bool operator!=( Image const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_image != rhs.m_image; - } - - bool operator<( Image const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_image < rhs.m_image; - } 
-#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImage() const VULKAN_HPP_NOEXCEPT { return m_image; @@ -4368,25 +4089,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( AccelerationStructureNV const & ) const = default; -#else - bool operator==( AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_accelerationStructureNV == rhs.m_accelerationStructureNV; - } - - bool operator!=( AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_accelerationStructureNV != rhs.m_accelerationStructureNV; - } - - bool operator<( AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_accelerationStructureNV < rhs.m_accelerationStructureNV; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkAccelerationStructureNV() const VULKAN_HPP_NOEXCEPT { return m_accelerationStructureNV; @@ -4484,25 +4186,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( OpticalFlowSessionNV const & ) const = default; -#else - bool operator==( OpticalFlowSessionNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_opticalFlowSessionNV == rhs.m_opticalFlowSessionNV; - } - - bool operator!=( OpticalFlowSessionNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_opticalFlowSessionNV != rhs.m_opticalFlowSessionNV; - } - - bool operator<( OpticalFlowSessionNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_opticalFlowSessionNV < rhs.m_opticalFlowSessionNV; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkOpticalFlowSessionNV() const VULKAN_HPP_NOEXCEPT { return m_opticalFlowSessionNV; @@ -4594,25 +4277,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DescriptorUpdateTemplate const & ) const = default; -#else - bool operator==( DescriptorUpdateTemplate const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorUpdateTemplate == rhs.m_descriptorUpdateTemplate; - } - - bool operator!=( DescriptorUpdateTemplate const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorUpdateTemplate != rhs.m_descriptorUpdateTemplate; - } - - bool operator<( DescriptorUpdateTemplate const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorUpdateTemplate < rhs.m_descriptorUpdateTemplate; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorUpdateTemplate() const VULKAN_HPP_NOEXCEPT { return m_descriptorUpdateTemplate; @@ -4706,25 +4370,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Event const & ) const = default; -#else - bool operator==( Event const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_event == rhs.m_event; - } - - bool operator!=( Event const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_event != rhs.m_event; - } - - bool operator<( Event const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_event < rhs.m_event; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkEvent() const VULKAN_HPP_NOEXCEPT { return m_event; @@ -4822,25 +4467,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( AccelerationStructureKHR const & ) const = default; -#else - bool operator==( AccelerationStructureKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_accelerationStructureKHR == rhs.m_accelerationStructureKHR; - } - - bool operator!=( AccelerationStructureKHR const & rhs 
) const VULKAN_HPP_NOEXCEPT - { - return m_accelerationStructureKHR != rhs.m_accelerationStructureKHR; - } - - bool operator<( AccelerationStructureKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_accelerationStructureKHR < rhs.m_accelerationStructureKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkAccelerationStructureKHR() const VULKAN_HPP_NOEXCEPT { return m_accelerationStructureKHR; @@ -4932,25 +4558,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( MicromapEXT const & ) const = default; -#else - bool operator==( MicromapEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_micromapEXT == rhs.m_micromapEXT; - } - - bool operator!=( MicromapEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_micromapEXT != rhs.m_micromapEXT; - } - - bool operator<( MicromapEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_micromapEXT < rhs.m_micromapEXT; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkMicromapEXT() const VULKAN_HPP_NOEXCEPT { return m_micromapEXT; @@ -5034,25 +4641,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CommandBuffer const & ) const = default; -#else - bool operator==( CommandBuffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_commandBuffer == rhs.m_commandBuffer; - } - - bool operator!=( CommandBuffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_commandBuffer != rhs.m_commandBuffer; - } - - bool operator<( CommandBuffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_commandBuffer < rhs.m_commandBuffer; - } -#endif - //=== VK_VERSION_1_0 === template @@ -5784,6 +5372,105 @@ namespace VULKAN_HPP_NAMESPACE void setPrimitiveRestartEnable( VULKAN_HPP_NAMESPACE::Bool32 primitiveRestartEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + //=== VK_VERSION_1_4 === + + template + void setLineStipple( uint32_t lineStippleFactor, + uint16_t lineStipplePattern, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void bindIndexBuffer2( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + VULKAN_HPP_NAMESPACE::IndexType indexType, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void pushDescriptorSet( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VULKAN_HPP_NAMESPACE::WriteDescriptorSet * pDescriptorWrites, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushDescriptorSet( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + VULKAN_HPP_NAMESPACE::ArrayProxy const & descriptorWrites, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void pushDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + const void * pData, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
void pushDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + DataType const & data, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void setRenderingAttachmentLocations( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo * pLocationInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setRenderingAttachmentLocations( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void setRenderingInputAttachmentIndices( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo * pInputAttachmentIndexInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setRenderingInputAttachmentIndices( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void bindDescriptorSets2( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo * pBindDescriptorSetsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void bindDescriptorSets2( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void pushConstants2( const VULKAN_HPP_NAMESPACE::PushConstantsInfo * pPushConstantsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushConstants2( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void pushDescriptorSet2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo * pPushDescriptorSetInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushDescriptorSet2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void pushDescriptorSetWithTemplate2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo * pPushDescriptorSetWithTemplateInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushDescriptorSetWithTemplate2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_EXT_debug_marker === template @@ -6525,20 +6212,20 @@ namespace 
VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === template - void setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR * pLocationInfo, + void setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo * pLocationInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - void setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR & locationInfo, + void setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR * pInputAttachmentIndexInfo, + void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo * pInputAttachmentIndexInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo, + void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -7319,38 +7006,38 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance6 === template - void bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR * pBindDescriptorSetsInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo * pBindDescriptorSetsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - void bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR & bindDescriptorSetsInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR * pPushConstantsInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfo * pPushConstantsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - void pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR & pushConstantsInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void pushDescriptorSet2KHR( 
const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR * pPushDescriptorSetInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo * pPushDescriptorSetInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - void pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR & pushDescriptorSetInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR * pPushDescriptorSetWithTemplateInfo, + void pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo * pPushDescriptorSetWithTemplateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - void pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR & pushDescriptorSetWithTemplateInfo, + void pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ @@ -7489,25 +7176,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DeviceMemory const & ) const = default; -#else - bool operator==( DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deviceMemory == rhs.m_deviceMemory; - } - - bool operator!=( DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deviceMemory != rhs.m_deviceMemory; - } - - bool operator<( DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deviceMemory < rhs.m_deviceMemory; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeviceMemory() const VULKAN_HPP_NOEXCEPT { return m_deviceMemory; @@ -7599,25 +7267,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( VideoSessionKHR const & ) const = default; -#else - bool operator==( VideoSessionKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_videoSessionKHR == rhs.m_videoSessionKHR; - } - - bool operator!=( VideoSessionKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_videoSessionKHR != rhs.m_videoSessionKHR; - } - - bool operator<( VideoSessionKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_videoSessionKHR < rhs.m_videoSessionKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkVideoSessionKHR() const VULKAN_HPP_NOEXCEPT { return m_videoSessionKHR; @@ -7709,25 +7358,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DeferredOperationKHR const & ) const = default; -#else - bool operator==( DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR == rhs.m_deferredOperationKHR; - } - - bool operator!=( DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - 
return m_deferredOperationKHR != rhs.m_deferredOperationKHR; - } - - bool operator<( DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_deferredOperationKHR < rhs.m_deferredOperationKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeferredOperationKHR() const VULKAN_HPP_NOEXCEPT { return m_deferredOperationKHR; @@ -7820,25 +7450,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -# if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( BufferCollectionFUCHSIA const & ) const = default; -# else - bool operator==( BufferCollectionFUCHSIA const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_bufferCollectionFUCHSIA == rhs.m_bufferCollectionFUCHSIA; - } - - bool operator!=( BufferCollectionFUCHSIA const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_bufferCollectionFUCHSIA != rhs.m_bufferCollectionFUCHSIA; - } - - bool operator<( BufferCollectionFUCHSIA const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_bufferCollectionFUCHSIA < rhs.m_bufferCollectionFUCHSIA; - } -# endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBufferCollectionFUCHSIA() const VULKAN_HPP_NOEXCEPT { return m_bufferCollectionFUCHSIA; @@ -7931,25 +7542,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( BufferView const & ) const = default; -#else - bool operator==( BufferView const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_bufferView == rhs.m_bufferView; - } - - bool operator!=( BufferView const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_bufferView != rhs.m_bufferView; - } - - bool operator<( BufferView const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_bufferView < rhs.m_bufferView; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBufferView() const VULKAN_HPP_NOEXCEPT { return m_bufferView; @@ -8041,25 +7633,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CommandPool const & ) const = default; -#else - bool operator==( CommandPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_commandPool == rhs.m_commandPool; - } - - bool operator!=( CommandPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_commandPool != rhs.m_commandPool; - } - - bool operator<( CommandPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_commandPool < rhs.m_commandPool; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandPool() const VULKAN_HPP_NOEXCEPT { return m_commandPool; @@ -8151,25 +7724,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PipelineCache const & ) const = default; -#else - bool operator==( PipelineCache const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineCache == rhs.m_pipelineCache; - } - - bool operator!=( PipelineCache const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineCache != rhs.m_pipelineCache; - } - - bool operator<( PipelineCache const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineCache < rhs.m_pipelineCache; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineCache() const VULKAN_HPP_NOEXCEPT { return m_pipelineCache; @@ -8261,25 +7815,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CuFunctionNVX const & ) const = default; -#else - bool operator==( CuFunctionNVX const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cuFunctionNVX == rhs.m_cuFunctionNVX; - } - - bool operator!=( 
CuFunctionNVX const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cuFunctionNVX != rhs.m_cuFunctionNVX; - } - - bool operator<( CuFunctionNVX const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cuFunctionNVX < rhs.m_cuFunctionNVX; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCuFunctionNVX() const VULKAN_HPP_NOEXCEPT { return m_cuFunctionNVX; @@ -8371,25 +7906,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CuModuleNVX const & ) const = default; -#else - bool operator==( CuModuleNVX const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cuModuleNVX == rhs.m_cuModuleNVX; - } - - bool operator!=( CuModuleNVX const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cuModuleNVX != rhs.m_cuModuleNVX; - } - - bool operator<( CuModuleNVX const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cuModuleNVX < rhs.m_cuModuleNVX; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCuModuleNVX() const VULKAN_HPP_NOEXCEPT { return m_cuModuleNVX; @@ -8482,25 +7998,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -# if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CudaFunctionNV const & ) const = default; -# else - bool operator==( CudaFunctionNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cudaFunctionNV == rhs.m_cudaFunctionNV; - } - - bool operator!=( CudaFunctionNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cudaFunctionNV != rhs.m_cudaFunctionNV; - } - - bool operator<( CudaFunctionNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cudaFunctionNV < rhs.m_cudaFunctionNV; - } -# endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCudaFunctionNV() const VULKAN_HPP_NOEXCEPT { return m_cudaFunctionNV; @@ -8594,25 +8091,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -# if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CudaModuleNV const & ) const = default; -# else - bool operator==( CudaModuleNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cudaModuleNV == rhs.m_cudaModuleNV; - } - - bool operator!=( CudaModuleNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cudaModuleNV != rhs.m_cudaModuleNV; - } - - bool operator<( CudaModuleNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_cudaModuleNV < rhs.m_cudaModuleNV; - } -# endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCudaModuleNV() const VULKAN_HPP_NOEXCEPT { return m_cudaModuleNV; @@ -8705,25 +8183,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DescriptorPool const & ) const = default; -#else - bool operator==( DescriptorPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorPool == rhs.m_descriptorPool; - } - - bool operator!=( DescriptorPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorPool != rhs.m_descriptorPool; - } - - bool operator<( DescriptorPool const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorPool < rhs.m_descriptorPool; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorPool() const VULKAN_HPP_NOEXCEPT { return m_descriptorPool; @@ -8821,25 +8280,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DescriptorSetLayout const & ) const = default; -#else - bool operator==( DescriptorSetLayout const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorSetLayout == rhs.m_descriptorSetLayout; - } - - bool operator!=( DescriptorSetLayout const & rhs ) const 
VULKAN_HPP_NOEXCEPT - { - return m_descriptorSetLayout != rhs.m_descriptorSetLayout; - } - - bool operator<( DescriptorSetLayout const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_descriptorSetLayout < rhs.m_descriptorSetLayout; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSetLayout() const VULKAN_HPP_NOEXCEPT { return m_descriptorSetLayout; @@ -8931,25 +8371,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Framebuffer const & ) const = default; -#else - bool operator==( Framebuffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_framebuffer == rhs.m_framebuffer; - } - - bool operator!=( Framebuffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_framebuffer != rhs.m_framebuffer; - } - - bool operator<( Framebuffer const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_framebuffer < rhs.m_framebuffer; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFramebuffer() const VULKAN_HPP_NOEXCEPT { return m_framebuffer; @@ -9047,25 +8468,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( IndirectCommandsLayoutEXT const & ) const = default; -#else - bool operator==( IndirectCommandsLayoutEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectCommandsLayoutEXT == rhs.m_indirectCommandsLayoutEXT; - } - - bool operator!=( IndirectCommandsLayoutEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectCommandsLayoutEXT != rhs.m_indirectCommandsLayoutEXT; - } - - bool operator<( IndirectCommandsLayoutEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectCommandsLayoutEXT < rhs.m_indirectCommandsLayoutEXT; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkIndirectCommandsLayoutEXT() const VULKAN_HPP_NOEXCEPT { return m_indirectCommandsLayoutEXT; @@ -9157,25 +8559,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( IndirectCommandsLayoutNV const & ) const = default; -#else - bool operator==( IndirectCommandsLayoutNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectCommandsLayoutNV == rhs.m_indirectCommandsLayoutNV; - } - - bool operator!=( IndirectCommandsLayoutNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectCommandsLayoutNV != rhs.m_indirectCommandsLayoutNV; - } - - bool operator<( IndirectCommandsLayoutNV const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectCommandsLayoutNV < rhs.m_indirectCommandsLayoutNV; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkIndirectCommandsLayoutNV() const VULKAN_HPP_NOEXCEPT { return m_indirectCommandsLayoutNV; @@ -9267,25 +8650,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( IndirectExecutionSetEXT const & ) const = default; -#else - bool operator==( IndirectExecutionSetEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectExecutionSetEXT == rhs.m_indirectExecutionSetEXT; - } - - bool operator!=( IndirectExecutionSetEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectExecutionSetEXT != rhs.m_indirectExecutionSetEXT; - } - - bool operator<( IndirectExecutionSetEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_indirectExecutionSetEXT < rhs.m_indirectExecutionSetEXT; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkIndirectExecutionSetEXT() const VULKAN_HPP_NOEXCEPT { return m_indirectExecutionSetEXT; @@ -9371,25 +8735,6 @@ 
namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PrivateDataSlot const & ) const = default; -#else - bool operator==( PrivateDataSlot const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_privateDataSlot == rhs.m_privateDataSlot; - } - - bool operator!=( PrivateDataSlot const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_privateDataSlot != rhs.m_privateDataSlot; - } - - bool operator<( PrivateDataSlot const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_privateDataSlot < rhs.m_privateDataSlot; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPrivateDataSlot() const VULKAN_HPP_NOEXCEPT { return m_privateDataSlot; @@ -9477,25 +8822,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( RenderPass const & ) const = default; -#else - bool operator==( RenderPass const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_renderPass == rhs.m_renderPass; - } - - bool operator!=( RenderPass const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_renderPass != rhs.m_renderPass; - } - - bool operator<( RenderPass const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_renderPass < rhs.m_renderPass; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkRenderPass() const VULKAN_HPP_NOEXCEPT { return m_renderPass; @@ -9587,25 +8913,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Sampler const & ) const = default; -#else - bool operator==( Sampler const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_sampler == rhs.m_sampler; - } - - bool operator!=( Sampler const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_sampler != rhs.m_sampler; - } - - bool operator<( Sampler const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_sampler < rhs.m_sampler; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSampler() const VULKAN_HPP_NOEXCEPT { return m_sampler; @@ -9703,25 +9010,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( SamplerYcbcrConversion const & ) const = default; -#else - bool operator==( SamplerYcbcrConversion const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_samplerYcbcrConversion == rhs.m_samplerYcbcrConversion; - } - - bool operator!=( SamplerYcbcrConversion const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_samplerYcbcrConversion != rhs.m_samplerYcbcrConversion; - } - - bool operator<( SamplerYcbcrConversion const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_samplerYcbcrConversion < rhs.m_samplerYcbcrConversion; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSamplerYcbcrConversion() const VULKAN_HPP_NOEXCEPT { return m_samplerYcbcrConversion; @@ -9815,25 +9103,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( ShaderModule const & ) const = default; -#else - bool operator==( ShaderModule const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_shaderModule == rhs.m_shaderModule; - } - - bool operator!=( ShaderModule const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_shaderModule != rhs.m_shaderModule; - } - - bool operator<( ShaderModule const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_shaderModule < rhs.m_shaderModule; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkShaderModule() const VULKAN_HPP_NOEXCEPT { return m_shaderModule; @@ -9929,25 +9198,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if 
defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( ValidationCacheEXT const & ) const = default; -#else - bool operator==( ValidationCacheEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_validationCacheEXT == rhs.m_validationCacheEXT; - } - - bool operator!=( ValidationCacheEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_validationCacheEXT != rhs.m_validationCacheEXT; - } - - bool operator<( ValidationCacheEXT const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_validationCacheEXT < rhs.m_validationCacheEXT; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkValidationCacheEXT() const VULKAN_HPP_NOEXCEPT { return m_validationCacheEXT; @@ -10045,25 +9295,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( VideoSessionParametersKHR const & ) const = default; -#else - bool operator==( VideoSessionParametersKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_videoSessionParametersKHR == rhs.m_videoSessionParametersKHR; - } - - bool operator!=( VideoSessionParametersKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_videoSessionParametersKHR != rhs.m_videoSessionParametersKHR; - } - - bool operator<( VideoSessionParametersKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_videoSessionParametersKHR < rhs.m_videoSessionParametersKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkVideoSessionParametersKHR() const VULKAN_HPP_NOEXCEPT { return m_videoSessionParametersKHR; @@ -10149,25 +9380,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PipelineBinaryKHR const & ) const = default; -#else - bool operator==( PipelineBinaryKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineBinaryKHR == rhs.m_pipelineBinaryKHR; - } - - bool operator!=( PipelineBinaryKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineBinaryKHR != rhs.m_pipelineBinaryKHR; - } - - bool operator<( PipelineBinaryKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_pipelineBinaryKHR < rhs.m_pipelineBinaryKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineBinaryKHR() const VULKAN_HPP_NOEXCEPT { return m_pipelineBinaryKHR; @@ -10251,25 +9463,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Queue const & ) const = default; -#else - bool operator==( Queue const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_queue == rhs.m_queue; - } - - bool operator!=( Queue const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_queue != rhs.m_queue; - } - - bool operator<( Queue const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_queue < rhs.m_queue; - } -#endif - //=== VK_VERSION_1_0 === template @@ -10523,25 +9716,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( Device const & ) const = default; -#else - bool operator==( Device const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_device == rhs.m_device; - } - - bool operator!=( Device const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_device != rhs.m_device; - } - - bool operator<( Device const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_device < rhs.m_device; - } -#endif - //=== VK_VERSION_1_0 === template @@ -12351,6 +11525,112 @@ namespace VULKAN_HPP_NAMESPACE Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== 
VK_VERSION_1_4 === + + template + VULKAN_HPP_NODISCARD Result mapMemory2( const VULKAN_HPP_NAMESPACE::MemoryMapInfo * pMemoryMapInfo, + void ** ppData, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD typename ResultValueType::type mapMemory2( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD Result unmapMemory2( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo * pMemoryUnmapInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type unmapMemory2( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void getRenderingAreaGranularity( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo * pRenderingAreaInfo, + VULKAN_HPP_NAMESPACE::Extent2D * pGranularity, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D + getRenderingAreaGranularity( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo * pInfo, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain + getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + void getImageSubresourceLayout2( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getImageSubresourceLayout2( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain + getImageSubresourceLayout2( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD Result copyMemoryToImage( const 
VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo * pCopyMemoryToImageInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type + copyMemoryToImage( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD Result copyImageToMemory( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo * pCopyImageToMemoryInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type + copyImageToMemory( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD Result copyImageToImage( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo * pCopyImageToImageInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type + copyImageToImage( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + + template + VULKAN_HPP_NODISCARD Result transitionImageLayout( uint32_t transitionCount, + const VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfo * pTransitions, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type + transitionImageLayout( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ + //=== VK_KHR_swapchain === template @@ -14440,83 +13720,83 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_host_image_copy === template - VULKAN_HPP_NODISCARD Result copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT * pCopyMemoryToImageInfo, + VULKAN_HPP_NODISCARD Result copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo * pCopyMemoryToImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type - copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT & copyMemoryToImageInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD Result copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT * pCopyImageToMemoryInfo, + VULKAN_HPP_NODISCARD Result copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo * pCopyImageToMemoryInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type - copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT & copyImageToMemoryInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD Result copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT * pCopyImageToImageInfo, + VULKAN_HPP_NODISCARD Result copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo * pCopyImageToImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type - copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT & copyImageToImageInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD Result transitionImageLayoutEXT( uint32_t transitionCount, - const VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfoEXT * pTransitions, + VULKAN_HPP_NODISCARD Result transitionImageLayoutEXT( uint32_t transitionCount, + const VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfo * pTransitions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type - transitionImageLayoutEXT( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions, + transitionImageLayoutEXT( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource, - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR * pLayout, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; template VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain - getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, - const 
VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + getImageSubresourceLayout2EXT( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ //=== VK_KHR_map_memory2 === template - VULKAN_HPP_NODISCARD Result mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR * pMemoryMapInfo, - void ** ppData, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD Result mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfo * pMemoryMapInfo, + void ** ppData, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_NODISCARD typename ResultValueType::type mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR & memoryMapInfo, + VULKAN_HPP_NODISCARD typename ResultValueType::type mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - VULKAN_HPP_NODISCARD Result unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR * pMemoryUnmapInfo, + VULKAN_HPP_NODISCARD Result unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo * pMemoryUnmapInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - typename ResultValueType::type unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR & memoryUnmapInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + typename ResultValueType::type unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ //=== VK_EXT_swapchain_maintenance1 === @@ -15445,47 +14725,47 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance5 === template - void getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR * pRenderingAreaInfo, - VULKAN_HPP_NAMESPACE::Extent2D * pGranularity, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo * pRenderingAreaInfo, + VULKAN_HPP_NAMESPACE::Extent2D * pGranularity, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D - getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR & renderingAreaInfo, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR * pInfo, - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR * pLayout, + void getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo * pInfo, + 
VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info, + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; template VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain - getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info, + getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ template - void getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource, - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR * pLayout, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + void getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource, + VULKAN_HPP_NAMESPACE::SubresourceLayout2 * pLayout, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE template - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; template VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain - getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource, - Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + getImageSubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::Image image, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource, + Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */ //=== VK_AMD_anti_lag === @@ -16064,25 +15344,6 @@ namespace VULKAN_HPP_NAMESPACE return *this; } -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DisplayModeKHR const & ) const = default; -#else - bool operator==( DisplayModeKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_displayModeKHR == rhs.m_displayModeKHR; - } - - bool operator!=( DisplayModeKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_displayModeKHR != rhs.m_displayModeKHR; - } - - bool operator<( DisplayModeKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return m_displayModeKHR < rhs.m_displayModeKHR; - } -#endif - VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayModeKHR() const VULKAN_HPP_NOEXCEPT { return m_displayModeKHR; @@ -16172,25 +15433,6 @@ namespace 
VULKAN_HPP_NAMESPACE
       return *this;
     }
 
-#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
-    auto operator<=>( PhysicalDevice const & ) const = default;
-#else
-    bool operator==( PhysicalDevice const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return m_physicalDevice == rhs.m_physicalDevice;
-    }
-
-    bool operator!=( PhysicalDevice const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return m_physicalDevice != rhs.m_physicalDevice;
-    }
-
-    bool operator<( PhysicalDevice const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return m_physicalDevice < rhs.m_physicalDevice;
-    }
-#endif
-
     //=== VK_VERSION_1_0 ===
 
     template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
@@ -17758,25 +17000,6 @@ namespace VULKAN_HPP_NAMESPACE
       return *this;
     }
 
-#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
-    auto operator<=>( Instance const & ) const = default;
-#else
-    bool operator==( Instance const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return m_instance == rhs.m_instance;
-    }
-
-    bool operator!=( Instance const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return m_instance != rhs.m_instance;
-    }
-
-    bool operator<( Instance const & rhs ) const VULKAN_HPP_NOEXCEPT
-    {
-      return m_instance < rhs.m_instance;
-    }
-#endif
-
     //=== VK_VERSION_1_0 ===
 
     template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
@@ -18484,29 +17707,4 @@ namespace VULKAN_HPP_NAMESPACE
 
 #endif /* VULKAN_HPP_DISABLE_ENHANCED_MODE */
 }  // namespace VULKAN_HPP_NAMESPACE
-
-// operators to compare vk::-handles with nullptr
-template <typename T>
-typename std::enable_if<VULKAN_HPP_NAMESPACE::isVulkanHandleType<T>::value, bool>::type operator==( const T & v, std::nullptr_t )
-{
-  return !v;
-}
-
-template <typename T>
-typename std::enable_if<VULKAN_HPP_NAMESPACE::isVulkanHandleType<T>::value, bool>::type operator==( std::nullptr_t, const T & v )
-{
-  return !v;
-}
-
-template <typename T>
-typename std::enable_if<VULKAN_HPP_NAMESPACE::isVulkanHandleType<T>::value, bool>::type operator!=( const T & v, std::nullptr_t )
-{
-  return v;
-}
-
-template <typename T>
-typename std::enable_if<VULKAN_HPP_NAMESPACE::isVulkanHandleType<T>::value, bool>::type operator!=( std::nullptr_t, const T & v )
-{
-  return v;
-}
 #endif
diff --git a/third_party/vulkan/vulkan_hash.hpp b/third_party/vulkan/vulkan_hash.hpp
index 0d6d35c..0f54c3f 100644
--- a/third_party/vulkan/vulkan_hash.hpp
+++ b/third_party/vulkan/vulkan_hash.hpp
@@ -1394,20 +1394,20 @@ namespace std
   };
 
   template <>
-  struct hash<VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR>
+  struct hash<VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo>
   {
-    std::size_t operator()( VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR const & bindDescriptorSetsInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo const & bindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT
     {
       std::size_t seed = 0;
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.sType );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.pNext );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.stageFlags );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.layout );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.firstSet );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.descriptorSetCount );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.pDescriptorSets );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.dynamicOffsetCount );
-      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfoKHR.pDynamicOffsets );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.stageFlags );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.layout );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.firstSet );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.descriptorSetCount );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.pDescriptorSets );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.dynamicOffsetCount );
+      VULKAN_HPP_HASH_COMBINE( seed, bindDescriptorSetsInfo.pDynamicOffsets );
       return seed;
     }
   };
@@ -1521,14 +1521,14 @@ namespace std
   };
 
   template <>
-  struct hash<VULKAN_HPP_NAMESPACE::BindMemoryStatusKHR>
+  struct hash<VULKAN_HPP_NAMESPACE::BindMemoryStatus>
   {
-    std::size_t operator()( VULKAN_HPP_NAMESPACE::BindMemoryStatusKHR const & bindMemoryStatusKHR ) const VULKAN_HPP_NOEXCEPT
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::BindMemoryStatus const & bindMemoryStatus ) const VULKAN_HPP_NOEXCEPT
     {
       std::size_t seed = 0;
-      VULKAN_HPP_HASH_COMBINE( seed, bindMemoryStatusKHR.sType );
-      VULKAN_HPP_HASH_COMBINE( seed, bindMemoryStatusKHR.pNext );
-      VULKAN_HPP_HASH_COMBINE( seed, bindMemoryStatusKHR.pResult );
+      VULKAN_HPP_HASH_COMBINE( seed, bindMemoryStatus.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, bindMemoryStatus.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, bindMemoryStatus.pResult );
       return seed;
     }
   };
@@ -2108,14 +2108,14 @@ namespace std
   };
 
   template <>
-  struct hash<VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfoKHR>
+  struct hash<VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfo>
   {
-    std::size_t operator()( VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfoKHR const & bufferUsageFlags2CreateInfoKHR ) const VULKAN_HPP_NOEXCEPT
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfo const & bufferUsageFlags2CreateInfo ) const VULKAN_HPP_NOEXCEPT
     {
       std::size_t seed = 0;
-      VULKAN_HPP_HASH_COMBINE( seed, bufferUsageFlags2CreateInfoKHR.sType );
-      VULKAN_HPP_HASH_COMBINE( seed, bufferUsageFlags2CreateInfoKHR.pNext );
-      VULKAN_HPP_HASH_COMBINE( seed, bufferUsageFlags2CreateInfoKHR.usage );
+      VULKAN_HPP_HASH_COMBINE( seed, bufferUsageFlags2CreateInfo.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, bufferUsageFlags2CreateInfo.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, bufferUsageFlags2CreateInfo.usage );
       return seed;
     }
   };
@@ -2724,55 +2724,55 @@ namespace std
   };
 
   template <>
-  struct hash<VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT>
+  struct hash<VULKAN_HPP_NAMESPACE::CopyImageToImageInfo>
   {
-    std::size_t operator()( VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT const & copyImageToImageInfoEXT ) const VULKAN_HPP_NOEXCEPT
+    std::size_t operator()( VULKAN_HPP_NAMESPACE::CopyImageToImageInfo const & copyImageToImageInfo ) const VULKAN_HPP_NOEXCEPT
     {
       std::size_t seed = 0;
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.sType );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.pNext );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.flags );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.srcImage );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.srcImageLayout );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.dstImage );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.dstImageLayout );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.regionCount );
-      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfoEXT.pRegions );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.sType );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.pNext );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.flags );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.srcImage );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.srcImageLayout );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.dstImage );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.dstImageLayout );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.regionCount );
+      VULKAN_HPP_HASH_COMBINE( seed, copyImageToImageInfo.pRegions );
       return seed;
     }
   };
 
   template <>
-  struct hash<VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT>
+  struct hash<VULKAN_HPP_NAMESPACE::ImageToMemoryCopy>
   {
-    std::size_t operator()( VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT const & imageToMemoryCopyEXT ) const
VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::ImageToMemoryCopy const & imageToMemoryCopy ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.pHostPointer ); - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.memoryRowLength ); - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.memoryImageHeight ); - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.imageSubresource ); - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.imageOffset ); - VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopyEXT.imageExtent ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.sType ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.pHostPointer ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.memoryRowLength ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.memoryImageHeight ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.imageSubresource ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.imageOffset ); + VULKAN_HPP_HASH_COMBINE( seed, imageToMemoryCopy.imageExtent ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT const & copyImageToMemoryInfoEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo const & copyImageToMemoryInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfoEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfoEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfoEXT.flags ); - VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfoEXT.srcImage ); - VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfoEXT.srcImageLayout ); - VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfoEXT.regionCount ); - VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfoEXT.pRegions ); + VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfo.flags ); + VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfo.srcImage ); + VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfo.srcImageLayout ); + VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfo.regionCount ); + VULKAN_HPP_HASH_COMBINE( seed, copyImageToMemoryInfo.pRegions ); return seed; } }; @@ -2807,36 +2807,36 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT const & memoryToImageCopyEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::MemoryToImageCopy const & memoryToImageCopy ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.pHostPointer ); - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.memoryRowLength ); - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.memoryImageHeight ); - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.imageSubresource ); - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.imageOffset ); - VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopyEXT.imageExtent ); + VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.sType ); + 
VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.pHostPointer ); + VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.memoryRowLength ); + VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.memoryImageHeight ); + VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.imageSubresource ); + VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.imageOffset ); + VULKAN_HPP_HASH_COMBINE( seed, memoryToImageCopy.imageExtent ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT const & copyMemoryToImageInfoEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo const & copyMemoryToImageInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfoEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfoEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfoEXT.flags ); - VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfoEXT.dstImage ); - VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfoEXT.dstImageLayout ); - VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfoEXT.regionCount ); - VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfoEXT.pRegions ); + VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfo.flags ); + VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfo.dstImage ); + VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfo.dstImageLayout ); + VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfo.regionCount ); + VULKAN_HPP_HASH_COMBINE( seed, copyMemoryToImageInfo.pRegions ); return seed; } }; @@ -4052,28 +4052,28 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::ImageSubresource2KHR const & imageSubresource2KHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::ImageSubresource2 const & imageSubresource2 ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, imageSubresource2KHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, imageSubresource2KHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, imageSubresource2KHR.imageSubresource ); + VULKAN_HPP_HASH_COMBINE( seed, imageSubresource2.sType ); + VULKAN_HPP_HASH_COMBINE( seed, imageSubresource2.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, imageSubresource2.imageSubresource ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR const & deviceImageSubresourceInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo const & deviceImageSubresourceInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfoKHR.pCreateInfo ); - VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfoKHR.pSubresource ); + VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfo.pCreateInfo ); + VULKAN_HPP_HASH_COMBINE( seed, deviceImageSubresourceInfo.pSubresource ); return seed; } }; @@ -4152,15 +4152,14 @@ namespace std }; 
template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfoKHR const & deviceQueueGlobalPriorityCreateInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfo const & deviceQueueGlobalPriorityCreateInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, deviceQueueGlobalPriorityCreateInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, deviceQueueGlobalPriorityCreateInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, deviceQueueGlobalPriorityCreateInfoKHR.globalPriority ); + VULKAN_HPP_HASH_COMBINE( seed, deviceQueueGlobalPriorityCreateInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, deviceQueueGlobalPriorityCreateInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, deviceQueueGlobalPriorityCreateInfo.globalPriority ); return seed; } }; @@ -5860,32 +5859,31 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::HostImageCopyDevicePerformanceQueryEXT const & hostImageCopyDevicePerformanceQueryEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::HostImageCopyDevicePerformanceQuery const & hostImageCopyDevicePerformanceQuery ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQueryEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQueryEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQueryEXT.optimalDeviceAccess ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQueryEXT.identicalMemoryLayout ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQuery.sType ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQuery.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQuery.optimalDeviceAccess ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageCopyDevicePerformanceQuery.identicalMemoryLayout ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfoEXT const & hostImageLayoutTransitionInfoEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfo const & hostImageLayoutTransitionInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfoEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfoEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfoEXT.image ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfoEXT.oldLayout ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfoEXT.newLayout ); - VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfoEXT.subresourceRange ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfo.image ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfo.oldLayout ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfo.newLayout ); + VULKAN_HPP_HASH_COMBINE( seed, hostImageLayoutTransitionInfo.subresourceRange ); return seed; } }; @@ -7205,17 +7203,17 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR const & memoryMapInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t 
operator()( VULKAN_HPP_NAMESPACE::MemoryMapInfo const & memoryMapInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfoKHR.flags ); - VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfoKHR.memory ); - VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfoKHR.offset ); - VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfoKHR.size ); + VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfo.flags ); + VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfo.memory ); + VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfo.offset ); + VULKAN_HPP_HASH_COMBINE( seed, memoryMapInfo.size ); return seed; } }; @@ -7299,15 +7297,15 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR const & memoryUnmapInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::MemoryUnmapInfo const & memoryUnmapInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfoKHR.flags ); - VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfoKHR.memory ); + VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfo.flags ); + VULKAN_HPP_HASH_COMBINE( seed, memoryUnmapInfo.memory ); return seed; } }; @@ -8907,16 +8905,15 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & physicalDeviceDynamicRenderingLocalReadFeaturesKHR ) const - VULKAN_HPP_NOEXCEPT + std::size_t operator()( + VULKAN_HPP_NAMESPACE::PhysicalDeviceDynamicRenderingLocalReadFeatures const & physicalDeviceDynamicRenderingLocalReadFeatures ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceDynamicRenderingLocalReadFeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceDynamicRenderingLocalReadFeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceDynamicRenderingLocalReadFeaturesKHR.dynamicRenderingLocalRead ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceDynamicRenderingLocalReadFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceDynamicRenderingLocalReadFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceDynamicRenderingLocalReadFeatures.dynamicRenderingLocalRead ); return seed; } }; @@ -9503,15 +9500,15 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeaturesKHR const & physicalDeviceGlobalPriorityQueryFeaturesKHR ) const - VULKAN_HPP_NOEXCEPT + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeatures const & physicalDeviceGlobalPriorityQueryFeatures ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceGlobalPriorityQueryFeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceGlobalPriorityQueryFeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceGlobalPriorityQueryFeaturesKHR.globalPriorityQuery ); + VULKAN_HPP_HASH_COMBINE( seed, 
physicalDeviceGlobalPriorityQueryFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceGlobalPriorityQueryFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceGlobalPriorityQueryFeatures.globalPriorityQuery ); return seed; } }; @@ -9578,37 +9575,36 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyFeaturesEXT const & physicalDeviceHostImageCopyFeaturesEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyFeatures const & physicalDeviceHostImageCopyFeatures ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyFeaturesEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyFeaturesEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyFeaturesEXT.hostImageCopy ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyFeatures.hostImageCopy ); return seed; } }; template <> - struct hash + struct hash { std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyPropertiesEXT const & physicalDeviceHostImageCopyPropertiesEXT ) const VULKAN_HPP_NOEXCEPT + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyProperties const & physicalDeviceHostImageCopyProperties ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.copySrcLayoutCount ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.pCopySrcLayouts ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.copyDstLayoutCount ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.pCopyDstLayouts ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.copySrcLayoutCount ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.pCopySrcLayouts ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.copyDstLayoutCount ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.pCopyDstLayouts ); for ( size_t i = 0; i < VK_UUID_SIZE; ++i ) { - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.optimalTilingLayoutUUID[i] ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.optimalTilingLayoutUUID[i] ); } - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyPropertiesEXT.identicalMemoryTypeRequirements ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceHostImageCopyProperties.identicalMemoryTypeRequirements ); return seed; } }; @@ -9889,15 +9885,14 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8FeaturesKHR const & physicalDeviceIndexTypeUint8FeaturesKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8Features const & physicalDeviceIndexTypeUint8Features ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 
0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceIndexTypeUint8FeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceIndexTypeUint8FeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceIndexTypeUint8FeaturesKHR.indexTypeUint8 ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceIndexTypeUint8Features.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceIndexTypeUint8Features.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceIndexTypeUint8Features.indexTypeUint8 ); return seed; } }; @@ -10256,34 +10251,34 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeaturesKHR const & physicalDeviceLineRasterizationFeaturesKHR ) const - VULKAN_HPP_NOEXCEPT + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeatures const & physicalDeviceLineRasterizationFeatures ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.rectangularLines ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.bresenhamLines ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.smoothLines ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.stippledRectangularLines ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.stippledBresenhamLines ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeaturesKHR.stippledSmoothLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.rectangularLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.bresenhamLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.smoothLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.stippledRectangularLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.stippledBresenhamLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationFeatures.stippledSmoothLines ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationPropertiesKHR const & physicalDeviceLineRasterizationPropertiesKHR ) const - VULKAN_HPP_NOEXCEPT + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationProperties const & physicalDeviceLineRasterizationProperties ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationPropertiesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationPropertiesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationPropertiesKHR.lineSubPixelPrecisionBits ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationProperties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationProperties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceLineRasterizationProperties.lineSubPixelPrecisionBits ); return seed; } }; @@ -10343,64 +10338,60 @@ namespace std }; template <> - struct hash + struct hash { - 
std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5FeaturesKHR const & physicalDeviceMaintenance5FeaturesKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5Features const & physicalDeviceMaintenance5Features ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5FeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5FeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5FeaturesKHR.maintenance5 ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Features.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Features.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Features.maintenance5 ); return seed; } }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5PropertiesKHR const & physicalDeviceMaintenance5PropertiesKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5Properties const & physicalDeviceMaintenance5Properties ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.earlyFragmentMultisampleCoverageAfterSampleCounting ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.earlyFragmentSampleMaskTestBeforeSampleCounting ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.depthStencilSwizzleOneSupport ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.polygonModePointSize ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.nonStrictSinglePixelWideLinesUseParallelogram ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5PropertiesKHR.nonStrictWideLinesUseParallelogram ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.earlyFragmentMultisampleCoverageAfterSampleCounting ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.earlyFragmentSampleMaskTestBeforeSampleCounting ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.depthStencilSwizzleOneSupport ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.polygonModePointSize ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.nonStrictSinglePixelWideLinesUseParallelogram ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance5Properties.nonStrictWideLinesUseParallelogram ); return seed; } }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6FeaturesKHR const & physicalDeviceMaintenance6FeaturesKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6Features const & physicalDeviceMaintenance6Features ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6FeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6FeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6FeaturesKHR.maintenance6 
); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Features.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Features.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Features.maintenance6 ); return seed; } }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6PropertiesKHR const & physicalDeviceMaintenance6PropertiesKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6Properties const & physicalDeviceMaintenance6Properties ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6PropertiesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6PropertiesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6PropertiesKHR.blockTexelViewCompatibleMultipleLayers ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6PropertiesKHR.maxCombinedImageSamplerDescriptorCount ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6PropertiesKHR.fragmentShadingRateClampCombinerInputs ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Properties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Properties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Properties.blockTexelViewCompatibleMultipleLayers ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Properties.maxCombinedImageSamplerDescriptorCount ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceMaintenance6Properties.fragmentShadingRateClampCombinerInputs ); return seed; } }; @@ -11086,46 +11077,46 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineProtectedAccessFeaturesEXT const & physicalDevicePipelineProtectedAccessFeaturesEXT ) - const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineProtectedAccessFeatures const & physicalDevicePipelineProtectedAccessFeatures ) const + VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineProtectedAccessFeaturesEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineProtectedAccessFeaturesEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineProtectedAccessFeaturesEXT.pipelineProtectedAccess ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineProtectedAccessFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineProtectedAccessFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineProtectedAccessFeatures.pipelineProtectedAccess ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessFeaturesEXT const & physicalDevicePipelineRobustnessFeaturesEXT ) const - VULKAN_HPP_NOEXCEPT + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessFeatures const & physicalDevicePipelineRobustnessFeatures ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessFeaturesEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessFeaturesEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessFeaturesEXT.pipelineRobustness ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, 
physicalDevicePipelineRobustnessFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessFeatures.pipelineRobustness ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessPropertiesEXT const & physicalDevicePipelineRobustnessPropertiesEXT ) const + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessProperties const & physicalDevicePipelineRobustnessProperties ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessPropertiesEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessPropertiesEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessPropertiesEXT.defaultRobustnessStorageBuffers ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessPropertiesEXT.defaultRobustnessUniformBuffers ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessPropertiesEXT.defaultRobustnessVertexInputs ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessPropertiesEXT.defaultRobustnessImages ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessProperties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessProperties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessProperties.defaultRobustnessStorageBuffers ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessProperties.defaultRobustnessUniformBuffers ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessProperties.defaultRobustnessVertexInputs ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePipelineRobustnessProperties.defaultRobustnessImages ); return seed; } }; @@ -11348,15 +11339,15 @@ namespace std }; template <> - struct hash + struct hash { std::size_t - operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePushDescriptorPropertiesKHR const & physicalDevicePushDescriptorPropertiesKHR ) const VULKAN_HPP_NOEXCEPT + operator()( VULKAN_HPP_NAMESPACE::PhysicalDevicePushDescriptorProperties const & physicalDevicePushDescriptorProperties ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePushDescriptorPropertiesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePushDescriptorPropertiesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePushDescriptorPropertiesKHR.maxPushDescriptors ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePushDescriptorProperties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePushDescriptorProperties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDevicePushDescriptorProperties.maxPushDescriptors ); return seed; } }; @@ -12024,15 +12015,15 @@ namespace std # endif /*VK_ENABLE_BETA_EXTENSIONS*/ template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderExpectAssumeFeaturesKHR const & physicalDeviceShaderExpectAssumeFeaturesKHR ) const - VULKAN_HPP_NOEXCEPT + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderExpectAssumeFeatures const & physicalDeviceShaderExpectAssumeFeatures ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderExpectAssumeFeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderExpectAssumeFeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderExpectAssumeFeaturesKHR.shaderExpectAssume ); + VULKAN_HPP_HASH_COMBINE( 
seed, physicalDeviceShaderExpectAssumeFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderExpectAssumeFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderExpectAssumeFeatures.shaderExpectAssume ); return seed; } }; @@ -12053,15 +12044,15 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderFloatControls2FeaturesKHR const & physicalDeviceShaderFloatControls2FeaturesKHR ) const + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderFloatControls2Features const & physicalDeviceShaderFloatControls2Features ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderFloatControls2FeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderFloatControls2FeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderFloatControls2FeaturesKHR.shaderFloatControls2 ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderFloatControls2Features.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderFloatControls2Features.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderFloatControls2Features.shaderFloatControls2 ); return seed; } }; @@ -12333,16 +12324,16 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderSubgroupRotateFeaturesKHR const & physicalDeviceShaderSubgroupRotateFeaturesKHR ) const + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderSubgroupRotateFeatures const & physicalDeviceShaderSubgroupRotateFeatures ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeaturesKHR.shaderSubgroupRotate ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeaturesKHR.shaderSubgroupRotateClustered ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeatures.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeatures.shaderSubgroupRotate ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceShaderSubgroupRotateFeatures.shaderSubgroupRotateClustered ); return seed; } }; @@ -12770,16 +12761,31 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( - VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeaturesKHR const & physicalDeviceVertexAttributeDivisorFeaturesKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeatures const & physicalDeviceVertexAttributeDivisorFeatures ) const + VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeaturesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeaturesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeaturesKHR.vertexAttributeInstanceRateDivisor ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeaturesKHR.vertexAttributeInstanceRateZeroDivisor ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeatures.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeatures.pNext ); + 
VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeatures.vertexAttributeInstanceRateDivisor ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorFeatures.vertexAttributeInstanceRateZeroDivisor ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorProperties const & physicalDeviceVertexAttributeDivisorProperties ) const + VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorProperties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorProperties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorProperties.maxVertexAttribDivisor ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorProperties.supportsNonZeroFirstInstance ); return seed; } }; @@ -12798,21 +12804,6 @@ namespace std } }; - template <> - struct hash - { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorPropertiesKHR const & physicalDeviceVertexAttributeDivisorPropertiesKHR ) - const VULKAN_HPP_NOEXCEPT - { - std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorPropertiesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorPropertiesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorPropertiesKHR.maxVertexAttribDivisor ); - VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVertexAttributeDivisorPropertiesKHR.supportsNonZeroFirstInstance ); - return seed; - } - }; - template <> struct hash { @@ -13202,6 +13193,79 @@ namespace std } }; + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceVulkan14Features const & physicalDeviceVulkan14Features ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.globalPriorityQuery ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.shaderSubgroupRotate ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.shaderSubgroupRotateClustered ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.shaderFloatControls2 ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.shaderExpectAssume ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.rectangularLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.bresenhamLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.smoothLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.stippledRectangularLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.stippledBresenhamLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.stippledSmoothLines ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.vertexAttributeInstanceRateDivisor ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.vertexAttributeInstanceRateZeroDivisor ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.indexTypeUint8 ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.dynamicRenderingLocalRead ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.maintenance5 ); + VULKAN_HPP_HASH_COMBINE( seed, 
physicalDeviceVulkan14Features.maintenance6 ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.pipelineProtectedAccess ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.pipelineRobustness ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.hostImageCopy ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Features.pushDescriptor ); + return seed; + } + }; + + template <> + struct hash + { + std::size_t operator()( VULKAN_HPP_NAMESPACE::PhysicalDeviceVulkan14Properties const & physicalDeviceVulkan14Properties ) const VULKAN_HPP_NOEXCEPT + { + std::size_t seed = 0; + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.lineSubPixelPrecisionBits ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.maxVertexAttribDivisor ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.supportsNonZeroFirstInstance ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.maxPushDescriptors ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.dynamicRenderingLocalReadDepthStencilAttachments ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.dynamicRenderingLocalReadMultisampledAttachments ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.earlyFragmentMultisampleCoverageAfterSampleCounting ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.earlyFragmentSampleMaskTestBeforeSampleCounting ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.depthStencilSwizzleOneSupport ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.polygonModePointSize ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.nonStrictSinglePixelWideLinesUseParallelogram ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.nonStrictWideLinesUseParallelogram ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.blockTexelViewCompatibleMultipleLayers ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.maxCombinedImageSamplerDescriptorCount ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.fragmentShadingRateClampCombinerInputs ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.defaultRobustnessStorageBuffers ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.defaultRobustnessUniformBuffers ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.defaultRobustnessVertexInputs ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.defaultRobustnessImages ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.copySrcLayoutCount ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.pCopySrcLayouts ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.copyDstLayoutCount ); + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.pCopyDstLayouts ); + for ( size_t i = 0; i < VK_UUID_SIZE; ++i ) + { + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.optimalTilingLayoutUUID[i] ); + } + VULKAN_HPP_HASH_COMBINE( seed, physicalDeviceVulkan14Properties.identicalMemoryTypeRequirements ); + return seed; + } + }; + template <> struct hash { @@ -13528,14 +13592,14 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( 
VULKAN_HPP_NAMESPACE::PipelineCreateFlags2CreateInfoKHR const & pipelineCreateFlags2CreateInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2CreateInfo const & pipelineCreateFlags2CreateInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateFlags2CreateInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateFlags2CreateInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateFlags2CreateInfoKHR.flags ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateFlags2CreateInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateFlags2CreateInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineCreateFlags2CreateInfo.flags ); return seed; } }; @@ -13774,18 +13838,18 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfoKHR const & pipelineRasterizationLineStateCreateInfoKHR ) const - VULKAN_HPP_NOEXCEPT + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfo const & pipelineRasterizationLineStateCreateInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfoKHR.lineRasterizationMode ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfoKHR.stippledLineEnable ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfoKHR.lineStippleFactor ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfoKHR.lineStipplePattern ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfo.lineRasterizationMode ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfo.stippledLineEnable ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfo.lineStippleFactor ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRasterizationLineStateCreateInfo.lineStipplePattern ); return seed; } }; @@ -13866,17 +13930,17 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineRobustnessCreateInfoEXT const & pipelineRobustnessCreateInfoEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineRobustnessCreateInfo const & pipelineRobustnessCreateInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfoEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfoEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfoEXT.storageBuffers ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfoEXT.uniformBuffers ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfoEXT.vertexInputs ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfoEXT.images ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfo.storageBuffers ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfo.uniformBuffers ); + VULKAN_HPP_HASH_COMBINE( 
seed, pipelineRobustnessCreateInfo.vertexInputs ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineRobustnessCreateInfo.images ); return seed; } }; @@ -13960,29 +14024,28 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR const & vertexInputBindingDivisorDescriptionKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescription const & vertexInputBindingDivisorDescription ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, vertexInputBindingDivisorDescriptionKHR.binding ); - VULKAN_HPP_HASH_COMBINE( seed, vertexInputBindingDivisorDescriptionKHR.divisor ); + VULKAN_HPP_HASH_COMBINE( seed, vertexInputBindingDivisorDescription.binding ); + VULKAN_HPP_HASH_COMBINE( seed, vertexInputBindingDivisorDescription.divisor ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfoKHR const & pipelineVertexInputDivisorStateCreateInfoKHR ) const - VULKAN_HPP_NOEXCEPT + std::size_t + operator()( VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfo const & pipelineVertexInputDivisorStateCreateInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfoKHR.vertexBindingDivisorCount ); - VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfoKHR.pVertexBindingDivisors ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfo.vertexBindingDivisorCount ); + VULKAN_HPP_HASH_COMBINE( seed, pipelineVertexInputDivisorStateCreateInfo.pVertexBindingDivisors ); return seed; } }; @@ -14273,18 +14336,18 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR const & pushConstantsInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PushConstantsInfo const & pushConstantsInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfoKHR.layout ); - VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfoKHR.stageFlags ); - VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfoKHR.offset ); - VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfoKHR.size ); - VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfoKHR.pValues ); + VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfo.layout ); + VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfo.stageFlags ); + VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfo.offset ); + VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfo.size ); + VULKAN_HPP_HASH_COMBINE( seed, pushConstantsInfo.pValues ); return seed; } }; @@ -14310,34 +14373,34 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR const & pushDescriptorSetInfoKHR ) 
const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo const & pushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfoKHR.stageFlags ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfoKHR.layout ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfoKHR.set ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfoKHR.descriptorWriteCount ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfoKHR.pDescriptorWrites ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfo.stageFlags ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfo.layout ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfo.set ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfo.descriptorWriteCount ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetInfo.pDescriptorWrites ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR const & pushDescriptorSetWithTemplateInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo const & pushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfoKHR.descriptorUpdateTemplate ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfoKHR.layout ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfoKHR.set ); - VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfoKHR.pData ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfo.descriptorUpdateTemplate ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfo.layout ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfo.set ); + VULKAN_HPP_HASH_COMBINE( seed, pushDescriptorSetWithTemplateInfo.pData ); return seed; } }; @@ -14441,18 +14504,17 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t - operator()( VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityPropertiesKHR const & queueFamilyGlobalPriorityPropertiesKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityProperties const & queueFamilyGlobalPriorityProperties ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityPropertiesKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityPropertiesKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityPropertiesKHR.priorityCount ); - for ( size_t i = 0; i < VK_MAX_GLOBAL_PRIORITY_SIZE_KHR; ++i ) + VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityProperties.sType ); + VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityProperties.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityProperties.priorityCount ); + for ( size_t i = 0; i < 
VK_MAX_GLOBAL_PRIORITY_SIZE; ++i ) { - VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityPropertiesKHR.priorities[i] ); + VULKAN_HPP_HASH_COMBINE( seed, queueFamilyGlobalPriorityProperties.priorities[i] ); } return seed; } @@ -15012,32 +15074,32 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR const & renderingAreaInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::RenderingAreaInfo const & renderingAreaInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfoKHR.viewMask ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfoKHR.colorAttachmentCount ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfoKHR.pColorAttachmentFormats ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfoKHR.depthAttachmentFormat ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfoKHR.stencilAttachmentFormat ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfo.viewMask ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfo.colorAttachmentCount ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfo.pColorAttachmentFormats ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfo.depthAttachmentFormat ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAreaInfo.stencilAttachmentFormat ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR const & renderingAttachmentLocationInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo const & renderingAttachmentLocationInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfoKHR.colorAttachmentCount ); - VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfoKHR.pColorAttachmentLocations ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfo.colorAttachmentCount ); + VULKAN_HPP_HASH_COMBINE( seed, renderingAttachmentLocationInfo.pColorAttachmentLocations ); return seed; } }; @@ -15094,17 +15156,17 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR const & renderingInputAttachmentIndexInfoKHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo const & renderingInputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfoKHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfoKHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfoKHR.colorAttachmentCount ); - VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfoKHR.pColorAttachmentInputIndices ); - VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfoKHR.pDepthInputAttachmentIndex ); - 
VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfoKHR.pStencilInputAttachmentIndex ); + VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfo.sType ); + VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfo.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfo.colorAttachmentCount ); + VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfo.pColorAttachmentInputIndices ); + VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfo.pDepthInputAttachmentIndex ); + VULKAN_HPP_HASH_COMBINE( seed, renderingInputAttachmentIndexInfo.pStencilInputAttachmentIndex ); return seed; } }; @@ -15815,27 +15877,27 @@ namespace std }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::SubresourceHostMemcpySizeEXT const & subresourceHostMemcpySizeEXT ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::SubresourceHostMemcpySize const & subresourceHostMemcpySize ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, subresourceHostMemcpySizeEXT.sType ); - VULKAN_HPP_HASH_COMBINE( seed, subresourceHostMemcpySizeEXT.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, subresourceHostMemcpySizeEXT.size ); + VULKAN_HPP_HASH_COMBINE( seed, subresourceHostMemcpySize.sType ); + VULKAN_HPP_HASH_COMBINE( seed, subresourceHostMemcpySize.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, subresourceHostMemcpySize.size ); return seed; } }; template <> - struct hash + struct hash { - std::size_t operator()( VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR const & subresourceLayout2KHR ) const VULKAN_HPP_NOEXCEPT + std::size_t operator()( VULKAN_HPP_NAMESPACE::SubresourceLayout2 const & subresourceLayout2 ) const VULKAN_HPP_NOEXCEPT { std::size_t seed = 0; - VULKAN_HPP_HASH_COMBINE( seed, subresourceLayout2KHR.sType ); - VULKAN_HPP_HASH_COMBINE( seed, subresourceLayout2KHR.pNext ); - VULKAN_HPP_HASH_COMBINE( seed, subresourceLayout2KHR.subresourceLayout ); + VULKAN_HPP_HASH_COMBINE( seed, subresourceLayout2.sType ); + VULKAN_HPP_HASH_COMBINE( seed, subresourceLayout2.pNext ); + VULKAN_HPP_HASH_COMBINE( seed, subresourceLayout2.subresourceLayout ); return seed; } }; diff --git a/third_party/vulkan/vulkan_raii.hpp b/third_party/vulkan/vulkan_raii.hpp index 08010d5..3cf1e0e 100644 --- a/third_party/vulkan/vulkan_raii.hpp +++ b/third_party/vulkan/vulkan_raii.hpp @@ -871,6 +871,29 @@ namespace VULKAN_HPP_NAMESPACE vkGetDeviceImageSparseMemoryRequirements = PFN_vkGetDeviceImageSparseMemoryRequirements( vkGetDeviceProcAddr( device, "vkGetDeviceImageSparseMemoryRequirements" ) ); + //=== VK_VERSION_1_4 === + vkCmdSetLineStipple = PFN_vkCmdSetLineStipple( vkGetDeviceProcAddr( device, "vkCmdSetLineStipple" ) ); + vkMapMemory2 = PFN_vkMapMemory2( vkGetDeviceProcAddr( device, "vkMapMemory2" ) ); + vkUnmapMemory2 = PFN_vkUnmapMemory2( vkGetDeviceProcAddr( device, "vkUnmapMemory2" ) ); + vkCmdBindIndexBuffer2 = PFN_vkCmdBindIndexBuffer2( vkGetDeviceProcAddr( device, "vkCmdBindIndexBuffer2" ) ); + vkGetRenderingAreaGranularity = PFN_vkGetRenderingAreaGranularity( vkGetDeviceProcAddr( device, "vkGetRenderingAreaGranularity" ) ); + vkGetDeviceImageSubresourceLayout = PFN_vkGetDeviceImageSubresourceLayout( vkGetDeviceProcAddr( device, "vkGetDeviceImageSubresourceLayout" ) ); + vkGetImageSubresourceLayout2 = PFN_vkGetImageSubresourceLayout2( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2" ) ); + vkCmdPushDescriptorSet = PFN_vkCmdPushDescriptorSet( vkGetDeviceProcAddr( device, 
"vkCmdPushDescriptorSet" ) ); + vkCmdPushDescriptorSetWithTemplate = PFN_vkCmdPushDescriptorSetWithTemplate( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplate" ) ); + vkCmdSetRenderingAttachmentLocations = + PFN_vkCmdSetRenderingAttachmentLocations( vkGetDeviceProcAddr( device, "vkCmdSetRenderingAttachmentLocations" ) ); + vkCmdSetRenderingInputAttachmentIndices = + PFN_vkCmdSetRenderingInputAttachmentIndices( vkGetDeviceProcAddr( device, "vkCmdSetRenderingInputAttachmentIndices" ) ); + vkCmdBindDescriptorSets2 = PFN_vkCmdBindDescriptorSets2( vkGetDeviceProcAddr( device, "vkCmdBindDescriptorSets2" ) ); + vkCmdPushConstants2 = PFN_vkCmdPushConstants2( vkGetDeviceProcAddr( device, "vkCmdPushConstants2" ) ); + vkCmdPushDescriptorSet2 = PFN_vkCmdPushDescriptorSet2( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSet2" ) ); + vkCmdPushDescriptorSetWithTemplate2 = PFN_vkCmdPushDescriptorSetWithTemplate2( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplate2" ) ); + vkCopyMemoryToImage = PFN_vkCopyMemoryToImage( vkGetDeviceProcAddr( device, "vkCopyMemoryToImage" ) ); + vkCopyImageToMemory = PFN_vkCopyImageToMemory( vkGetDeviceProcAddr( device, "vkCopyImageToMemory" ) ); + vkCopyImageToImage = PFN_vkCopyImageToImage( vkGetDeviceProcAddr( device, "vkCopyImageToImage" ) ); + vkTransitionImageLayout = PFN_vkTransitionImageLayout( vkGetDeviceProcAddr( device, "vkTransitionImageLayout" ) ); + //=== VK_KHR_swapchain === vkCreateSwapchainKHR = PFN_vkCreateSwapchainKHR( vkGetDeviceProcAddr( device, "vkCreateSwapchainKHR" ) ); vkDestroySwapchainKHR = PFN_vkDestroySwapchainKHR( vkGetDeviceProcAddr( device, "vkDestroySwapchainKHR" ) ); @@ -993,8 +1016,12 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_push_descriptor === vkCmdPushDescriptorSetKHR = PFN_vkCmdPushDescriptorSetKHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetKHR" ) ); + if ( !vkCmdPushDescriptorSet ) + vkCmdPushDescriptorSet = vkCmdPushDescriptorSetKHR; vkCmdPushDescriptorSetWithTemplateKHR = PFN_vkCmdPushDescriptorSetWithTemplateKHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplateKHR" ) ); + if ( !vkCmdPushDescriptorSetWithTemplate ) + vkCmdPushDescriptorSetWithTemplate = vkCmdPushDescriptorSetWithTemplateKHR; //=== VK_EXT_conditional_rendering === vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT( vkGetDeviceProcAddr( device, "vkCmdBeginConditionalRenderingEXT" ) ); @@ -1277,8 +1304,12 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === vkCmdSetRenderingAttachmentLocationsKHR = PFN_vkCmdSetRenderingAttachmentLocationsKHR( vkGetDeviceProcAddr( device, "vkCmdSetRenderingAttachmentLocationsKHR" ) ); + if ( !vkCmdSetRenderingAttachmentLocations ) + vkCmdSetRenderingAttachmentLocations = vkCmdSetRenderingAttachmentLocationsKHR; vkCmdSetRenderingInputAttachmentIndicesKHR = PFN_vkCmdSetRenderingInputAttachmentIndicesKHR( vkGetDeviceProcAddr( device, "vkCmdSetRenderingInputAttachmentIndicesKHR" ) ); + if ( !vkCmdSetRenderingInputAttachmentIndices ) + vkCmdSetRenderingInputAttachmentIndices = vkCmdSetRenderingInputAttachmentIndicesKHR; //=== VK_EXT_buffer_device_address === vkGetBufferDeviceAddressEXT = PFN_vkGetBufferDeviceAddressEXT( vkGetDeviceProcAddr( device, "vkGetBufferDeviceAddressEXT" ) ); @@ -1310,8 +1341,8 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_line_rasterization === vkCmdSetLineStippleEXT = PFN_vkCmdSetLineStippleEXT( vkGetDeviceProcAddr( device, "vkCmdSetLineStippleEXT" ) ); - if ( !vkCmdSetLineStippleKHR ) - 
vkCmdSetLineStippleKHR = vkCmdSetLineStippleEXT; + if ( !vkCmdSetLineStipple ) + vkCmdSetLineStipple = vkCmdSetLineStippleEXT; //=== VK_EXT_host_query_reset === vkResetQueryPoolEXT = PFN_vkResetQueryPoolEXT( vkGetDeviceProcAddr( device, "vkResetQueryPoolEXT" ) ); @@ -1373,17 +1404,29 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetPipelineExecutableInternalRepresentationsKHR( vkGetDeviceProcAddr( device, "vkGetPipelineExecutableInternalRepresentationsKHR" ) ); //=== VK_EXT_host_image_copy === - vkCopyMemoryToImageEXT = PFN_vkCopyMemoryToImageEXT( vkGetDeviceProcAddr( device, "vkCopyMemoryToImageEXT" ) ); - vkCopyImageToMemoryEXT = PFN_vkCopyImageToMemoryEXT( vkGetDeviceProcAddr( device, "vkCopyImageToMemoryEXT" ) ); - vkCopyImageToImageEXT = PFN_vkCopyImageToImageEXT( vkGetDeviceProcAddr( device, "vkCopyImageToImageEXT" ) ); - vkTransitionImageLayoutEXT = PFN_vkTransitionImageLayoutEXT( vkGetDeviceProcAddr( device, "vkTransitionImageLayoutEXT" ) ); + vkCopyMemoryToImageEXT = PFN_vkCopyMemoryToImageEXT( vkGetDeviceProcAddr( device, "vkCopyMemoryToImageEXT" ) ); + if ( !vkCopyMemoryToImage ) + vkCopyMemoryToImage = vkCopyMemoryToImageEXT; + vkCopyImageToMemoryEXT = PFN_vkCopyImageToMemoryEXT( vkGetDeviceProcAddr( device, "vkCopyImageToMemoryEXT" ) ); + if ( !vkCopyImageToMemory ) + vkCopyImageToMemory = vkCopyImageToMemoryEXT; + vkCopyImageToImageEXT = PFN_vkCopyImageToImageEXT( vkGetDeviceProcAddr( device, "vkCopyImageToImageEXT" ) ); + if ( !vkCopyImageToImage ) + vkCopyImageToImage = vkCopyImageToImageEXT; + vkTransitionImageLayoutEXT = PFN_vkTransitionImageLayoutEXT( vkGetDeviceProcAddr( device, "vkTransitionImageLayoutEXT" ) ); + if ( !vkTransitionImageLayout ) + vkTransitionImageLayout = vkTransitionImageLayoutEXT; vkGetImageSubresourceLayout2EXT = PFN_vkGetImageSubresourceLayout2EXT( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2EXT" ) ); - if ( !vkGetImageSubresourceLayout2KHR ) - vkGetImageSubresourceLayout2KHR = vkGetImageSubresourceLayout2EXT; + if ( !vkGetImageSubresourceLayout2 ) + vkGetImageSubresourceLayout2 = vkGetImageSubresourceLayout2EXT; //=== VK_KHR_map_memory2 === - vkMapMemory2KHR = PFN_vkMapMemory2KHR( vkGetDeviceProcAddr( device, "vkMapMemory2KHR" ) ); + vkMapMemory2KHR = PFN_vkMapMemory2KHR( vkGetDeviceProcAddr( device, "vkMapMemory2KHR" ) ); + if ( !vkMapMemory2 ) + vkMapMemory2 = vkMapMemory2KHR; vkUnmapMemory2KHR = PFN_vkUnmapMemory2KHR( vkGetDeviceProcAddr( device, "vkUnmapMemory2KHR" ) ); + if ( !vkUnmapMemory2 ) + vkUnmapMemory2 = vkUnmapMemory2KHR; //=== VK_EXT_swapchain_maintenance1 === vkReleaseSwapchainImagesEXT = PFN_vkReleaseSwapchainImagesEXT( vkGetDeviceProcAddr( device, "vkReleaseSwapchainImagesEXT" ) ); @@ -1679,11 +1722,19 @@ namespace VULKAN_HPP_NAMESPACE vkCmdOpticalFlowExecuteNV = PFN_vkCmdOpticalFlowExecuteNV( vkGetDeviceProcAddr( device, "vkCmdOpticalFlowExecuteNV" ) ); //=== VK_KHR_maintenance5 === - vkCmdBindIndexBuffer2KHR = PFN_vkCmdBindIndexBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdBindIndexBuffer2KHR" ) ); + vkCmdBindIndexBuffer2KHR = PFN_vkCmdBindIndexBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdBindIndexBuffer2KHR" ) ); + if ( !vkCmdBindIndexBuffer2 ) + vkCmdBindIndexBuffer2 = vkCmdBindIndexBuffer2KHR; vkGetRenderingAreaGranularityKHR = PFN_vkGetRenderingAreaGranularityKHR( vkGetDeviceProcAddr( device, "vkGetRenderingAreaGranularityKHR" ) ); + if ( !vkGetRenderingAreaGranularity ) + vkGetRenderingAreaGranularity = vkGetRenderingAreaGranularityKHR; vkGetDeviceImageSubresourceLayoutKHR = 
PFN_vkGetDeviceImageSubresourceLayoutKHR( vkGetDeviceProcAddr( device, "vkGetDeviceImageSubresourceLayoutKHR" ) ); + if ( !vkGetDeviceImageSubresourceLayout ) + vkGetDeviceImageSubresourceLayout = vkGetDeviceImageSubresourceLayoutKHR; vkGetImageSubresourceLayout2KHR = PFN_vkGetImageSubresourceLayout2KHR( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout2KHR" ) ); + if ( !vkGetImageSubresourceLayout2 ) + vkGetImageSubresourceLayout2 = vkGetImageSubresourceLayout2KHR; //=== VK_AMD_anti_lag === vkAntiLagUpdateAMD = PFN_vkAntiLagUpdateAMD( vkGetDeviceProcAddr( device, "vkAntiLagUpdateAMD" ) ); @@ -1725,16 +1776,26 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_line_rasterization === vkCmdSetLineStippleKHR = PFN_vkCmdSetLineStippleKHR( vkGetDeviceProcAddr( device, "vkCmdSetLineStippleKHR" ) ); + if ( !vkCmdSetLineStipple ) + vkCmdSetLineStipple = vkCmdSetLineStippleKHR; //=== VK_KHR_calibrated_timestamps === vkGetCalibratedTimestampsKHR = PFN_vkGetCalibratedTimestampsKHR( vkGetDeviceProcAddr( device, "vkGetCalibratedTimestampsKHR" ) ); //=== VK_KHR_maintenance6 === vkCmdBindDescriptorSets2KHR = PFN_vkCmdBindDescriptorSets2KHR( vkGetDeviceProcAddr( device, "vkCmdBindDescriptorSets2KHR" ) ); - vkCmdPushConstants2KHR = PFN_vkCmdPushConstants2KHR( vkGetDeviceProcAddr( device, "vkCmdPushConstants2KHR" ) ); - vkCmdPushDescriptorSet2KHR = PFN_vkCmdPushDescriptorSet2KHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSet2KHR" ) ); + if ( !vkCmdBindDescriptorSets2 ) + vkCmdBindDescriptorSets2 = vkCmdBindDescriptorSets2KHR; + vkCmdPushConstants2KHR = PFN_vkCmdPushConstants2KHR( vkGetDeviceProcAddr( device, "vkCmdPushConstants2KHR" ) ); + if ( !vkCmdPushConstants2 ) + vkCmdPushConstants2 = vkCmdPushConstants2KHR; + vkCmdPushDescriptorSet2KHR = PFN_vkCmdPushDescriptorSet2KHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSet2KHR" ) ); + if ( !vkCmdPushDescriptorSet2 ) + vkCmdPushDescriptorSet2 = vkCmdPushDescriptorSet2KHR; vkCmdPushDescriptorSetWithTemplate2KHR = PFN_vkCmdPushDescriptorSetWithTemplate2KHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplate2KHR" ) ); + if ( !vkCmdPushDescriptorSetWithTemplate2 ) + vkCmdPushDescriptorSetWithTemplate2 = vkCmdPushDescriptorSetWithTemplate2KHR; vkCmdSetDescriptorBufferOffsets2EXT = PFN_vkCmdSetDescriptorBufferOffsets2EXT( vkGetDeviceProcAddr( device, "vkCmdSetDescriptorBufferOffsets2EXT" ) ); vkCmdBindDescriptorBufferEmbeddedSamplers2EXT = PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT( vkGetDeviceProcAddr( device, "vkCmdBindDescriptorBufferEmbeddedSamplers2EXT" ) ); @@ -1949,6 +2010,27 @@ namespace VULKAN_HPP_NAMESPACE PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements = 0; PFN_vkGetDeviceImageSparseMemoryRequirements vkGetDeviceImageSparseMemoryRequirements = 0; + //=== VK_VERSION_1_4 === + PFN_vkCmdSetLineStipple vkCmdSetLineStipple = 0; + PFN_vkMapMemory2 vkMapMemory2 = 0; + PFN_vkUnmapMemory2 vkUnmapMemory2 = 0; + PFN_vkCmdBindIndexBuffer2 vkCmdBindIndexBuffer2 = 0; + PFN_vkGetRenderingAreaGranularity vkGetRenderingAreaGranularity = 0; + PFN_vkGetDeviceImageSubresourceLayout vkGetDeviceImageSubresourceLayout = 0; + PFN_vkGetImageSubresourceLayout2 vkGetImageSubresourceLayout2 = 0; + PFN_vkCmdPushDescriptorSet vkCmdPushDescriptorSet = 0; + PFN_vkCmdPushDescriptorSetWithTemplate vkCmdPushDescriptorSetWithTemplate = 0; + PFN_vkCmdSetRenderingAttachmentLocations vkCmdSetRenderingAttachmentLocations = 0; + PFN_vkCmdSetRenderingInputAttachmentIndices vkCmdSetRenderingInputAttachmentIndices = 0; + 
PFN_vkCmdBindDescriptorSets2 vkCmdBindDescriptorSets2 = 0; + PFN_vkCmdPushConstants2 vkCmdPushConstants2 = 0; + PFN_vkCmdPushDescriptorSet2 vkCmdPushDescriptorSet2 = 0; + PFN_vkCmdPushDescriptorSetWithTemplate2 vkCmdPushDescriptorSetWithTemplate2 = 0; + PFN_vkCopyMemoryToImage vkCopyMemoryToImage = 0; + PFN_vkCopyImageToMemory vkCopyImageToMemory = 0; + PFN_vkCopyImageToImage vkCopyImageToImage = 0; + PFN_vkTransitionImageLayout vkTransitionImageLayout = 0; + //=== VK_KHR_swapchain === PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR = 0; PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR = 0; @@ -2772,6 +2854,12 @@ namespace VULKAN_HPP_NAMESPACE //=== RAII HANDLES === //==================== + template + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = false; + }; + class Context { public: @@ -3146,6 +3234,12 @@ namespace VULKAN_HPP_NAMESPACE std::unique_ptr m_dispatcher; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class PhysicalDevice { public: @@ -3591,6 +3685,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::InstanceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class PhysicalDevices : public std::vector { public: @@ -3985,6 +4085,30 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NODISCARD std::vector getImageSparseMemoryRequirements( const VULKAN_HPP_NAMESPACE::DeviceImageMemoryRequirements & info ) const; + //=== VK_VERSION_1_4 === + + VULKAN_HPP_NODISCARD void * mapMemory2( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo ) const; + + void unmapMemory2( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo ) const; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D + getRenderingAreaGranularity( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo ) const VULKAN_HPP_NOEXCEPT; + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain + getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT; + + void copyMemoryToImage( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo ) const; + + void copyImageToMemory( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo ) const; + + void copyImageToImage( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo ) const; + + void transitionImageLayout( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions ) const; + //=== VK_KHR_swapchain === VULKAN_HPP_NODISCARD @@ -4416,19 +4540,19 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_host_image_copy === - void copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT & copyMemoryToImageInfo ) const; + void copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo ) const; - void copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT & copyImageToMemoryInfo ) const; + void copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo ) const; - void copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT & copyImageToImageInfo ) const; + void copyImageToImageEXT( const 
VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo ) const; - void transitionImageLayoutEXT( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions ) const; + void transitionImageLayoutEXT( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions ) const; //=== VK_KHR_map_memory2 === - VULKAN_HPP_NODISCARD void * mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR & memoryMapInfo ) const; + VULKAN_HPP_NODISCARD void * mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo ) const; - void unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR & memoryUnmapInfo ) const; + void unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo ) const; //=== VK_EXT_swapchain_maintenance1 === @@ -4662,14 +4786,14 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance5 === VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D - getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR & renderingAreaInfo ) const VULKAN_HPP_NOEXCEPT; + getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo ) const VULKAN_HPP_NOEXCEPT; - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT; template VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain - getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info ) const VULKAN_HPP_NOEXCEPT; + getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT; //=== VK_AMD_anti_lag === @@ -4754,6 +4878,12 @@ namespace VULKAN_HPP_NAMESPACE std::unique_ptr m_dispatcher; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class AccelerationStructureKHR { public: @@ -4874,6 +5004,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class AccelerationStructureNV { public: @@ -5002,6 +5138,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Buffer { public: @@ -5127,6 +5269,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + # if defined( VK_USE_PLATFORM_FUCHSIA ) class BufferCollectionFUCHSIA { @@ -5255,6 +5403,13 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::AllocationCallbacks * m_allocator = {}; VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + # endif /*VK_USE_PLATFORM_FUCHSIA*/ class BufferView @@ -5376,6 +5531,12 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class CommandPool { public: @@ -5507,6 +5668,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class CommandBuffer { public: @@ -5881,6 +6048,41 @@ namespace VULKAN_HPP_NAMESPACE void setPrimitiveRestartEnable( VULKAN_HPP_NAMESPACE::Bool32 primitiveRestartEnable ) const VULKAN_HPP_NOEXCEPT; + //=== VK_VERSION_1_4 === + + void setLineStipple( uint32_t lineStippleFactor, uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT; + + void bindIndexBuffer2( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + VULKAN_HPP_NAMESPACE::IndexType indexType ) const VULKAN_HPP_NOEXCEPT; + + void pushDescriptorSet( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + VULKAN_HPP_NAMESPACE::ArrayProxy const & descriptorWrites ) const + VULKAN_HPP_NOEXCEPT; + + template + void pushDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + DataType const & data ) const VULKAN_HPP_NOEXCEPT; + + void setRenderingAttachmentLocations( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo ) const VULKAN_HPP_NOEXCEPT; + + void setRenderingInputAttachmentIndices( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo ) const + VULKAN_HPP_NOEXCEPT; + + void bindDescriptorSets2( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT; + + void pushConstants2( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo ) const VULKAN_HPP_NOEXCEPT; + + void pushDescriptorSet2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT; + + void pushDescriptorSetWithTemplate2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo ) const + VULKAN_HPP_NOEXCEPT; + //=== VK_EXT_debug_marker === void debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT & markerInfo ) const VULKAN_HPP_NOEXCEPT; @@ -6206,9 +6408,9 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === - void setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR & locationInfo ) const VULKAN_HPP_NOEXCEPT; + void setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo ) const VULKAN_HPP_NOEXCEPT; - void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo ) const + void setRenderingInputAttachmentIndicesKHR( const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT; //=== VK_EXT_line_rasterization === @@ -6539,13 +6741,13 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance6 === - void bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR & bindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT; + 
void bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT; - void pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR & pushConstantsInfo ) const VULKAN_HPP_NOEXCEPT; + void pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo ) const VULKAN_HPP_NOEXCEPT; - void pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR & pushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT; + void pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT; - void pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR & pushDescriptorSetWithTemplateInfo ) const + void pushDescriptorSetWithTemplate2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT; void setDescriptorBufferOffsets2EXT( const VULKAN_HPP_NAMESPACE::SetDescriptorBufferOffsetsInfoEXT & setDescriptorBufferOffsetsInfo ) const @@ -6569,6 +6771,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class CommandBuffers : public std::vector { public: @@ -6714,6 +6922,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class CuModuleNVX { public: @@ -6833,6 +7047,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + # if defined( VK_ENABLE_BETA_EXTENSIONS ) class CudaFunctionNV { @@ -6952,6 +7172,13 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::AllocationCallbacks * m_allocator = {}; VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + # endif /*VK_ENABLE_BETA_EXTENSIONS*/ # if defined( VK_ENABLE_BETA_EXTENSIONS ) @@ -7077,6 +7304,13 @@ namespace VULKAN_HPP_NAMESPACE const VULKAN_HPP_NAMESPACE::AllocationCallbacks * m_allocator = {}; VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + # endif /*VK_ENABLE_BETA_EXTENSIONS*/ class DebugReportCallbackEXT @@ -7199,6 +7433,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::InstanceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DebugUtilsMessengerEXT { public: @@ -7319,6 +7559,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::InstanceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class 
DeferredOperationKHR { public: @@ -7446,6 +7692,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DescriptorPool { public: @@ -7570,6 +7822,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DescriptorSet { public: @@ -7691,6 +7949,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DescriptorSets : public std::vector { public: @@ -7843,6 +8107,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DescriptorUpdateTemplate { public: @@ -7963,6 +8233,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DeviceMemory { public: @@ -8102,6 +8378,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DisplayKHR { public: @@ -8247,6 +8529,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::InstanceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class DisplayKHRs : public std::vector { public: @@ -8380,6 +8668,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::InstanceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Event { public: @@ -8507,6 +8801,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Fence { public: @@ -8649,6 +8949,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Framebuffer { public: @@ -8772,6 +9078,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Image { public: @@ -8895,27 +9207,36 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NODISCARD 
VULKAN_HPP_NAMESPACE::SubresourceLayout getSubresourceLayout( const VULKAN_HPP_NAMESPACE::ImageSubresource & subresource ) const VULKAN_HPP_NOEXCEPT; + //=== VK_VERSION_1_4 === + + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getSubresourceLayout2( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT; + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain + getSubresourceLayout2( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT; + //=== VK_EXT_image_drm_format_modifier === VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT getDrmFormatModifierPropertiesEXT() const; //=== VK_EXT_host_image_copy === - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT; template VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain - getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT; + getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT; //=== VK_KHR_maintenance5 === - VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT; + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout2 + getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT; template VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::StructureChain - getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT; + getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT; private: VULKAN_HPP_NAMESPACE::Device m_device = {}; @@ -8924,6 +9245,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class ImageView { public: @@ -9047,6 +9374,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class IndirectCommandsLayoutEXT { public: @@ -9167,6 +9500,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class IndirectCommandsLayoutNV { public: @@ -9287,6 +9626,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class IndirectExecutionSetEXT { public: @@ -9415,6 +9760,12 @@ namespace VULKAN_HPP_NAMESPACE 
VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class MicromapEXT { public: @@ -9534,6 +9885,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class OpticalFlowSessionNV { public: @@ -9660,6 +10017,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class PerformanceConfigurationINTEL { public: @@ -9767,6 +10130,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class PipelineCache { public: @@ -9893,6 +10262,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Pipeline { public: @@ -10110,6 +10485,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Pipelines : public std::vector { public: @@ -10305,6 +10686,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class PipelineBinaryKHRs : public std::vector { public: @@ -10452,6 +10839,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class PrivateDataSlot { public: @@ -10572,6 +10965,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class QueryPool { public: @@ -10716,6 +11115,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Queue { public: @@ -10864,6 +11269,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class RenderPass { public: @@ -11000,6 +11411,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = 
nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Sampler { public: @@ -11119,6 +11536,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class SamplerYcbcrConversion { public: @@ -11239,6 +11662,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class Semaphore { public: @@ -11366,6 +11795,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class ShaderEXT { public: @@ -11502,6 +11937,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class ShaderEXTs : public std::vector { public: @@ -11652,6 +12093,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class SurfaceKHR { public: @@ -11923,6 +12370,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::InstanceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class SwapchainKHR { public: @@ -12091,6 +12544,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class SwapchainKHRs : public std::vector { public: @@ -12244,6 +12703,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class VideoSessionKHR { public: @@ -12370,6 +12835,12 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + class VideoSessionParametersKHR { public: @@ -12494,6 +12965,63 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::VULKAN_HPP_RAII_NAMESPACE::detail::DeviceDispatcher const * m_dispatcher = nullptr; }; + template <> + struct isVulkanRAIIHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + // operators to compare vk::raii-handles +# if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + template ::value, bool>::type = 0> + auto operator<=>( T const & a, T const & b ) VULKAN_HPP_NOEXCEPT + { + return *a <=> *b; + } +# else + template ::value, bool>::type = 0> + bool 
operator==( T const & a, T const & b ) VULKAN_HPP_NOEXCEPT + { + return *a == *b; + } + + template ::value, bool>::type = 0> + bool operator!=( T const & a, T const & b ) VULKAN_HPP_NOEXCEPT + { + return *a != *b; + } + + template ::value, bool>::type = 0> + bool operator<( T const & a, T const & b ) VULKAN_HPP_NOEXCEPT + { + return *a < *b; + } +# endif + + template ::value, bool>::type = 0> + bool operator==( const T & v, std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + return !*v; + } + + template ::value, bool>::type = 0> + bool operator==( std::nullptr_t, const T & v ) VULKAN_HPP_NOEXCEPT + { + return !*v; + } + + template ::value, bool>::type = 0> + bool operator!=( const T & v, std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + return *v; + } + + template ::value, bool>::type = 0> + bool operator!=( std::nullptr_t, const T & v ) VULKAN_HPP_NOEXCEPT + { + return *v; + } + //=========================== //=== COMMAND Definitions === //=========================== @@ -15550,6 +16078,257 @@ namespace VULKAN_HPP_NAMESPACE return sparseMemoryRequirements; } + //=== VK_VERSION_1_4 === + + VULKAN_HPP_INLINE void CommandBuffer::setLineStipple( uint32_t lineStippleFactor, uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetLineStipple && + "Function requires or or " ); + + getDispatcher()->vkCmdSetLineStipple( static_cast( m_commandBuffer ), lineStippleFactor, lineStipplePattern ); + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE void * Device::mapMemory2( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkMapMemory2 && "Function requires or " ); + + void * pData; + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkMapMemory2( static_cast( m_device ), reinterpret_cast( &memoryMapInfo ), &pData ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2" ); + + return pData; + } + + VULKAN_HPP_INLINE void Device::unmapMemory2( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkUnmapMemory2 && "Function requires or " ); + + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkUnmapMemory2( static_cast( m_device ), reinterpret_cast( &memoryUnmapInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2" ); + } + + VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer2( VULKAN_HPP_NAMESPACE::Buffer buffer, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size, + VULKAN_HPP_NAMESPACE::IndexType indexType ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdBindIndexBuffer2 && "Function requires or " ); + + getDispatcher()->vkCmdBindIndexBuffer2( static_cast( m_commandBuffer ), + static_cast( buffer ), + static_cast( offset ), + static_cast( size ), + static_cast( indexType ) ); + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Extent2D + Device::getRenderingAreaGranularity( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkGetRenderingAreaGranularity && + "Function requires or " ); + + VULKAN_HPP_NAMESPACE::Extent2D granularity; + getDispatcher()->vkGetRenderingAreaGranularity( static_cast( m_device ), + reinterpret_cast( &renderingAreaInfo ), + reinterpret_cast( &granularity ) ); + + return granularity; + } + + VULKAN_HPP_NODISCARD 
VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 + Device::getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkGetDeviceImageSubresourceLayout && + "Function requires or " ); + + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; + getDispatcher()->vkGetDeviceImageSubresourceLayout( static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &layout ) ); + + return layout; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain + Device::getImageSubresourceLayout( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkGetDeviceImageSubresourceLayout && + "Function requires or " ); + + VULKAN_HPP_NAMESPACE::StructureChain structureChain; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); + getDispatcher()->vkGetDeviceImageSubresourceLayout( static_cast( m_device ), + reinterpret_cast( &info ), + reinterpret_cast( &layout ) ); + + return structureChain; + } + + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 + Image::getSubresourceLayout2( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( + getDispatcher()->vkGetImageSubresourceLayout2 && + "Function requires or or or " ); + + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; + getDispatcher()->vkGetImageSubresourceLayout2( static_cast( m_device ), + static_cast( m_image ), + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); + + return layout; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain + Image::getSubresourceLayout2( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( + getDispatcher()->vkGetImageSubresourceLayout2 && + "Function requires or or or " ); + + VULKAN_HPP_NAMESPACE::StructureChain structureChain; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); + getDispatcher()->vkGetImageSubresourceLayout2( static_cast( m_device ), + static_cast( m_image ), + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); + + return structureChain; + } + + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSet( + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + VULKAN_HPP_NAMESPACE::ArrayProxy const & descriptorWrites ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSet && "Function requires or " ); + + getDispatcher()->vkCmdPushDescriptorSet( static_cast( m_commandBuffer ), + static_cast( pipelineBindPoint ), + static_cast( layout ), + set, + descriptorWrites.size(), + reinterpret_cast( descriptorWrites.data() ) ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, + VULKAN_HPP_NAMESPACE::PipelineLayout layout, + uint32_t set, + DataType const & data ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( + getDispatcher()->vkCmdPushDescriptorSetWithTemplate && + "Function requires or or " ); + + getDispatcher()->vkCmdPushDescriptorSetWithTemplate( static_cast( m_commandBuffer ), + static_cast( descriptorUpdateTemplate ), + static_cast( layout ), + set, + reinterpret_cast( &data ) ); + } + 
+ VULKAN_HPP_INLINE void + CommandBuffer::setRenderingAttachmentLocations( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetRenderingAttachmentLocations && + "Function requires or " ); + + getDispatcher()->vkCmdSetRenderingAttachmentLocations( static_cast( m_commandBuffer ), + reinterpret_cast( &locationInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::setRenderingInputAttachmentIndices( + const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetRenderingInputAttachmentIndices && + "Function requires or " ); + + getDispatcher()->vkCmdSetRenderingInputAttachmentIndices( static_cast( m_commandBuffer ), + reinterpret_cast( &inputAttachmentIndexInfo ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::bindDescriptorSets2( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdBindDescriptorSets2 && + "Function requires or " ); + + getDispatcher()->vkCmdBindDescriptorSets2( static_cast( m_commandBuffer ), + reinterpret_cast( &bindDescriptorSetsInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::pushConstants2( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushConstants2 && "Function requires or " ); + + getDispatcher()->vkCmdPushConstants2( static_cast( m_commandBuffer ), + reinterpret_cast( &pushConstantsInfo ) ); + } + + VULKAN_HPP_INLINE void + CommandBuffer::pushDescriptorSet2( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSet2 && "Function requires or " ); + + getDispatcher()->vkCmdPushDescriptorSet2( static_cast( m_commandBuffer ), + reinterpret_cast( &pushDescriptorSetInfo ) ); + } + + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplate2( + const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSetWithTemplate2 && + "Function requires or " ); + + getDispatcher()->vkCmdPushDescriptorSetWithTemplate2( + static_cast( m_commandBuffer ), reinterpret_cast( &pushDescriptorSetWithTemplateInfo ) ); + } + + VULKAN_HPP_INLINE void Device::copyMemoryToImage( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCopyMemoryToImage && "Function requires or " ); + + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyMemoryToImage( + static_cast( m_device ), reinterpret_cast( ©MemoryToImageInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImage" ); + } + + VULKAN_HPP_INLINE void Device::copyImageToMemory( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCopyImageToMemory && "Function requires or " ); + + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyImageToMemory( + static_cast( m_device ), reinterpret_cast( ©ImageToMemoryInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemory" ); + } + + VULKAN_HPP_INLINE void 
Device::copyImageToImage( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkCopyImageToImage && "Function requires or " ); + + VULKAN_HPP_NAMESPACE::Result result = static_cast( + getDispatcher()->vkCopyImageToImage( static_cast( m_device ), reinterpret_cast( ©ImageToImageInfo ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImage" ); + } + + VULKAN_HPP_INLINE void + Device::transitionImageLayout( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions ) const + { + VULKAN_HPP_ASSERT( getDispatcher()->vkTransitionImageLayout && + "Function requires or " ); + + VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkTransitionImageLayout( + static_cast( m_device ), transitions.size(), reinterpret_cast( transitions.data() ) ) ); + VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayout" ); + } + //=== VK_KHR_surface === VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, @@ -17455,7 +18234,8 @@ namespace VULKAN_HPP_NAMESPACE uint32_t set, VULKAN_HPP_NAMESPACE::ArrayProxy const & descriptorWrites ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSetKHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSetKHR && + "Function requires or " ); getDispatcher()->vkCmdPushDescriptorSetKHR( static_cast( m_commandBuffer ), static_cast( pipelineBindPoint ), @@ -17471,8 +18251,9 @@ namespace VULKAN_HPP_NAMESPACE uint32_t set, DataType const & data ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSetWithTemplateKHR && - "Function requires or " ); + VULKAN_HPP_ASSERT( + getDispatcher()->vkCmdPushDescriptorSetWithTemplateKHR && + "Function requires or or " ); getDispatcher()->vkCmdPushDescriptorSetWithTemplateKHR( static_cast( m_commandBuffer ), static_cast( descriptorUpdateTemplate ), @@ -20273,24 +21054,24 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_dynamic_rendering_local_read === - VULKAN_HPP_INLINE void CommandBuffer::setRenderingAttachmentLocationsKHR( - const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR & locationInfo ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void + CommandBuffer::setRenderingAttachmentLocationsKHR( const VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo & locationInfo ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetRenderingAttachmentLocationsKHR && - "Function requires " ); + "Function requires or " ); getDispatcher()->vkCmdSetRenderingAttachmentLocationsKHR( static_cast( m_commandBuffer ), - reinterpret_cast( &locationInfo ) ); + reinterpret_cast( &locationInfo ) ); } VULKAN_HPP_INLINE void CommandBuffer::setRenderingInputAttachmentIndicesKHR( - const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR & inputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT + const VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo & inputAttachmentIndexInfo ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetRenderingInputAttachmentIndicesKHR && - "Function requires " ); + "Function requires or " ); - getDispatcher()->vkCmdSetRenderingInputAttachmentIndicesKHR( - static_cast( m_commandBuffer ), reinterpret_cast( &inputAttachmentIndexInfo ) ); + getDispatcher()->vkCmdSetRenderingInputAttachmentIndicesKHR( static_cast( 
m_commandBuffer ), + reinterpret_cast( &inputAttachmentIndexInfo ) ); } //=== VK_EXT_buffer_device_address === @@ -20563,7 +21344,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_INLINE void CommandBuffer::setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetLineStippleEXT && - "Function requires or " ); + "Function requires or or " ); getDispatcher()->vkCmdSetLineStippleEXT( static_cast( m_commandBuffer ), lineStippleFactor, lineStipplePattern ); } @@ -20881,97 +21662,98 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_EXT_host_image_copy === - VULKAN_HPP_INLINE void Device::copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT & copyMemoryToImageInfo ) const + VULKAN_HPP_INLINE void Device::copyMemoryToImageEXT( const VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo & copyMemoryToImageInfo ) const { - VULKAN_HPP_ASSERT( getDispatcher()->vkCopyMemoryToImageEXT && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCopyMemoryToImageEXT && "Function requires or " ); VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyMemoryToImageEXT( - static_cast( m_device ), reinterpret_cast( ©MemoryToImageInfo ) ) ); + static_cast( m_device ), reinterpret_cast( ©MemoryToImageInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToImageEXT" ); } - VULKAN_HPP_INLINE void Device::copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT & copyImageToMemoryInfo ) const + VULKAN_HPP_INLINE void Device::copyImageToMemoryEXT( const VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo & copyImageToMemoryInfo ) const { - VULKAN_HPP_ASSERT( getDispatcher()->vkCopyImageToMemoryEXT && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCopyImageToMemoryEXT && "Function requires or " ); VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyImageToMemoryEXT( - static_cast( m_device ), reinterpret_cast( ©ImageToMemoryInfo ) ) ); + static_cast( m_device ), reinterpret_cast( ©ImageToMemoryInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToMemoryEXT" ); } - VULKAN_HPP_INLINE void Device::copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT & copyImageToImageInfo ) const + VULKAN_HPP_INLINE void Device::copyImageToImageEXT( const VULKAN_HPP_NAMESPACE::CopyImageToImageInfo & copyImageToImageInfo ) const { - VULKAN_HPP_ASSERT( getDispatcher()->vkCopyImageToImageEXT && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCopyImageToImageEXT && "Function requires or " ); VULKAN_HPP_NAMESPACE::Result result = static_cast( getDispatcher()->vkCopyImageToImageEXT( - static_cast( m_device ), reinterpret_cast( ©ImageToImageInfo ) ) ); + static_cast( m_device ), reinterpret_cast( ©ImageToImageInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyImageToImageEXT" ); } - VULKAN_HPP_INLINE void Device::transitionImageLayoutEXT( - VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions ) const + VULKAN_HPP_INLINE void + Device::transitionImageLayoutEXT( VULKAN_HPP_NAMESPACE::ArrayProxy const & transitions ) const { - VULKAN_HPP_ASSERT( getDispatcher()->vkTransitionImageLayoutEXT && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkTransitionImageLayoutEXT && + "Function requires or " ); VULKAN_HPP_NAMESPACE::Result result = static_cast( 
getDispatcher()->vkTransitionImageLayoutEXT( - static_cast( m_device ), transitions.size(), reinterpret_cast( transitions.data() ) ) ); + static_cast( m_device ), transitions.size(), reinterpret_cast( transitions.data() ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::transitionImageLayoutEXT" ); } - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - Image::getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 + Image::getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkGetImageSubresourceLayout2EXT && - "Function requires or or " ); + "Function requires or or or " ); - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR layout; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; getDispatcher()->vkGetImageSubresourceLayout2EXT( static_cast( m_device ), static_cast( m_image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return layout; } template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain - Image::getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT + Image::getSubresourceLayout2EXT( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkGetImageSubresourceLayout2EXT && - "Function requires or or " ); + "Function requires or or or " ); VULKAN_HPP_NAMESPACE::StructureChain structureChain; - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR & layout = structureChain.template get(); + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); getDispatcher()->vkGetImageSubresourceLayout2EXT( static_cast( m_device ), static_cast( m_image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return structureChain; } //=== VK_KHR_map_memory2 === - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE void * Device::mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR & memoryMapInfo ) const + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE void * Device::mapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryMapInfo & memoryMapInfo ) const { - VULKAN_HPP_ASSERT( getDispatcher()->vkMapMemory2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkMapMemory2KHR && "Function requires or " ); void * pData; VULKAN_HPP_NAMESPACE::Result result = static_cast( - getDispatcher()->vkMapMemory2KHR( static_cast( m_device ), reinterpret_cast( &memoryMapInfo ), &pData ) ); + getDispatcher()->vkMapMemory2KHR( static_cast( m_device ), reinterpret_cast( &memoryMapInfo ), &pData ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory2KHR" ); return pData; } - VULKAN_HPP_INLINE void Device::unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR & memoryUnmapInfo ) const + VULKAN_HPP_INLINE void Device::unmapMemory2KHR( const VULKAN_HPP_NAMESPACE::MemoryUnmapInfo & memoryUnmapInfo ) const { - VULKAN_HPP_ASSERT( getDispatcher()->vkUnmapMemory2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkUnmapMemory2KHR && "Function requires or " ); VULKAN_HPP_NAMESPACE::Result result 
= static_cast( - getDispatcher()->vkUnmapMemory2KHR( static_cast( m_device ), reinterpret_cast( &memoryUnmapInfo ) ) ); + getDispatcher()->vkUnmapMemory2KHR( static_cast( m_device ), reinterpret_cast( &memoryUnmapInfo ) ) ); VULKAN_HPP_NAMESPACE::detail::resultCheck( result, VULKAN_HPP_NAMESPACE_STRING "::Device::unmapMemory2KHR" ); } @@ -23098,7 +23880,8 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::IndexType indexType ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkCmdBindIndexBuffer2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdBindIndexBuffer2KHR && + "Function requires or " ); getDispatcher()->vkCmdBindIndexBuffer2KHR( static_cast( m_commandBuffer ), static_cast( buffer ), @@ -23108,78 +23891,79 @@ namespace VULKAN_HPP_NAMESPACE } VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Extent2D - Device::getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR & renderingAreaInfo ) const VULKAN_HPP_NOEXCEPT + Device::getRenderingAreaGranularityKHR( const VULKAN_HPP_NAMESPACE::RenderingAreaInfo & renderingAreaInfo ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkGetRenderingAreaGranularityKHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkGetRenderingAreaGranularityKHR && + "Function requires or " ); VULKAN_HPP_NAMESPACE::Extent2D granularity; getDispatcher()->vkGetRenderingAreaGranularityKHR( static_cast( m_device ), - reinterpret_cast( &renderingAreaInfo ), + reinterpret_cast( &renderingAreaInfo ), reinterpret_cast( &granularity ) ); return granularity; } - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 + Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkGetDeviceImageSubresourceLayoutKHR && - "Function requires " ); + "Function requires or " ); - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR layout; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; getDispatcher()->vkGetDeviceImageSubresourceLayoutKHR( static_cast( m_device ), - reinterpret_cast( &info ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &info ), + reinterpret_cast( &layout ) ); return layout; } template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain - Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR & info ) const VULKAN_HPP_NOEXCEPT + Device::getImageSubresourceLayoutKHR( const VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo & info ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkGetDeviceImageSubresourceLayoutKHR && - "Function requires " ); + "Function requires or " ); VULKAN_HPP_NAMESPACE::StructureChain structureChain; - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR & layout = structureChain.template get(); + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); getDispatcher()->vkGetDeviceImageSubresourceLayoutKHR( static_cast( m_device ), - reinterpret_cast( &info ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &info ), + reinterpret_cast( &layout ) ); return structureChain; } - VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR - Image::getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout2 + Image::getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkGetImageSubresourceLayout2KHR && - "Function requires or or " ); + "Function requires or or or " ); - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR layout; + VULKAN_HPP_NAMESPACE::SubresourceLayout2 layout; getDispatcher()->vkGetImageSubresourceLayout2KHR( static_cast( m_device ), static_cast( m_image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return layout; } template VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::StructureChain - Image::getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR & subresource ) const VULKAN_HPP_NOEXCEPT + Image::getSubresourceLayout2KHR( const VULKAN_HPP_NAMESPACE::ImageSubresource2 & subresource ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkGetImageSubresourceLayout2KHR && - "Function requires or or " ); + "Function requires or or or " ); VULKAN_HPP_NAMESPACE::StructureChain structureChain; - VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR & layout = structureChain.template get(); + VULKAN_HPP_NAMESPACE::SubresourceLayout2 & layout = structureChain.template get(); getDispatcher()->vkGetImageSubresourceLayout2KHR( static_cast( m_device ), static_cast( m_image ), - reinterpret_cast( &subresource ), - reinterpret_cast( &layout ) ); + reinterpret_cast( &subresource ), + reinterpret_cast( &layout ) ); return structureChain; } @@ -23606,7 +24390,7 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_INLINE void CommandBuffer::setLineStippleKHR( uint32_t lineStippleFactor, uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkCmdSetLineStippleKHR && - "Function requires or " ); + "Function requires or or " ); getDispatcher()->vkCmdSetLineStippleKHR( static_cast( m_commandBuffer ), lineStippleFactor, lineStipplePattern ); } @@ -23681,40 +24465,41 @@ namespace VULKAN_HPP_NAMESPACE //=== VK_KHR_maintenance6 === VULKAN_HPP_INLINE void - CommandBuffer::bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR & bindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT + CommandBuffer::bindDescriptorSets2KHR( const VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo & bindDescriptorSetsInfo ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkCmdBindDescriptorSets2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdBindDescriptorSets2KHR && + "Function requires or " ); getDispatcher()->vkCmdBindDescriptorSets2KHR( static_cast( m_commandBuffer ), - reinterpret_cast( &bindDescriptorSetsInfo ) ); + reinterpret_cast( &bindDescriptorSetsInfo ) ); } - VULKAN_HPP_INLINE void CommandBuffer::pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR & pushConstantsInfo ) const VULKAN_HPP_NOEXCEPT + VULKAN_HPP_INLINE void CommandBuffer::pushConstants2KHR( const VULKAN_HPP_NAMESPACE::PushConstantsInfo & pushConstantsInfo ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushConstants2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushConstants2KHR && "Function requires or " ); 
getDispatcher()->vkCmdPushConstants2KHR( static_cast( m_commandBuffer ), - reinterpret_cast( &pushConstantsInfo ) ); + reinterpret_cast( &pushConstantsInfo ) ); } VULKAN_HPP_INLINE void - CommandBuffer::pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR & pushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT + CommandBuffer::pushDescriptorSet2KHR( const VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo & pushDescriptorSetInfo ) const VULKAN_HPP_NOEXCEPT { - VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSet2KHR && "Function requires " ); + VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSet2KHR && + "Function requires or " ); getDispatcher()->vkCmdPushDescriptorSet2KHR( static_cast( m_commandBuffer ), - reinterpret_cast( &pushDescriptorSetInfo ) ); + reinterpret_cast( &pushDescriptorSetInfo ) ); } VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplate2KHR( - const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR & pushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT + const VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo & pushDescriptorSetWithTemplateInfo ) const VULKAN_HPP_NOEXCEPT { VULKAN_HPP_ASSERT( getDispatcher()->vkCmdPushDescriptorSetWithTemplate2KHR && - "Function requires " ); + "Function requires or " ); getDispatcher()->vkCmdPushDescriptorSetWithTemplate2KHR( - static_cast( m_commandBuffer ), - reinterpret_cast( &pushDescriptorSetWithTemplateInfo ) ); + static_cast( m_commandBuffer ), reinterpret_cast( &pushDescriptorSetWithTemplateInfo ) ); } VULKAN_HPP_INLINE void CommandBuffer::setDescriptorBufferOffsets2EXT( diff --git a/third_party/vulkan/vulkan_static_assertions.hpp b/third_party/vulkan/vulkan_static_assertions.hpp index fa1dde6..108374c 100644 --- a/third_party/vulkan/vulkan_static_assertions.hpp +++ b/third_party/vulkan/vulkan_static_assertions.hpp @@ -1811,6 +1811,333 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "DeviceImageMemoryRequirements is not nothrow_move_constructible!" ); +//=== VK_VERSION_1_4 === + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceVulkan14Features ) == sizeof( VkPhysicalDeviceVulkan14Features ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceVulkan14Features is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceVulkan14Properties ) == sizeof( VkPhysicalDeviceVulkan14Properties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceVulkan14Properties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfo ) == sizeof( VkDeviceQueueGlobalPriorityCreateInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "DeviceQueueGlobalPriorityCreateInfo is not nothrow_move_constructible!" 
); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeatures ) == sizeof( VkPhysicalDeviceGlobalPriorityQueryFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceGlobalPriorityQueryFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityProperties ) == sizeof( VkQueueFamilyGlobalPriorityProperties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "QueueFamilyGlobalPriorityProperties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderSubgroupRotateFeatures ) == sizeof( VkPhysicalDeviceShaderSubgroupRotateFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceShaderSubgroupRotateFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderFloatControls2Features ) == sizeof( VkPhysicalDeviceShaderFloatControls2Features ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceShaderFloatControls2Features is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderExpectAssumeFeatures ) == sizeof( VkPhysicalDeviceShaderExpectAssumeFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceShaderExpectAssumeFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeatures ) == sizeof( VkPhysicalDeviceLineRasterizationFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceLineRasterizationFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationProperties ) == sizeof( VkPhysicalDeviceLineRasterizationProperties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceLineRasterizationProperties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfo ) == sizeof( VkPipelineRasterizationLineStateCreateInfo ), + "struct and wrapper have different size!" 
); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PipelineRasterizationLineStateCreateInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorProperties ) == + sizeof( VkPhysicalDeviceVertexAttributeDivisorProperties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceVertexAttributeDivisorProperties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescription ) == sizeof( VkVertexInputBindingDivisorDescription ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "VertexInputBindingDivisorDescription is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfo ) == sizeof( VkPipelineVertexInputDivisorStateCreateInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PipelineVertexInputDivisorStateCreateInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeatures ) == + sizeof( VkPhysicalDeviceVertexAttributeDivisorFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceVertexAttributeDivisorFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8Features ) == sizeof( VkPhysicalDeviceIndexTypeUint8Features ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceIndexTypeUint8Features is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::MemoryMapInfo ) == sizeof( VkMemoryMapInfo ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, "MemoryMapInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::MemoryUnmapInfo ) == sizeof( VkMemoryUnmapInfo ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "MemoryUnmapInfo is not nothrow_move_constructible!" 
); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5Features ) == sizeof( VkPhysicalDeviceMaintenance5Features ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceMaintenance5Features is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5Properties ) == sizeof( VkPhysicalDeviceMaintenance5Properties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceMaintenance5Properties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::RenderingAreaInfo ) == sizeof( VkRenderingAreaInfo ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "RenderingAreaInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfo ) == sizeof( VkDeviceImageSubresourceInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "DeviceImageSubresourceInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ImageSubresource2 ) == sizeof( VkImageSubresource2 ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "ImageSubresource2 is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SubresourceLayout2 ) == sizeof( VkSubresourceLayout2 ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "SubresourceLayout2 is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2CreateInfo ) == sizeof( VkPipelineCreateFlags2CreateInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PipelineCreateFlags2CreateInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfo ) == sizeof( VkBufferUsageFlags2CreateInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "BufferUsageFlags2CreateInfo is not nothrow_move_constructible!" 
); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePushDescriptorProperties ) == sizeof( VkPhysicalDevicePushDescriptorProperties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDevicePushDescriptorProperties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceDynamicRenderingLocalReadFeatures ) == + sizeof( VkPhysicalDeviceDynamicRenderingLocalReadFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceDynamicRenderingLocalReadFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfo ) == sizeof( VkRenderingAttachmentLocationInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "RenderingAttachmentLocationInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfo ) == sizeof( VkRenderingInputAttachmentIndexInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "RenderingInputAttachmentIndexInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6Features ) == sizeof( VkPhysicalDeviceMaintenance6Features ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceMaintenance6Features is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6Properties ) == sizeof( VkPhysicalDeviceMaintenance6Properties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceMaintenance6Properties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BindMemoryStatus ) == sizeof( VkBindMemoryStatus ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "BindMemoryStatus is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfo ) == sizeof( VkBindDescriptorSetsInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "BindDescriptorSetsInfo is not nothrow_move_constructible!" 
); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PushConstantsInfo ) == sizeof( VkPushConstantsInfo ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PushConstantsInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PushDescriptorSetInfo ) == sizeof( VkPushDescriptorSetInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PushDescriptorSetInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfo ) == sizeof( VkPushDescriptorSetWithTemplateInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PushDescriptorSetWithTemplateInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineProtectedAccessFeatures ) == + sizeof( VkPhysicalDevicePipelineProtectedAccessFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDevicePipelineProtectedAccessFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessFeatures ) == sizeof( VkPhysicalDevicePipelineRobustnessFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDevicePipelineRobustnessFeatures is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessProperties ) == sizeof( VkPhysicalDevicePipelineRobustnessProperties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDevicePipelineRobustnessProperties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineRobustnessCreateInfo ) == sizeof( VkPipelineRobustnessCreateInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PipelineRobustnessCreateInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyFeatures ) == sizeof( VkPhysicalDeviceHostImageCopyFeatures ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceHostImageCopyFeatures is not nothrow_move_constructible!" 
); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyProperties ) == sizeof( VkPhysicalDeviceHostImageCopyProperties ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "PhysicalDeviceHostImageCopyProperties is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::MemoryToImageCopy ) == sizeof( VkMemoryToImageCopy ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "MemoryToImageCopy is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ImageToMemoryCopy ) == sizeof( VkImageToMemoryCopy ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "ImageToMemoryCopy is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfo ) == sizeof( VkCopyMemoryToImageInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "CopyMemoryToImageInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfo ) == sizeof( VkCopyImageToMemoryInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "CopyImageToMemoryInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CopyImageToImageInfo ) == sizeof( VkCopyImageToImageInfo ), "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "CopyImageToImageInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfo ) == sizeof( VkHostImageLayoutTransitionInfo ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "HostImageLayoutTransitionInfo is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SubresourceHostMemcpySize ) == sizeof( VkSubresourceHostMemcpySize ), + "struct and wrapper have different size!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "SubresourceHostMemcpySize is not nothrow_move_constructible!" ); + +VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::HostImageCopyDevicePerformanceQuery ) == sizeof( VkHostImageCopyDevicePerformanceQuery ), + "struct and wrapper have different size!" 
); +VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, + "struct wrapper is not a standard layout!" ); +VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, + "HostImageCopyDevicePerformanceQuery is not nothrow_move_constructible!" ); + //=== VK_KHR_surface === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SurfaceKHR ) == sizeof( VkSurfaceKHR ), "handle and wrapper have different size!" ); @@ -2648,30 +2975,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "PhysicalDeviceASTCDecodeFeaturesEXT is not nothrow_move_constructible!" ); -//=== VK_EXT_pipeline_robustness === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessFeaturesEXT ) == - sizeof( VkPhysicalDevicePipelineRobustnessFeaturesEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDevicePipelineRobustnessFeaturesEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineRobustnessPropertiesEXT ) == - sizeof( VkPhysicalDevicePipelineRobustnessPropertiesEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDevicePipelineRobustnessPropertiesEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineRobustnessCreateInfoEXT ) == sizeof( VkPipelineRobustnessCreateInfoEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PipelineRobustnessCreateInfoEXT is not nothrow_move_constructible!" ); - #if defined( VK_USE_PLATFORM_WIN32_KHR ) //=== VK_KHR_external_memory_win32 === @@ -2772,15 +3075,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "SemaphoreGetFdInfoKHR is not nothrow_move_constructible!" ); -//=== VK_KHR_push_descriptor === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePushDescriptorPropertiesKHR ) == sizeof( VkPhysicalDevicePushDescriptorPropertiesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDevicePushDescriptorPropertiesKHR is not nothrow_move_constructible!" ); - //=== VK_EXT_conditional_rendering === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT ) == sizeof( VkConditionalRenderingBeginInfoEXT ), @@ -3998,30 +4292,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "VideoDecodeH265DpbSlotInfoKHR is not nothrow_move_constructible!" ); -//=== VK_KHR_global_priority === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DeviceQueueGlobalPriorityCreateInfoKHR ) == sizeof( VkDeviceQueueGlobalPriorityCreateInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" 
); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "DeviceQueueGlobalPriorityCreateInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceGlobalPriorityQueryFeaturesKHR ) == - sizeof( VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceGlobalPriorityQueryFeaturesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::QueueFamilyGlobalPriorityPropertiesKHR ) == sizeof( VkQueueFamilyGlobalPriorityPropertiesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "QueueFamilyGlobalPriorityPropertiesKHR is not nothrow_move_constructible!" ); - //=== VK_AMD_memory_overallocation_behavior === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DeviceMemoryOverallocationCreateInfoAMD ) == sizeof( VkDeviceMemoryOverallocationCreateInfoAMD ), @@ -4337,30 +4607,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "PhysicalDeviceCoherentMemoryFeaturesAMD is not nothrow_move_constructible!" ); -//=== VK_KHR_dynamic_rendering_local_read === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR ) == - sizeof( VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::RenderingAttachmentLocationInfoKHR ) == sizeof( VkRenderingAttachmentLocationInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "RenderingAttachmentLocationInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::RenderingInputAttachmentIndexInfoKHR ) == sizeof( VkRenderingInputAttachmentIndexInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "RenderingInputAttachmentIndexInfoKHR is not nothrow_move_constructible!" ); - //=== VK_EXT_shader_image_atomic_int64 === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderImageAtomicInt64FeaturesEXT ) == @@ -4651,81 +4897,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "PipelineExecutableInternalRepresentationKHR is not nothrow_move_constructible!" ); -//=== VK_EXT_host_image_copy === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyFeaturesEXT ) == sizeof( VkPhysicalDeviceHostImageCopyFeaturesEXT ), - "struct and wrapper have different size!" 
); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceHostImageCopyFeaturesEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceHostImageCopyPropertiesEXT ) == sizeof( VkPhysicalDeviceHostImageCopyPropertiesEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceHostImageCopyPropertiesEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT ) == sizeof( VkMemoryToImageCopyEXT ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "MemoryToImageCopyEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT ) == sizeof( VkImageToMemoryCopyEXT ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "ImageToMemoryCopyEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CopyMemoryToImageInfoEXT ) == sizeof( VkCopyMemoryToImageInfoEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "CopyMemoryToImageInfoEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CopyImageToMemoryInfoEXT ) == sizeof( VkCopyImageToMemoryInfoEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "CopyImageToMemoryInfoEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CopyImageToImageInfoEXT ) == sizeof( VkCopyImageToImageInfoEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "CopyImageToImageInfoEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::HostImageLayoutTransitionInfoEXT ) == sizeof( VkHostImageLayoutTransitionInfoEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "HostImageLayoutTransitionInfoEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SubresourceHostMemcpySizeEXT ) == sizeof( VkSubresourceHostMemcpySizeEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "SubresourceHostMemcpySizeEXT is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::HostImageCopyDevicePerformanceQueryEXT ) == sizeof( VkHostImageCopyDevicePerformanceQueryEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "HostImageCopyDevicePerformanceQueryEXT is not nothrow_move_constructible!" ); - -//=== VK_KHR_map_memory2 === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::MemoryMapInfoKHR ) == sizeof( VkMemoryMapInfoKHR ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "MemoryMapInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::MemoryUnmapInfoKHR ) == sizeof( VkMemoryUnmapInfoKHR ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "MemoryUnmapInfoKHR is not nothrow_move_constructible!" ); - //=== VK_EXT_map_memory_placed === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMapMemoryPlacedFeaturesEXT ) == sizeof( VkPhysicalDeviceMapMemoryPlacedFeaturesEXT ), @@ -6314,16 +6485,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "PhysicalDeviceShaderCorePropertiesARM is not nothrow_move_constructible!" ); -//=== VK_KHR_shader_subgroup_rotate === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderSubgroupRotateFeaturesKHR ) == - sizeof( VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceShaderSubgroupRotateFeaturesKHR is not nothrow_move_constructible!" ); - //=== VK_ARM_scheduling_controls === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DeviceQueueShaderCoreControlCreateInfoARM ) == sizeof( VkDeviceQueueShaderCoreControlCreateInfoARM ), @@ -6823,16 +6984,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "PhysicalDeviceLegacyDitheringFeaturesEXT is not nothrow_move_constructible!" ); -//=== VK_EXT_pipeline_protected_access === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevicePipelineProtectedAccessFeaturesEXT ) == - sizeof( VkPhysicalDevicePipelineProtectedAccessFeaturesEXT ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDevicePipelineProtectedAccessFeaturesEXT is not nothrow_move_constructible!" ); - #if defined( VK_USE_PLATFORM_ANDROID_KHR ) //=== VK_ANDROID_external_format_resolve === @@ -6861,56 +7012,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "struct wrapper is not a standard layout!" 
); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceMaintenance5FeaturesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance5PropertiesKHR ) == sizeof( VkPhysicalDeviceMaintenance5PropertiesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceMaintenance5PropertiesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::RenderingAreaInfoKHR ) == sizeof( VkRenderingAreaInfoKHR ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "RenderingAreaInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::DeviceImageSubresourceInfoKHR ) == sizeof( VkDeviceImageSubresourceInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "DeviceImageSubresourceInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::ImageSubresource2KHR ) == sizeof( VkImageSubresource2KHR ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "ImageSubresource2KHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SubresourceLayout2KHR ) == sizeof( VkSubresourceLayout2KHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "SubresourceLayout2KHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2CreateInfoKHR ) == sizeof( VkPipelineCreateFlags2CreateInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PipelineCreateFlags2CreateInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BufferUsageFlags2CreateInfoKHR ) == sizeof( VkBufferUsageFlags2CreateInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "BufferUsageFlags2CreateInfoKHR is not nothrow_move_constructible!" ); - //=== VK_AMD_anti_lag === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceAntiLagFeaturesAMD ) == sizeof( VkPhysicalDeviceAntiLagFeaturesAMD ), @@ -7556,49 +7657,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT is not nothrow_move_constructible!" 
); -//=== VK_KHR_vertex_attribute_divisor === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorPropertiesKHR ) == - sizeof( VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceVertexAttributeDivisorPropertiesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR ) == sizeof( VkVertexInputBindingDivisorDescriptionKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "VertexInputBindingDivisorDescriptionKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineVertexInputDivisorStateCreateInfoKHR ) == - sizeof( VkPipelineVertexInputDivisorStateCreateInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PipelineVertexInputDivisorStateCreateInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceVertexAttributeDivisorFeaturesKHR ) == - sizeof( VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceVertexAttributeDivisorFeaturesKHR is not nothrow_move_constructible!" ); - -//=== VK_KHR_shader_float_controls2 === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderFloatControls2FeaturesKHR ) == - sizeof( VkPhysicalDeviceShaderFloatControls2FeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceShaderFloatControls2FeaturesKHR is not nothrow_move_constructible!" ); - #if defined( VK_USE_PLATFORM_SCREEN_QNX ) //=== VK_QNX_external_memory_screen_buffer === @@ -7643,40 +7701,6 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "PhysicalDeviceLayeredDriverPropertiesMSFT is not nothrow_move_constructible!" ); -//=== VK_KHR_index_type_uint8 === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8FeaturesKHR ) == sizeof( VkPhysicalDeviceIndexTypeUint8FeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceIndexTypeUint8FeaturesKHR is not nothrow_move_constructible!" ); - -//=== VK_KHR_line_rasterization === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationFeaturesKHR ) == sizeof( VkPhysicalDeviceLineRasterizationFeaturesKHR ), - "struct and wrapper have different size!" 
); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceLineRasterizationFeaturesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationPropertiesKHR ) == - sizeof( VkPhysicalDeviceLineRasterizationPropertiesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceLineRasterizationPropertiesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PipelineRasterizationLineStateCreateInfoKHR ) == - sizeof( VkPipelineRasterizationLineStateCreateInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PipelineRasterizationLineStateCreateInfoKHR is not nothrow_move_constructible!" ); - //=== VK_KHR_calibrated_timestamps === VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoKHR ) == sizeof( VkCalibratedTimestampInfoKHR ), @@ -7685,61 +7709,8 @@ VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "CalibratedTimestampInfoKHR is not nothrow_move_constructible!" ); -//=== VK_KHR_shader_expect_assume === - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderExpectAssumeFeaturesKHR ) == - sizeof( VkPhysicalDeviceShaderExpectAssumeFeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceShaderExpectAssumeFeaturesKHR is not nothrow_move_constructible!" ); - //=== VK_KHR_maintenance6 === -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6FeaturesKHR ) == sizeof( VkPhysicalDeviceMaintenance6FeaturesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceMaintenance6FeaturesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDeviceMaintenance6PropertiesKHR ) == sizeof( VkPhysicalDeviceMaintenance6PropertiesKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PhysicalDeviceMaintenance6PropertiesKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BindMemoryStatusKHR ) == sizeof( VkBindMemoryStatusKHR ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "BindMemoryStatusKHR is not nothrow_move_constructible!" 
); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::BindDescriptorSetsInfoKHR ) == sizeof( VkBindDescriptorSetsInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "BindDescriptorSetsInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PushConstantsInfoKHR ) == sizeof( VkPushConstantsInfoKHR ), "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PushConstantsInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PushDescriptorSetInfoKHR ) == sizeof( VkPushDescriptorSetInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PushDescriptorSetInfoKHR is not nothrow_move_constructible!" ); - -VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::PushDescriptorSetWithTemplateInfoKHR ) == sizeof( VkPushDescriptorSetWithTemplateInfoKHR ), - "struct and wrapper have different size!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, - "struct wrapper is not a standard layout!" ); -VULKAN_HPP_STATIC_ASSERT( std::is_nothrow_move_constructible::value, - "PushDescriptorSetWithTemplateInfoKHR is not nothrow_move_constructible!" ); - VULKAN_HPP_STATIC_ASSERT( sizeof( VULKAN_HPP_NAMESPACE::SetDescriptorBufferOffsetsInfoEXT ) == sizeof( VkSetDescriptorBufferOffsetsInfoEXT ), "struct and wrapper have different size!" ); VULKAN_HPP_STATIC_ASSERT( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); diff --git a/third_party/vulkan/vulkan_structs.hpp b/third_party/vulkan/vulkan_structs.hpp index e2937f9..054482c 100644 --- a/third_party/vulkan/vulkan_structs.hpp +++ b/third_party/vulkan/vulkan_structs.hpp @@ -7839,22 +7839,22 @@ namespace VULKAN_HPP_NAMESPACE using Type = BindDescriptorBufferEmbeddedSamplersInfoEXT; }; - struct BindDescriptorSetsInfoKHR + struct BindDescriptorSetsInfo { - using NativeType = VkBindDescriptorSetsInfoKHR; + using NativeType = VkBindDescriptorSetsInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindDescriptorSetsInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindDescriptorSetsInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR BindDescriptorSetsInfoKHR( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, - VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, - uint32_t firstSet_ = {}, - uint32_t descriptorSetCount_ = {}, - const VULKAN_HPP_NAMESPACE::DescriptorSet * pDescriptorSets_ = {}, - uint32_t dynamicOffsetCount_ = {}, - const uint32_t * pDynamicOffsets_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR BindDescriptorSetsInfo( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, + VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, + uint32_t firstSet_ = {}, + uint32_t descriptorSetCount_ = {}, + const VULKAN_HPP_NAMESPACE::DescriptorSet * pDescriptorSets_ = {}, + uint32_t dynamicOffsetCount_ = {}, + const uint32_t * pDynamicOffsets_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , stageFlags{ stageFlags_ } , layout{ layout_ } @@ -7866,20 +7866,20 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR BindDescriptorSetsInfoKHR( BindDescriptorSetsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR BindDescriptorSetsInfo( BindDescriptorSetsInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - BindDescriptorSetsInfoKHR( VkBindDescriptorSetsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : BindDescriptorSetsInfoKHR( *reinterpret_cast( &rhs ) ) + BindDescriptorSetsInfo( VkBindDescriptorSetsInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : BindDescriptorSetsInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - BindDescriptorSetsInfoKHR( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_, - VULKAN_HPP_NAMESPACE::PipelineLayout layout_, - uint32_t firstSet_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorSets_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dynamicOffsets_ = {}, - const void * pNext_ = nullptr ) + BindDescriptorSetsInfo( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_, + VULKAN_HPP_NAMESPACE::PipelineLayout layout_, + uint32_t firstSet_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorSets_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dynamicOffsets_ = {}, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , stageFlags( stageFlags_ ) , layout( layout_ ) @@ -7892,54 +7892,54 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - BindDescriptorSetsInfoKHR & operator=( BindDescriptorSetsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + BindDescriptorSetsInfo & operator=( BindDescriptorSetsInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - BindDescriptorSetsInfoKHR & operator=( VkBindDescriptorSetsInfoKHR const & rhs ) 
VULKAN_HPP_NOEXCEPT + BindDescriptorSetsInfo & operator=( VkBindDescriptorSetsInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) VULKAN_HPP_NOEXCEPT { stageFlags = stageFlags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT { layout = layout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setFirstSet( uint32_t firstSet_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setFirstSet( uint32_t firstSet_ ) VULKAN_HPP_NOEXCEPT { firstSet = firstSet_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setDescriptorSetCount( uint32_t descriptorSetCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setDescriptorSetCount( uint32_t descriptorSetCount_ ) VULKAN_HPP_NOEXCEPT { descriptorSetCount = descriptorSetCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setPDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSet * pDescriptorSets_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setPDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSet * pDescriptorSets_ ) VULKAN_HPP_NOEXCEPT { pDescriptorSets = pDescriptorSets_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - BindDescriptorSetsInfoKHR & + BindDescriptorSetsInfo & setDescriptorSets( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorSets_ ) VULKAN_HPP_NOEXCEPT { descriptorSetCount = static_cast( descriptorSets_.size() ); @@ -7948,20 +7948,20 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setDynamicOffsetCount( uint32_t dynamicOffsetCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setDynamicOffsetCount( uint32_t dynamicOffsetCount_ ) VULKAN_HPP_NOEXCEPT { dynamicOffsetCount = dynamicOffsetCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfoKHR & setPDynamicOffsets( const uint32_t * pDynamicOffsets_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindDescriptorSetsInfo & setPDynamicOffsets( const uint32_t * pDynamicOffsets_ ) VULKAN_HPP_NOEXCEPT { pDynamicOffsets = pDynamicOffsets_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - BindDescriptorSetsInfoKHR & setDynamicOffsets( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dynamicOffsets_ ) VULKAN_HPP_NOEXCEPT + BindDescriptorSetsInfo & setDynamicOffsets( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dynamicOffsets_ ) VULKAN_HPP_NOEXCEPT { dynamicOffsetCount = static_cast( dynamicOffsets_.size() ); pDynamicOffsets = dynamicOffsets_.data(); @@ -7970,14 +7970,14 @@ namespace VULKAN_HPP_NAMESPACE # endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkBindDescriptorSetsInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkBindDescriptorSetsInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkBindDescriptorSetsInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkBindDescriptorSetsInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -8001,9 +8001,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( BindDescriptorSetsInfoKHR const & ) const = default; + auto operator<=>( BindDescriptorSetsInfo const & ) const = default; #else - bool operator==( BindDescriptorSetsInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( BindDescriptorSetsInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -8014,14 +8014,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( BindDescriptorSetsInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( BindDescriptorSetsInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindDescriptorSetsInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindDescriptorSetsInfo; const void * pNext = {}; VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags = {}; VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; @@ -8033,11 +8033,13 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = BindDescriptorSetsInfoKHR; + using Type = BindDescriptorSetsInfo; }; + using BindDescriptorSetsInfoKHR = BindDescriptorSetsInfo; + struct Offset2D { using NativeType = VkOffset2D; @@ -8889,57 +8891,55 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::IndexType indexType = VULKAN_HPP_NAMESPACE::IndexType::eUint16; }; - struct BindMemoryStatusKHR + struct BindMemoryStatus { - using NativeType = VkBindMemoryStatusKHR; + using NativeType = VkBindMemoryStatus; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindMemoryStatusKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindMemoryStatus; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR BindMemoryStatusKHR( VULKAN_HPP_NAMESPACE::Result * pResult_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR BindMemoryStatus( VULKAN_HPP_NAMESPACE::Result * pResult_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , pResult{ pResult_ } { } - VULKAN_HPP_CONSTEXPR BindMemoryStatusKHR( BindMemoryStatusKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR BindMemoryStatus( BindMemoryStatus const & rhs ) VULKAN_HPP_NOEXCEPT = default; - BindMemoryStatusKHR( VkBindMemoryStatusKHR const & rhs ) VULKAN_HPP_NOEXCEPT : BindMemoryStatusKHR( *reinterpret_cast( &rhs ) ) - { - } + BindMemoryStatus( VkBindMemoryStatus const & rhs ) VULKAN_HPP_NOEXCEPT : BindMemoryStatus( *reinterpret_cast( &rhs ) ) {} - BindMemoryStatusKHR & operator=( BindMemoryStatusKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + BindMemoryStatus & operator=( BindMemoryStatus const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ 
- BindMemoryStatusKHR & operator=( VkBindMemoryStatusKHR const & rhs ) VULKAN_HPP_NOEXCEPT + BindMemoryStatus & operator=( VkBindMemoryStatus const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 BindMemoryStatusKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindMemoryStatus & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BindMemoryStatusKHR & setPResult( VULKAN_HPP_NAMESPACE::Result * pResult_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BindMemoryStatus & setPResult( VULKAN_HPP_NAMESPACE::Result * pResult_ ) VULKAN_HPP_NOEXCEPT { pResult = pResult_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkBindMemoryStatusKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkBindMemoryStatus const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkBindMemoryStatusKHR &() VULKAN_HPP_NOEXCEPT + operator VkBindMemoryStatus &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -8955,9 +8955,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( BindMemoryStatusKHR const & ) const = default; + auto operator<=>( BindMemoryStatus const & ) const = default; #else - bool operator==( BindMemoryStatusKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( BindMemoryStatus const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -8966,24 +8966,26 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( BindMemoryStatusKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( BindMemoryStatus const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindMemoryStatusKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindMemoryStatus; const void * pNext = {}; VULKAN_HPP_NAMESPACE::Result * pResult = {}; }; template <> - struct CppType + struct CppType { - using Type = BindMemoryStatusKHR; + using Type = BindMemoryStatus; }; + using BindMemoryStatusKHR = BindMemoryStatus; + struct BindPipelineIndirectCommandNV { using NativeType = VkBindPipelineIndirectCommandNV; @@ -13444,66 +13446,65 @@ namespace VULKAN_HPP_NAMESPACE using BufferOpaqueCaptureAddressCreateInfoKHR = BufferOpaqueCaptureAddressCreateInfo; - struct BufferUsageFlags2CreateInfoKHR + struct BufferUsageFlags2CreateInfo { - using NativeType = VkBufferUsageFlags2CreateInfoKHR; + using NativeType = VkBufferUsageFlags2CreateInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferUsageFlags2CreateInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferUsageFlags2CreateInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR BufferUsageFlags2CreateInfoKHR( VULKAN_HPP_NAMESPACE::BufferUsageFlags2KHR usage_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR BufferUsageFlags2CreateInfo( VULKAN_HPP_NAMESPACE::BufferUsageFlags2 usage_ = {}, const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , usage{ usage_ } 
{ } - VULKAN_HPP_CONSTEXPR BufferUsageFlags2CreateInfoKHR( BufferUsageFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR BufferUsageFlags2CreateInfo( BufferUsageFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - BufferUsageFlags2CreateInfoKHR( VkBufferUsageFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : BufferUsageFlags2CreateInfoKHR( *reinterpret_cast( &rhs ) ) + BufferUsageFlags2CreateInfo( VkBufferUsageFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : BufferUsageFlags2CreateInfo( *reinterpret_cast( &rhs ) ) { } - BufferUsageFlags2CreateInfoKHR & operator=( BufferUsageFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + BufferUsageFlags2CreateInfo & operator=( BufferUsageFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - BufferUsageFlags2CreateInfoKHR & operator=( VkBufferUsageFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + BufferUsageFlags2CreateInfo & operator=( VkBufferUsageFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 BufferUsageFlags2CreateInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BufferUsageFlags2CreateInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 BufferUsageFlags2CreateInfoKHR & setUsage( VULKAN_HPP_NAMESPACE::BufferUsageFlags2KHR usage_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 BufferUsageFlags2CreateInfo & setUsage( VULKAN_HPP_NAMESPACE::BufferUsageFlags2 usage_ ) VULKAN_HPP_NOEXCEPT { usage = usage_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkBufferUsageFlags2CreateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkBufferUsageFlags2CreateInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkBufferUsageFlags2CreateInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkBufferUsageFlags2CreateInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) # if 14 <= VULKAN_HPP_CPP_VERSION auto # else - std::tuple + std::tuple # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -13512,9 +13513,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( BufferUsageFlags2CreateInfoKHR const & ) const = default; + auto operator<=>( BufferUsageFlags2CreateInfo const & ) const = default; #else - bool operator==( BufferUsageFlags2CreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( BufferUsageFlags2CreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -13523,24 +13524,26 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( BufferUsageFlags2CreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( BufferUsageFlags2CreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferUsageFlags2CreateInfoKHR; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::BufferUsageFlags2KHR usage = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferUsageFlags2CreateInfo; + const void * pNext = {}; + 
VULKAN_HPP_NAMESPACE::BufferUsageFlags2 usage = {}; }; template <> - struct CppType + struct CppType { - using Type = BufferUsageFlags2CreateInfoKHR; + using Type = BufferUsageFlags2CreateInfo; }; + using BufferUsageFlags2CreateInfoKHR = BufferUsageFlags2CreateInfo; + struct BufferViewCreateInfo { using NativeType = VkBufferViewCreateInfo; @@ -18530,22 +18533,22 @@ namespace VULKAN_HPP_NAMESPACE using CopyImageToBufferInfo2KHR = CopyImageToBufferInfo2; - struct CopyImageToImageInfoEXT + struct CopyImageToImageInfo { - using NativeType = VkCopyImageToImageInfoEXT; + using NativeType = VkCopyImageToImageInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyImageToImageInfoEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyImageToImageInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR CopyImageToImageInfoEXT( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_ = {}, - VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, - VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, - VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, - VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, - uint32_t regionCount_ = {}, - const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR CopyImageToImageInfo( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_ = {}, + VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, + VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, + uint32_t regionCount_ = {}, + const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , flags{ flags_ } , srcImage{ srcImage_ } @@ -18557,21 +18560,21 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR CopyImageToImageInfoEXT( CopyImageToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR CopyImageToImageInfo( CopyImageToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - CopyImageToImageInfoEXT( VkCopyImageToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : CopyImageToImageInfoEXT( *reinterpret_cast( &rhs ) ) + CopyImageToImageInfo( VkCopyImageToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : CopyImageToImageInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - CopyImageToImageInfoEXT( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_, - VULKAN_HPP_NAMESPACE::Image srcImage_, - VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, - VULKAN_HPP_NAMESPACE::Image dstImage_, - VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_, - const void * pNext_ = nullptr ) + CopyImageToImageInfo( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_, + VULKAN_HPP_NAMESPACE::Image srcImage_, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, + VULKAN_HPP_NAMESPACE::Image dstImage_, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , flags( flags_ ) , srcImage( srcImage_ ) @@ -18584,66 +18587,66 @@ namespace VULKAN_HPP_NAMESPACE } # endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - CopyImageToImageInfoEXT & operator=( CopyImageToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + CopyImageToImageInfo & operator=( CopyImageToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - CopyImageToImageInfoEXT & operator=( VkCopyImageToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + CopyImageToImageInfo & operator=( VkCopyImageToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setFlags( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_ ) VULKAN_HPP_NOEXCEPT { flags = flags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT { srcImage = srcImage_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT { srcImageLayout = srcImageLayout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ ) VULKAN_HPP_NOEXCEPT { dstImage = dstImage_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT { dstImageLayout = dstImageLayout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT { regionCount = regionCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfoEXT & setPRegions( const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToImageInfo & setPRegions( const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions_ ) VULKAN_HPP_NOEXCEPT { pRegions = pRegions_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - CopyImageToImageInfoEXT & + CopyImageToImageInfo & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT { regionCount = static_cast( regions_.size() ); @@ -18653,14 +18656,14 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkCopyImageToImageInfoEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkCopyImageToImageInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + 
return *reinterpret_cast( this ); } - operator VkCopyImageToImageInfoEXT &() VULKAN_HPP_NOEXCEPT + operator VkCopyImageToImageInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -18669,7 +18672,7 @@ namespace VULKAN_HPP_NAMESPACE # else std::tuple( CopyImageToImageInfoEXT const & ) const = default; + auto operator<=>( CopyImageToImageInfo const & ) const = default; #else - bool operator==( CopyImageToImageInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( CopyImageToImageInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -18697,45 +18700,47 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( CopyImageToImageInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( CopyImageToImageInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyImageToImageInfoEXT; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags = {}; - VULKAN_HPP_NAMESPACE::Image srcImage = {}; - VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; - VULKAN_HPP_NAMESPACE::Image dstImage = {}; - VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; - uint32_t regionCount = {}; - const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyImageToImageInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags = {}; + VULKAN_HPP_NAMESPACE::Image srcImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::Image dstImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::ImageCopy2 * pRegions = {}; }; template <> - struct CppType + struct CppType { - using Type = CopyImageToImageInfoEXT; + using Type = CopyImageToImageInfo; }; - struct ImageToMemoryCopyEXT + using CopyImageToImageInfoEXT = CopyImageToImageInfo; + + struct ImageToMemoryCopy { - using NativeType = VkImageToMemoryCopyEXT; + using NativeType = VkImageToMemoryCopy; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageToMemoryCopyEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageToMemoryCopy; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR ImageToMemoryCopyEXT( void * pHostPointer_ = {}, - uint32_t memoryRowLength_ = {}, - uint32_t memoryImageHeight_ = {}, - VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, - VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, - VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR ImageToMemoryCopy( void * pHostPointer_ = {}, + uint32_t memoryRowLength_ = {}, + uint32_t memoryImageHeight_ = {}, + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, + VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, + VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , pHostPointer{ pHostPointer_ } , memoryRowLength{ memoryRowLength_ } @@ 
-18746,75 +18751,72 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR ImageToMemoryCopyEXT( ImageToMemoryCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR ImageToMemoryCopy( ImageToMemoryCopy const & rhs ) VULKAN_HPP_NOEXCEPT = default; - ImageToMemoryCopyEXT( VkImageToMemoryCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : ImageToMemoryCopyEXT( *reinterpret_cast( &rhs ) ) - { - } + ImageToMemoryCopy( VkImageToMemoryCopy const & rhs ) VULKAN_HPP_NOEXCEPT : ImageToMemoryCopy( *reinterpret_cast( &rhs ) ) {} - ImageToMemoryCopyEXT & operator=( ImageToMemoryCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + ImageToMemoryCopy & operator=( ImageToMemoryCopy const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - ImageToMemoryCopyEXT & operator=( VkImageToMemoryCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT + ImageToMemoryCopy & operator=( VkImageToMemoryCopy const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopyEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopy & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopyEXT & setPHostPointer( void * pHostPointer_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopy & setPHostPointer( void * pHostPointer_ ) VULKAN_HPP_NOEXCEPT { pHostPointer = pHostPointer_; return *this; } - VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopyEXT & setMemoryRowLength( uint32_t memoryRowLength_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopy & setMemoryRowLength( uint32_t memoryRowLength_ ) VULKAN_HPP_NOEXCEPT { memoryRowLength = memoryRowLength_; return *this; } - VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopyEXT & setMemoryImageHeight( uint32_t memoryImageHeight_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopy & setMemoryImageHeight( uint32_t memoryImageHeight_ ) VULKAN_HPP_NOEXCEPT { memoryImageHeight = memoryImageHeight_; return *this; } - VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopyEXT & + VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopy & setImageSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & imageSubresource_ ) VULKAN_HPP_NOEXCEPT { imageSubresource = imageSubresource_; return *this; } - VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopyEXT & setImageOffset( VULKAN_HPP_NAMESPACE::Offset3D const & imageOffset_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopy & setImageOffset( VULKAN_HPP_NAMESPACE::Offset3D const & imageOffset_ ) VULKAN_HPP_NOEXCEPT { imageOffset = imageOffset_; return *this; } - VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopyEXT & setImageExtent( VULKAN_HPP_NAMESPACE::Extent3D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageToMemoryCopy & setImageExtent( VULKAN_HPP_NAMESPACE::Extent3D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT { imageExtent = imageExtent_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkImageToMemoryCopyEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkImageToMemoryCopy const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkImageToMemoryCopyEXT &() VULKAN_HPP_NOEXCEPT + operator VkImageToMemoryCopy &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( 
VULKAN_HPP_USE_REFLECT ) @@ -18837,9 +18839,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( ImageToMemoryCopyEXT const & ) const = default; + auto operator<=>( ImageToMemoryCopy const & ) const = default; #else - bool operator==( ImageToMemoryCopyEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( ImageToMemoryCopy const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -18850,14 +18852,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( ImageToMemoryCopyEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( ImageToMemoryCopy const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageToMemoryCopyEXT; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageToMemoryCopy; const void * pNext = {}; void * pHostPointer = {}; uint32_t memoryRowLength = {}; @@ -18868,25 +18870,27 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = ImageToMemoryCopyEXT; + using Type = ImageToMemoryCopy; }; - struct CopyImageToMemoryInfoEXT + using ImageToMemoryCopyEXT = ImageToMemoryCopy; + + struct CopyImageToMemoryInfo { - using NativeType = VkCopyImageToMemoryInfoEXT; + using NativeType = VkCopyImageToMemoryInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyImageToMemoryInfoEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyImageToMemoryInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR CopyImageToMemoryInfoEXT( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_ = {}, - VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, - VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, - uint32_t regionCount_ = {}, - const VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT * pRegions_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR CopyImageToMemoryInfo( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_ = {}, + VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, + uint32_t regionCount_ = {}, + const VULKAN_HPP_NAMESPACE::ImageToMemoryCopy * pRegions_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , flags{ flags_ } , srcImage{ srcImage_ } @@ -18896,19 +18900,19 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR CopyImageToMemoryInfoEXT( CopyImageToMemoryInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR CopyImageToMemoryInfo( CopyImageToMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - CopyImageToMemoryInfoEXT( VkCopyImageToMemoryInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : CopyImageToMemoryInfoEXT( *reinterpret_cast( &rhs ) ) + CopyImageToMemoryInfo( VkCopyImageToMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : CopyImageToMemoryInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - CopyImageToMemoryInfoEXT( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_, - VULKAN_HPP_NAMESPACE::Image srcImage_, - VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_, - const void * pNext_ = nullptr ) + CopyImageToMemoryInfo( 
VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_, + VULKAN_HPP_NAMESPACE::Image srcImage_, + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , flags( flags_ ) , srcImage( srcImage_ ) @@ -18919,55 +18923,55 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - CopyImageToMemoryInfoEXT & operator=( CopyImageToMemoryInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + CopyImageToMemoryInfo & operator=( CopyImageToMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - CopyImageToMemoryInfoEXT & operator=( VkCopyImageToMemoryInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + CopyImageToMemoryInfo & operator=( VkCopyImageToMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfoEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfo & setFlags( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_ ) VULKAN_HPP_NOEXCEPT { flags = flags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfoEXT & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfo & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT { srcImage = srcImage_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfoEXT & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfo & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT { srcImageLayout = srcImageLayout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfoEXT & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfo & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT { regionCount = regionCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfoEXT & setPRegions( const VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT * pRegions_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyImageToMemoryInfo & setPRegions( const VULKAN_HPP_NAMESPACE::ImageToMemoryCopy * pRegions_ ) VULKAN_HPP_NOEXCEPT { pRegions = pRegions_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - CopyImageToMemoryInfoEXT & - setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + CopyImageToMemoryInfo & + setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT { regionCount = static_cast( regions_.size() ); pRegions = regions_.data(); @@ -18976,14 +18980,14 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkCopyImageToMemoryInfoEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkCopyImageToMemoryInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkCopyImageToMemoryInfoEXT &() 
VULKAN_HPP_NOEXCEPT + operator VkCopyImageToMemoryInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -18992,11 +18996,11 @@ namespace VULKAN_HPP_NAMESPACE # else std::tuple + const VULKAN_HPP_NAMESPACE::ImageToMemoryCopy * const &> # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -19005,9 +19009,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CopyImageToMemoryInfoEXT const & ) const = default; + auto operator<=>( CopyImageToMemoryInfo const & ) const = default; #else - bool operator==( CopyImageToMemoryInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( CopyImageToMemoryInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -19017,28 +19021,30 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( CopyImageToMemoryInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( CopyImageToMemoryInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyImageToMemoryInfoEXT; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags = {}; - VULKAN_HPP_NAMESPACE::Image srcImage = {}; - VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; - uint32_t regionCount = {}; - const VULKAN_HPP_NAMESPACE::ImageToMemoryCopyEXT * pRegions = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyImageToMemoryInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags = {}; + VULKAN_HPP_NAMESPACE::Image srcImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::ImageToMemoryCopy * pRegions = {}; }; template <> - struct CppType + struct CppType { - using Type = CopyImageToMemoryInfoEXT; + using Type = CopyImageToMemoryInfo; }; + using CopyImageToMemoryInfoEXT = CopyImageToMemoryInfo; + struct CopyMemoryIndirectCommandNV { using NativeType = VkCopyMemoryIndirectCommandNV; @@ -19369,21 +19375,21 @@ namespace VULKAN_HPP_NAMESPACE VULKAN_HPP_NAMESPACE::Extent3D imageExtent = {}; }; - struct MemoryToImageCopyEXT + struct MemoryToImageCopy { - using NativeType = VkMemoryToImageCopyEXT; + using NativeType = VkMemoryToImageCopy; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryToImageCopyEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryToImageCopy; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR MemoryToImageCopyEXT( const void * pHostPointer_ = {}, - uint32_t memoryRowLength_ = {}, - uint32_t memoryImageHeight_ = {}, - VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, - VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, - VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR MemoryToImageCopy( const void * pHostPointer_ = {}, + uint32_t memoryRowLength_ = {}, + uint32_t memoryImageHeight_ = {}, + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, + VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, + VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}, + const void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , pHostPointer{ pHostPointer_ } , memoryRowLength{ memoryRowLength_ } @@ -19394,75 +19400,72 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR MemoryToImageCopyEXT( MemoryToImageCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR MemoryToImageCopy( MemoryToImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT = default; - MemoryToImageCopyEXT( VkMemoryToImageCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : MemoryToImageCopyEXT( *reinterpret_cast( &rhs ) ) - { - } + MemoryToImageCopy( VkMemoryToImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT : MemoryToImageCopy( *reinterpret_cast( &rhs ) ) {} - MemoryToImageCopyEXT & operator=( MemoryToImageCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + MemoryToImageCopy & operator=( MemoryToImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - MemoryToImageCopyEXT & operator=( VkMemoryToImageCopyEXT const & rhs ) VULKAN_HPP_NOEXCEPT + MemoryToImageCopy & operator=( VkMemoryToImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopyEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopy & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopyEXT & setPHostPointer( const void * pHostPointer_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopy & setPHostPointer( const void * pHostPointer_ ) VULKAN_HPP_NOEXCEPT { pHostPointer = pHostPointer_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopyEXT & setMemoryRowLength( uint32_t memoryRowLength_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopy & setMemoryRowLength( uint32_t memoryRowLength_ ) VULKAN_HPP_NOEXCEPT { memoryRowLength = memoryRowLength_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopyEXT & setMemoryImageHeight( uint32_t memoryImageHeight_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopy & setMemoryImageHeight( uint32_t memoryImageHeight_ ) VULKAN_HPP_NOEXCEPT { memoryImageHeight = memoryImageHeight_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopyEXT & + VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopy & setImageSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & imageSubresource_ ) VULKAN_HPP_NOEXCEPT { imageSubresource = imageSubresource_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopyEXT & setImageOffset( VULKAN_HPP_NAMESPACE::Offset3D const & imageOffset_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopy & setImageOffset( VULKAN_HPP_NAMESPACE::Offset3D const & imageOffset_ ) VULKAN_HPP_NOEXCEPT { imageOffset = imageOffset_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopyEXT & setImageExtent( VULKAN_HPP_NAMESPACE::Extent3D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryToImageCopy & setImageExtent( VULKAN_HPP_NAMESPACE::Extent3D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT { imageExtent = imageExtent_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkMemoryToImageCopyEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkMemoryToImageCopy const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkMemoryToImageCopyEXT &() VULKAN_HPP_NOEXCEPT + operator VkMemoryToImageCopy 
&() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -19485,9 +19488,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( MemoryToImageCopyEXT const & ) const = default; + auto operator<=>( MemoryToImageCopy const & ) const = default; #else - bool operator==( MemoryToImageCopyEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( MemoryToImageCopy const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -19498,14 +19501,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( MemoryToImageCopyEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( MemoryToImageCopy const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryToImageCopyEXT; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryToImageCopy; const void * pNext = {}; const void * pHostPointer = {}; uint32_t memoryRowLength = {}; @@ -19516,25 +19519,27 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = MemoryToImageCopyEXT; + using Type = MemoryToImageCopy; }; - struct CopyMemoryToImageInfoEXT + using MemoryToImageCopyEXT = MemoryToImageCopy; + + struct CopyMemoryToImageInfo { - using NativeType = VkCopyMemoryToImageInfoEXT; + using NativeType = VkCopyMemoryToImageInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyMemoryToImageInfoEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyMemoryToImageInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR CopyMemoryToImageInfoEXT( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_ = {}, - VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, - VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, - uint32_t regionCount_ = {}, - const VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT * pRegions_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR CopyMemoryToImageInfo( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_ = {}, + VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, + uint32_t regionCount_ = {}, + const VULKAN_HPP_NAMESPACE::MemoryToImageCopy * pRegions_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , flags{ flags_ } , dstImage{ dstImage_ } @@ -19544,19 +19549,19 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR CopyMemoryToImageInfoEXT( CopyMemoryToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR CopyMemoryToImageInfo( CopyMemoryToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - CopyMemoryToImageInfoEXT( VkCopyMemoryToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : CopyMemoryToImageInfoEXT( *reinterpret_cast( &rhs ) ) + CopyMemoryToImageInfo( VkCopyMemoryToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : CopyMemoryToImageInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - CopyMemoryToImageInfoEXT( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_, - VULKAN_HPP_NAMESPACE::Image dstImage_, - VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, - 
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_, - const void * pNext_ = nullptr ) + CopyMemoryToImageInfo( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_, + VULKAN_HPP_NAMESPACE::Image dstImage_, + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , flags( flags_ ) , dstImage( dstImage_ ) @@ -19567,55 +19572,55 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - CopyMemoryToImageInfoEXT & operator=( CopyMemoryToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + CopyMemoryToImageInfo & operator=( CopyMemoryToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - CopyMemoryToImageInfoEXT & operator=( VkCopyMemoryToImageInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + CopyMemoryToImageInfo & operator=( VkCopyMemoryToImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfoEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfo & setFlags( VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags_ ) VULKAN_HPP_NOEXCEPT { flags = flags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfoEXT & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfo & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ ) VULKAN_HPP_NOEXCEPT { dstImage = dstImage_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfoEXT & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfo & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT { dstImageLayout = dstImageLayout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfoEXT & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfo & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT { regionCount = regionCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfoEXT & setPRegions( const VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT * pRegions_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 CopyMemoryToImageInfo & setPRegions( const VULKAN_HPP_NAMESPACE::MemoryToImageCopy * pRegions_ ) VULKAN_HPP_NOEXCEPT { pRegions = pRegions_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - CopyMemoryToImageInfoEXT & - setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + CopyMemoryToImageInfo & + setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT { regionCount = static_cast( regions_.size() ); pRegions = regions_.data(); @@ -19624,14 +19629,14 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkCopyMemoryToImageInfoEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkCopyMemoryToImageInfo const &() const 
VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkCopyMemoryToImageInfoEXT &() VULKAN_HPP_NOEXCEPT + operator VkCopyMemoryToImageInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -19640,11 +19645,11 @@ namespace VULKAN_HPP_NAMESPACE # else std::tuple + const VULKAN_HPP_NAMESPACE::MemoryToImageCopy * const &> # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -19653,9 +19658,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( CopyMemoryToImageInfoEXT const & ) const = default; + auto operator<=>( CopyMemoryToImageInfo const & ) const = default; #else - bool operator==( CopyMemoryToImageInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( CopyMemoryToImageInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -19665,28 +19670,30 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( CopyMemoryToImageInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( CopyMemoryToImageInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyMemoryToImageInfoEXT; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::HostImageCopyFlagsEXT flags = {}; - VULKAN_HPP_NAMESPACE::Image dstImage = {}; - VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; - uint32_t regionCount = {}; - const VULKAN_HPP_NAMESPACE::MemoryToImageCopyEXT * pRegions = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyMemoryToImageInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::HostImageCopyFlags flags = {}; + VULKAN_HPP_NAMESPACE::Image dstImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::MemoryToImageCopy * pRegions = {}; }; template <> - struct CppType + struct CppType { - using Type = CopyMemoryToImageInfoEXT; + using Type = CopyMemoryToImageInfo; }; + using CopyMemoryToImageInfoEXT = CopyMemoryToImageInfo; + struct CopyMemoryToMicromapInfoEXT { using NativeType = VkCopyMemoryToMicromapInfoEXT; @@ -30222,58 +30229,55 @@ namespace VULKAN_HPP_NAMESPACE using DeviceImageMemoryRequirementsKHR = DeviceImageMemoryRequirements; - struct ImageSubresource2KHR + struct ImageSubresource2 { - using NativeType = VkImageSubresource2KHR; + using NativeType = VkImageSubresource2; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageSubresource2KHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageSubresource2; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR ImageSubresource2KHR( VULKAN_HPP_NAMESPACE::ImageSubresource imageSubresource_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR ImageSubresource2( VULKAN_HPP_NAMESPACE::ImageSubresource imageSubresource_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , imageSubresource{ imageSubresource_ } { } - VULKAN_HPP_CONSTEXPR ImageSubresource2KHR( ImageSubresource2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR ImageSubresource2( ImageSubresource2 const & rhs ) VULKAN_HPP_NOEXCEPT = 
default; - ImageSubresource2KHR( VkImageSubresource2KHR const & rhs ) VULKAN_HPP_NOEXCEPT - : ImageSubresource2KHR( *reinterpret_cast( &rhs ) ) - { - } + ImageSubresource2( VkImageSubresource2 const & rhs ) VULKAN_HPP_NOEXCEPT : ImageSubresource2( *reinterpret_cast( &rhs ) ) {} - ImageSubresource2KHR & operator=( ImageSubresource2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + ImageSubresource2 & operator=( ImageSubresource2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - ImageSubresource2KHR & operator=( VkImageSubresource2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + ImageSubresource2 & operator=( VkImageSubresource2 const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 ImageSubresource2KHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageSubresource2 & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 ImageSubresource2KHR & setImageSubresource( VULKAN_HPP_NAMESPACE::ImageSubresource const & imageSubresource_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 ImageSubresource2 & setImageSubresource( VULKAN_HPP_NAMESPACE::ImageSubresource const & imageSubresource_ ) VULKAN_HPP_NOEXCEPT { imageSubresource = imageSubresource_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkImageSubresource2KHR const &() const VULKAN_HPP_NOEXCEPT + operator VkImageSubresource2 const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkImageSubresource2KHR &() VULKAN_HPP_NOEXCEPT + operator VkImageSubresource2 &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -30289,9 +30293,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( ImageSubresource2KHR const & ) const = default; + auto operator<=>( ImageSubresource2 const & ) const = default; #else - bool operator==( ImageSubresource2KHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( ImageSubresource2 const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -30300,88 +30304,88 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( ImageSubresource2KHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( ImageSubresource2 const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageSubresource2KHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageSubresource2; void * pNext = {}; VULKAN_HPP_NAMESPACE::ImageSubresource imageSubresource = {}; }; template <> - struct CppType + struct CppType { - using Type = ImageSubresource2KHR; + using Type = ImageSubresource2; }; - using ImageSubresource2EXT = ImageSubresource2KHR; + using ImageSubresource2EXT = ImageSubresource2; + using ImageSubresource2KHR = ImageSubresource2; - struct DeviceImageSubresourceInfoKHR + struct DeviceImageSubresourceInfo { - using NativeType = VkDeviceImageSubresourceInfoKHR; + using NativeType = VkDeviceImageSubresourceInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceImageSubresourceInfoKHR; + static 
VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceImageSubresourceInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR DeviceImageSubresourceInfoKHR( const VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo_ = {}, - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR DeviceImageSubresourceInfo( const VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo_ = {}, + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , pCreateInfo{ pCreateInfo_ } , pSubresource{ pSubresource_ } { } - VULKAN_HPP_CONSTEXPR DeviceImageSubresourceInfoKHR( DeviceImageSubresourceInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR DeviceImageSubresourceInfo( DeviceImageSubresourceInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - DeviceImageSubresourceInfoKHR( VkDeviceImageSubresourceInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : DeviceImageSubresourceInfoKHR( *reinterpret_cast( &rhs ) ) + DeviceImageSubresourceInfo( VkDeviceImageSubresourceInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : DeviceImageSubresourceInfo( *reinterpret_cast( &rhs ) ) { } - DeviceImageSubresourceInfoKHR & operator=( DeviceImageSubresourceInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + DeviceImageSubresourceInfo & operator=( DeviceImageSubresourceInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - DeviceImageSubresourceInfoKHR & operator=( VkDeviceImageSubresourceInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + DeviceImageSubresourceInfo & operator=( VkDeviceImageSubresourceInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 DeviceImageSubresourceInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 DeviceImageSubresourceInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 DeviceImageSubresourceInfoKHR & setPCreateInfo( const VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 DeviceImageSubresourceInfo & setPCreateInfo( const VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo_ ) VULKAN_HPP_NOEXCEPT { pCreateInfo = pCreateInfo_; return *this; } - VULKAN_HPP_CONSTEXPR_14 DeviceImageSubresourceInfoKHR & - setPSubresource( const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 DeviceImageSubresourceInfo & setPSubresource( const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource_ ) VULKAN_HPP_NOEXCEPT { pSubresource = pSubresource_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkDeviceImageSubresourceInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkDeviceImageSubresourceInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkDeviceImageSubresourceInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkDeviceImageSubresourceInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -30391,7 +30395,7 @@ namespace VULKAN_HPP_NAMESPACE std::tuple + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * const &> # endif 
reflect() const VULKAN_HPP_NOEXCEPT { @@ -30400,9 +30404,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DeviceImageSubresourceInfoKHR const & ) const = default; + auto operator<=>( DeviceImageSubresourceInfo const & ) const = default; #else - bool operator==( DeviceImageSubresourceInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( DeviceImageSubresourceInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -30411,25 +30415,27 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( DeviceImageSubresourceInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( DeviceImageSubresourceInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceImageSubresourceInfoKHR; - const void * pNext = {}; - const VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo = {}; - const VULKAN_HPP_NAMESPACE::ImageSubresource2KHR * pSubresource = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceImageSubresourceInfo; + const void * pNext = {}; + const VULKAN_HPP_NAMESPACE::ImageCreateInfo * pCreateInfo = {}; + const VULKAN_HPP_NAMESPACE::ImageSubresource2 * pSubresource = {}; }; template <> - struct CppType + struct CppType { - using Type = DeviceImageSubresourceInfoKHR; + using Type = DeviceImageSubresourceInfo; }; + using DeviceImageSubresourceInfoKHR = DeviceImageSubresourceInfo; + struct DeviceMemoryOpaqueCaptureAddressInfo { using NativeType = VkDeviceMemoryOpaqueCaptureAddressInfo; @@ -30980,68 +30986,68 @@ namespace VULKAN_HPP_NAMESPACE using DevicePrivateDataCreateInfoEXT = DevicePrivateDataCreateInfo; - struct DeviceQueueGlobalPriorityCreateInfoKHR + struct DeviceQueueGlobalPriorityCreateInfo { - using NativeType = VkDeviceQueueGlobalPriorityCreateInfoKHR; + using NativeType = VkDeviceQueueGlobalPriorityCreateInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceQueueGlobalPriorityCreateInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceQueueGlobalPriorityCreateInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) VULKAN_HPP_CONSTEXPR - DeviceQueueGlobalPriorityCreateInfoKHR( VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR globalPriority_ = VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + DeviceQueueGlobalPriorityCreateInfo( VULKAN_HPP_NAMESPACE::QueueGlobalPriority globalPriority_ = VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , globalPriority{ globalPriority_ } { } - VULKAN_HPP_CONSTEXPR DeviceQueueGlobalPriorityCreateInfoKHR( DeviceQueueGlobalPriorityCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR DeviceQueueGlobalPriorityCreateInfo( DeviceQueueGlobalPriorityCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - DeviceQueueGlobalPriorityCreateInfoKHR( VkDeviceQueueGlobalPriorityCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : DeviceQueueGlobalPriorityCreateInfoKHR( *reinterpret_cast( &rhs ) ) + DeviceQueueGlobalPriorityCreateInfo( VkDeviceQueueGlobalPriorityCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : DeviceQueueGlobalPriorityCreateInfo( *reinterpret_cast( &rhs ) ) { } - DeviceQueueGlobalPriorityCreateInfoKHR 
& operator=( DeviceQueueGlobalPriorityCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + DeviceQueueGlobalPriorityCreateInfo & operator=( DeviceQueueGlobalPriorityCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - DeviceQueueGlobalPriorityCreateInfoKHR & operator=( VkDeviceQueueGlobalPriorityCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + DeviceQueueGlobalPriorityCreateInfo & operator=( VkDeviceQueueGlobalPriorityCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 DeviceQueueGlobalPriorityCreateInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 DeviceQueueGlobalPriorityCreateInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 DeviceQueueGlobalPriorityCreateInfoKHR & - setGlobalPriority( VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR globalPriority_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 DeviceQueueGlobalPriorityCreateInfo & + setGlobalPriority( VULKAN_HPP_NAMESPACE::QueueGlobalPriority globalPriority_ ) VULKAN_HPP_NOEXCEPT { globalPriority = globalPriority_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkDeviceQueueGlobalPriorityCreateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkDeviceQueueGlobalPriorityCreateInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkDeviceQueueGlobalPriorityCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkDeviceQueueGlobalPriorityCreateInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) # if 14 <= VULKAN_HPP_CPP_VERSION auto # else - std::tuple + std::tuple # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -31050,9 +31056,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( DeviceQueueGlobalPriorityCreateInfoKHR const & ) const = default; + auto operator<=>( DeviceQueueGlobalPriorityCreateInfo const & ) const = default; #else - bool operator==( DeviceQueueGlobalPriorityCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( DeviceQueueGlobalPriorityCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -31061,25 +31067,26 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( DeviceQueueGlobalPriorityCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( DeviceQueueGlobalPriorityCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceQueueGlobalPriorityCreateInfoKHR; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR globalPriority = VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceQueueGlobalPriorityCreateInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::QueueGlobalPriority globalPriority = VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow; }; template <> - struct CppType + struct CppType { - using Type = DeviceQueueGlobalPriorityCreateInfoKHR; + using Type = DeviceQueueGlobalPriorityCreateInfo; }; - using DeviceQueueGlobalPriorityCreateInfoEXT = 
DeviceQueueGlobalPriorityCreateInfoKHR; + using DeviceQueueGlobalPriorityCreateInfoEXT = DeviceQueueGlobalPriorityCreateInfo; + using DeviceQueueGlobalPriorityCreateInfoKHR = DeviceQueueGlobalPriorityCreateInfo; struct DeviceQueueInfo2 { @@ -44209,47 +44216,47 @@ namespace VULKAN_HPP_NAMESPACE using Type = HeadlessSurfaceCreateInfoEXT; }; - struct HostImageCopyDevicePerformanceQueryEXT + struct HostImageCopyDevicePerformanceQuery { - using NativeType = VkHostImageCopyDevicePerformanceQueryEXT; + using NativeType = VkHostImageCopyDevicePerformanceQuery; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eHostImageCopyDevicePerformanceQueryEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eHostImageCopyDevicePerformanceQuery; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR HostImageCopyDevicePerformanceQueryEXT( VULKAN_HPP_NAMESPACE::Bool32 optimalDeviceAccess_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryLayout_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR HostImageCopyDevicePerformanceQuery( VULKAN_HPP_NAMESPACE::Bool32 optimalDeviceAccess_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryLayout_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , optimalDeviceAccess{ optimalDeviceAccess_ } , identicalMemoryLayout{ identicalMemoryLayout_ } { } - VULKAN_HPP_CONSTEXPR HostImageCopyDevicePerformanceQueryEXT( HostImageCopyDevicePerformanceQueryEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR HostImageCopyDevicePerformanceQuery( HostImageCopyDevicePerformanceQuery const & rhs ) VULKAN_HPP_NOEXCEPT = default; - HostImageCopyDevicePerformanceQueryEXT( VkHostImageCopyDevicePerformanceQueryEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : HostImageCopyDevicePerformanceQueryEXT( *reinterpret_cast( &rhs ) ) + HostImageCopyDevicePerformanceQuery( VkHostImageCopyDevicePerformanceQuery const & rhs ) VULKAN_HPP_NOEXCEPT + : HostImageCopyDevicePerformanceQuery( *reinterpret_cast( &rhs ) ) { } - HostImageCopyDevicePerformanceQueryEXT & operator=( HostImageCopyDevicePerformanceQueryEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + HostImageCopyDevicePerformanceQuery & operator=( HostImageCopyDevicePerformanceQuery const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - HostImageCopyDevicePerformanceQueryEXT & operator=( VkHostImageCopyDevicePerformanceQueryEXT const & rhs ) VULKAN_HPP_NOEXCEPT + HostImageCopyDevicePerformanceQuery & operator=( VkHostImageCopyDevicePerformanceQuery const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } - operator VkHostImageCopyDevicePerformanceQueryEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkHostImageCopyDevicePerformanceQuery const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkHostImageCopyDevicePerformanceQueryEXT &() VULKAN_HPP_NOEXCEPT + operator VkHostImageCopyDevicePerformanceQuery &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -44265,9 +44272,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( HostImageCopyDevicePerformanceQueryEXT const & ) const = default; + auto operator<=>( HostImageCopyDevicePerformanceQuery 
const & ) const = default; #else - bool operator==( HostImageCopyDevicePerformanceQueryEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( HostImageCopyDevicePerformanceQuery const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -44277,38 +44284,40 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( HostImageCopyDevicePerformanceQueryEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( HostImageCopyDevicePerformanceQuery const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eHostImageCopyDevicePerformanceQueryEXT; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eHostImageCopyDevicePerformanceQuery; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 optimalDeviceAccess = {}; VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryLayout = {}; }; template <> - struct CppType + struct CppType { - using Type = HostImageCopyDevicePerformanceQueryEXT; + using Type = HostImageCopyDevicePerformanceQuery; }; - struct HostImageLayoutTransitionInfoEXT + using HostImageCopyDevicePerformanceQueryEXT = HostImageCopyDevicePerformanceQuery; + + struct HostImageLayoutTransitionInfo { - using NativeType = VkHostImageLayoutTransitionInfoEXT; + using NativeType = VkHostImageLayoutTransitionInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eHostImageLayoutTransitionInfoEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eHostImageLayoutTransitionInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR HostImageLayoutTransitionInfoEXT( VULKAN_HPP_NAMESPACE::Image image_ = {}, - VULKAN_HPP_NAMESPACE::ImageLayout oldLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, - VULKAN_HPP_NAMESPACE::ImageLayout newLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, - VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR HostImageLayoutTransitionInfo( VULKAN_HPP_NAMESPACE::Image image_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout oldLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, + VULKAN_HPP_NAMESPACE::ImageLayout newLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, + VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , image{ image_ } , oldLayout{ oldLayout_ } @@ -44317,48 +44326,48 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR HostImageLayoutTransitionInfoEXT( HostImageLayoutTransitionInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR HostImageLayoutTransitionInfo( HostImageLayoutTransitionInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - HostImageLayoutTransitionInfoEXT( VkHostImageLayoutTransitionInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : HostImageLayoutTransitionInfoEXT( *reinterpret_cast( &rhs ) ) + HostImageLayoutTransitionInfo( VkHostImageLayoutTransitionInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : HostImageLayoutTransitionInfo( *reinterpret_cast( &rhs ) ) { } - HostImageLayoutTransitionInfoEXT & operator=( HostImageLayoutTransitionInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + HostImageLayoutTransitionInfo & operator=( HostImageLayoutTransitionInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif 
/*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - HostImageLayoutTransitionInfoEXT & operator=( VkHostImageLayoutTransitionInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + HostImageLayoutTransitionInfo & operator=( VkHostImageLayoutTransitionInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfoEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfoEXT & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfo & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT { image = image_; return *this; } - VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfoEXT & setOldLayout( VULKAN_HPP_NAMESPACE::ImageLayout oldLayout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfo & setOldLayout( VULKAN_HPP_NAMESPACE::ImageLayout oldLayout_ ) VULKAN_HPP_NOEXCEPT { oldLayout = oldLayout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfoEXT & setNewLayout( VULKAN_HPP_NAMESPACE::ImageLayout newLayout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfo & setNewLayout( VULKAN_HPP_NAMESPACE::ImageLayout newLayout_ ) VULKAN_HPP_NOEXCEPT { newLayout = newLayout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfoEXT & + VULKAN_HPP_CONSTEXPR_14 HostImageLayoutTransitionInfo & setSubresourceRange( VULKAN_HPP_NAMESPACE::ImageSubresourceRange const & subresourceRange_ ) VULKAN_HPP_NOEXCEPT { subresourceRange = subresourceRange_; @@ -44366,14 +44375,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkHostImageLayoutTransitionInfoEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkHostImageLayoutTransitionInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkHostImageLayoutTransitionInfoEXT &() VULKAN_HPP_NOEXCEPT + operator VkHostImageLayoutTransitionInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -44394,9 +44403,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( HostImageLayoutTransitionInfoEXT const & ) const = default; + auto operator<=>( HostImageLayoutTransitionInfo const & ) const = default; #else - bool operator==( HostImageLayoutTransitionInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( HostImageLayoutTransitionInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -44406,14 +44415,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( HostImageLayoutTransitionInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( HostImageLayoutTransitionInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eHostImageLayoutTransitionInfoEXT; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eHostImageLayoutTransitionInfo; const void * pNext = {}; VULKAN_HPP_NAMESPACE::Image image = {}; 
VULKAN_HPP_NAMESPACE::ImageLayout oldLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; @@ -44422,11 +44431,13 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = HostImageLayoutTransitionInfoEXT; + using Type = HostImageLayoutTransitionInfo; }; + using HostImageLayoutTransitionInfoEXT = HostImageLayoutTransitionInfo; + #if defined( VK_USE_PLATFORM_IOS_MVK ) struct IOSSurfaceCreateInfoMVK { @@ -54888,19 +54899,19 @@ namespace VULKAN_HPP_NAMESPACE using Type = MemoryHostPointerPropertiesEXT; }; - struct MemoryMapInfoKHR + struct MemoryMapInfo { - using NativeType = VkMemoryMapInfoKHR; + using NativeType = VkMemoryMapInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryMapInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryMapInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR MemoryMapInfoKHR( VULKAN_HPP_NAMESPACE::MemoryMapFlags flags_ = {}, - VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, - VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, - VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR MemoryMapInfo( VULKAN_HPP_NAMESPACE::MemoryMapFlags flags_ = {}, + VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, + VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, + VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , flags{ flags_ } , memory{ memory_ } @@ -54909,59 +54920,59 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR MemoryMapInfoKHR( MemoryMapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR MemoryMapInfo( MemoryMapInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - MemoryMapInfoKHR( VkMemoryMapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT : MemoryMapInfoKHR( *reinterpret_cast( &rhs ) ) {} + MemoryMapInfo( VkMemoryMapInfo const & rhs ) VULKAN_HPP_NOEXCEPT : MemoryMapInfo( *reinterpret_cast( &rhs ) ) {} - MemoryMapInfoKHR & operator=( MemoryMapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + MemoryMapInfo & operator=( MemoryMapInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - MemoryMapInfoKHR & operator=( VkMemoryMapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + MemoryMapInfo & operator=( VkMemoryMapInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 MemoryMapInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryMapInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryMapInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::MemoryMapFlags flags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryMapInfo & setFlags( VULKAN_HPP_NAMESPACE::MemoryMapFlags flags_ ) VULKAN_HPP_NOEXCEPT { flags = flags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryMapInfoKHR & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryMapInfo & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT { memory = memory_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryMapInfoKHR & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + 
VULKAN_HPP_CONSTEXPR_14 MemoryMapInfo & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT { offset = offset_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryMapInfoKHR & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryMapInfo & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT { size = size_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkMemoryMapInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkMemoryMapInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkMemoryMapInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkMemoryMapInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -54982,9 +54993,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( MemoryMapInfoKHR const & ) const = default; + auto operator<=>( MemoryMapInfo const & ) const = default; #else - bool operator==( MemoryMapInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( MemoryMapInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -54994,14 +55005,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( MemoryMapInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( MemoryMapInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryMapInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryMapInfo; const void * pNext = {}; VULKAN_HPP_NAMESPACE::MemoryMapFlags flags = {}; VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; @@ -55010,11 +55021,13 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = MemoryMapInfoKHR; + using Type = MemoryMapInfo; }; + using MemoryMapInfoKHR = MemoryMapInfo; + struct MemoryMapPlacedInfoEXT { using NativeType = VkMemoryMapPlacedInfoEXT; @@ -55530,64 +55543,64 @@ namespace VULKAN_HPP_NAMESPACE uint32_t heapIndex = {}; }; - struct MemoryUnmapInfoKHR + struct MemoryUnmapInfo { - using NativeType = VkMemoryUnmapInfoKHR; + using NativeType = VkMemoryUnmapInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryUnmapInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryUnmapInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR MemoryUnmapInfoKHR( VULKAN_HPP_NAMESPACE::MemoryUnmapFlagsKHR flags_ = {}, - VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR MemoryUnmapInfo( VULKAN_HPP_NAMESPACE::MemoryUnmapFlags flags_ = {}, + VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , flags{ flags_ } , memory{ memory_ } { } - VULKAN_HPP_CONSTEXPR MemoryUnmapInfoKHR( MemoryUnmapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR MemoryUnmapInfo( MemoryUnmapInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - MemoryUnmapInfoKHR( VkMemoryUnmapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT : MemoryUnmapInfoKHR( *reinterpret_cast( &rhs ) ) {} + MemoryUnmapInfo( VkMemoryUnmapInfo const & rhs ) 
VULKAN_HPP_NOEXCEPT : MemoryUnmapInfo( *reinterpret_cast( &rhs ) ) {} - MemoryUnmapInfoKHR & operator=( MemoryUnmapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + MemoryUnmapInfo & operator=( MemoryUnmapInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - MemoryUnmapInfoKHR & operator=( VkMemoryUnmapInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + MemoryUnmapInfo & operator=( VkMemoryUnmapInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 MemoryUnmapInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryUnmapInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryUnmapInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::MemoryUnmapFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryUnmapInfo & setFlags( VULKAN_HPP_NAMESPACE::MemoryUnmapFlags flags_ ) VULKAN_HPP_NOEXCEPT { flags = flags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 MemoryUnmapInfoKHR & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 MemoryUnmapInfo & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT { memory = memory_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkMemoryUnmapInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkMemoryUnmapInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkMemoryUnmapInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkMemoryUnmapInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -55596,7 +55609,7 @@ namespace VULKAN_HPP_NAMESPACE # else std::tuple # endif reflect() const VULKAN_HPP_NOEXCEPT @@ -55606,9 +55619,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( MemoryUnmapInfoKHR const & ) const = default; + auto operator<=>( MemoryUnmapInfo const & ) const = default; #else - bool operator==( MemoryUnmapInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( MemoryUnmapInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -55617,25 +55630,27 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( MemoryUnmapInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( MemoryUnmapInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryUnmapInfoKHR; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::MemoryUnmapFlagsKHR flags = {}; - VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryUnmapInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::MemoryUnmapFlags flags = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; }; template <> - struct CppType + struct CppType { - using Type = MemoryUnmapInfoKHR; + using Type = MemoryUnmapInfo; }; + using MemoryUnmapInfoKHR = MemoryUnmapInfo; + #if defined( VK_USE_PLATFORM_WIN32_KHR ) struct MemoryWin32HandlePropertiesKHR { @@ -67171,47 +67186,46 @@ namespace VULKAN_HPP_NAMESPACE using PhysicalDeviceDynamicRenderingFeaturesKHR = 
PhysicalDeviceDynamicRenderingFeatures; - struct PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR + struct PhysicalDeviceDynamicRenderingLocalReadFeatures { - using NativeType = VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; + using NativeType = VkPhysicalDeviceDynamicRenderingLocalReadFeatures; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDynamicRenderingLocalReadFeatures; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PhysicalDeviceDynamicRenderingLocalReadFeatures( VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , dynamicRenderingLocalRead{ dynamicRenderingLocalRead_ } { } VULKAN_HPP_CONSTEXPR - PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR( PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceDynamicRenderingLocalReadFeatures( PhysicalDeviceDynamicRenderingLocalReadFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR( VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR( *reinterpret_cast( &rhs ) ) + PhysicalDeviceDynamicRenderingLocalReadFeatures( VkPhysicalDeviceDynamicRenderingLocalReadFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceDynamicRenderingLocalReadFeatures( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR & - operator=( PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceDynamicRenderingLocalReadFeatures & operator=( PhysicalDeviceDynamicRenderingLocalReadFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR & operator=( VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceDynamicRenderingLocalReadFeatures & operator=( VkPhysicalDeviceDynamicRenderingLocalReadFeatures const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceDynamicRenderingLocalReadFeatures & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceDynamicRenderingLocalReadFeatures & setDynamicRenderingLocalRead( VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead_ ) VULKAN_HPP_NOEXCEPT { dynamicRenderingLocalRead = dynamicRenderingLocalRead_; @@ -67219,14 +67233,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceDynamicRenderingLocalReadFeatures const &() 
const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceDynamicRenderingLocalReadFeatures &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -67242,9 +67256,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & ) const = default; + auto operator<=>( PhysicalDeviceDynamicRenderingLocalReadFeatures const & ) const = default; #else - bool operator==( PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceDynamicRenderingLocalReadFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -67253,24 +67267,26 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceDynamicRenderingLocalReadFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDynamicRenderingLocalReadFeatures; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; + using Type = PhysicalDeviceDynamicRenderingLocalReadFeatures; }; + using PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR = PhysicalDeviceDynamicRenderingLocalReadFeatures; + struct PhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT { using NativeType = VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT; @@ -71417,45 +71433,45 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceFrameBoundaryFeaturesEXT; }; - struct PhysicalDeviceGlobalPriorityQueryFeaturesKHR + struct PhysicalDeviceGlobalPriorityQueryFeatures { - using NativeType = VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR; + using NativeType = VkPhysicalDeviceGlobalPriorityQueryFeatures; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceGlobalPriorityQueryFeaturesKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceGlobalPriorityQueryFeatures; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceGlobalPriorityQueryFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PhysicalDeviceGlobalPriorityQueryFeatures( VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , globalPriorityQuery{ globalPriorityQuery_ } { } - VULKAN_HPP_CONSTEXPR PhysicalDeviceGlobalPriorityQueryFeaturesKHR( PhysicalDeviceGlobalPriorityQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PhysicalDeviceGlobalPriorityQueryFeatures( PhysicalDeviceGlobalPriorityQueryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; - 
PhysicalDeviceGlobalPriorityQueryFeaturesKHR( VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceGlobalPriorityQueryFeaturesKHR( *reinterpret_cast( &rhs ) ) + PhysicalDeviceGlobalPriorityQueryFeatures( VkPhysicalDeviceGlobalPriorityQueryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceGlobalPriorityQueryFeatures( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceGlobalPriorityQueryFeaturesKHR & operator=( PhysicalDeviceGlobalPriorityQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceGlobalPriorityQueryFeatures & operator=( PhysicalDeviceGlobalPriorityQueryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceGlobalPriorityQueryFeaturesKHR & operator=( VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceGlobalPriorityQueryFeatures & operator=( VkPhysicalDeviceGlobalPriorityQueryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceGlobalPriorityQueryFeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceGlobalPriorityQueryFeatures & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceGlobalPriorityQueryFeaturesKHR & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceGlobalPriorityQueryFeatures & setGlobalPriorityQuery( VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery_ ) VULKAN_HPP_NOEXCEPT { globalPriorityQuery = globalPriorityQuery_; @@ -71463,14 +71479,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceGlobalPriorityQueryFeatures const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceGlobalPriorityQueryFeatures &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -71486,9 +71502,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceGlobalPriorityQueryFeaturesKHR const & ) const = default; + auto operator<=>( PhysicalDeviceGlobalPriorityQueryFeatures const & ) const = default; #else - bool operator==( PhysicalDeviceGlobalPriorityQueryFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceGlobalPriorityQueryFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -71497,25 +71513,26 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PhysicalDeviceGlobalPriorityQueryFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceGlobalPriorityQueryFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceGlobalPriorityQueryFeaturesKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceGlobalPriorityQueryFeatures; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery = {}; }; template <> - 
struct CppType + struct CppType { - using Type = PhysicalDeviceGlobalPriorityQueryFeaturesKHR; + using Type = PhysicalDeviceGlobalPriorityQueryFeatures; }; - using PhysicalDeviceGlobalPriorityQueryFeaturesEXT = PhysicalDeviceGlobalPriorityQueryFeaturesKHR; + using PhysicalDeviceGlobalPriorityQueryFeaturesEXT = PhysicalDeviceGlobalPriorityQueryFeatures; + using PhysicalDeviceGlobalPriorityQueryFeaturesKHR = PhysicalDeviceGlobalPriorityQueryFeatures; struct PhysicalDeviceGraphicsPipelineLibraryFeaturesEXT { @@ -71935,58 +71952,58 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceHdrVividFeaturesHUAWEI; }; - struct PhysicalDeviceHostImageCopyFeaturesEXT + struct PhysicalDeviceHostImageCopyFeatures { - using NativeType = VkPhysicalDeviceHostImageCopyFeaturesEXT; + using NativeType = VkPhysicalDeviceHostImageCopyFeatures; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceHostImageCopyFeaturesEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceHostImageCopyFeatures; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceHostImageCopyFeaturesEXT( VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PhysicalDeviceHostImageCopyFeatures( VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , hostImageCopy{ hostImageCopy_ } { } - VULKAN_HPP_CONSTEXPR PhysicalDeviceHostImageCopyFeaturesEXT( PhysicalDeviceHostImageCopyFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PhysicalDeviceHostImageCopyFeatures( PhysicalDeviceHostImageCopyFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceHostImageCopyFeaturesEXT( VkPhysicalDeviceHostImageCopyFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceHostImageCopyFeaturesEXT( *reinterpret_cast( &rhs ) ) + PhysicalDeviceHostImageCopyFeatures( VkPhysicalDeviceHostImageCopyFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceHostImageCopyFeatures( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceHostImageCopyFeaturesEXT & operator=( PhysicalDeviceHostImageCopyFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceHostImageCopyFeatures & operator=( PhysicalDeviceHostImageCopyFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceHostImageCopyFeaturesEXT & operator=( VkPhysicalDeviceHostImageCopyFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceHostImageCopyFeatures & operator=( VkPhysicalDeviceHostImageCopyFeatures const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyFeaturesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyFeatures & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyFeaturesEXT & setHostImageCopy( VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyFeatures & setHostImageCopy( VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy_ ) VULKAN_HPP_NOEXCEPT { hostImageCopy = hostImageCopy_; return *this; } #endif 
/*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceHostImageCopyFeaturesEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceHostImageCopyFeatures const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceHostImageCopyFeaturesEXT &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceHostImageCopyFeatures &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -72002,9 +72019,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceHostImageCopyFeaturesEXT const & ) const = default; + auto operator<=>( PhysicalDeviceHostImageCopyFeatures const & ) const = default; #else - bool operator==( PhysicalDeviceHostImageCopyFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceHostImageCopyFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -72013,39 +72030,41 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PhysicalDeviceHostImageCopyFeaturesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceHostImageCopyFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceHostImageCopyFeaturesEXT; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceHostImageCopyFeatures; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceHostImageCopyFeaturesEXT; + using Type = PhysicalDeviceHostImageCopyFeatures; }; - struct PhysicalDeviceHostImageCopyPropertiesEXT + using PhysicalDeviceHostImageCopyFeaturesEXT = PhysicalDeviceHostImageCopyFeatures; + + struct PhysicalDeviceHostImageCopyProperties { - using NativeType = VkPhysicalDeviceHostImageCopyPropertiesEXT; + using NativeType = VkPhysicalDeviceHostImageCopyProperties; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceHostImageCopyPropertiesEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceHostImageCopyProperties; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT( uint32_t copySrcLayoutCount_ = {}, - VULKAN_HPP_NAMESPACE::ImageLayout * pCopySrcLayouts_ = {}, - uint32_t copyDstLayoutCount_ = {}, - VULKAN_HPP_NAMESPACE::ImageLayout * pCopyDstLayouts_ = {}, - std::array const & optimalTilingLayoutUUID_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties( uint32_t copySrcLayoutCount_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout * pCopySrcLayouts_ = {}, + uint32_t copyDstLayoutCount_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout * pCopyDstLayouts_ = {}, + std::array const & optimalTilingLayoutUUID_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , copySrcLayoutCount{ copySrcLayoutCount_ } , pCopySrcLayouts{ pCopySrcLayouts_ } @@ -72056,19 +72075,19 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR_14 
PhysicalDeviceHostImageCopyPropertiesEXT( PhysicalDeviceHostImageCopyPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties( PhysicalDeviceHostImageCopyProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceHostImageCopyPropertiesEXT( VkPhysicalDeviceHostImageCopyPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceHostImageCopyPropertiesEXT( *reinterpret_cast( &rhs ) ) + PhysicalDeviceHostImageCopyProperties( VkPhysicalDeviceHostImageCopyProperties const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceHostImageCopyProperties( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PhysicalDeviceHostImageCopyPropertiesEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & copySrcLayouts_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & copyDstLayouts_ = {}, - std::array const & optimalTilingLayoutUUID_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements_ = {}, - void * pNext_ = nullptr ) + PhysicalDeviceHostImageCopyProperties( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & copySrcLayouts_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & copyDstLayouts_ = {}, + std::array const & optimalTilingLayoutUUID_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements_ = {}, + void * pNext_ = nullptr ) : pNext( pNext_ ) , copySrcLayoutCount( static_cast( copySrcLayouts_.size() ) ) , pCopySrcLayouts( copySrcLayouts_.data() ) @@ -72080,29 +72099,29 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PhysicalDeviceHostImageCopyPropertiesEXT & operator=( PhysicalDeviceHostImageCopyPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceHostImageCopyProperties & operator=( PhysicalDeviceHostImageCopyProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceHostImageCopyPropertiesEXT & operator=( VkPhysicalDeviceHostImageCopyPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceHostImageCopyProperties & operator=( VkPhysicalDeviceHostImageCopyProperties const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT & setCopySrcLayoutCount( uint32_t copySrcLayoutCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties & setCopySrcLayoutCount( uint32_t copySrcLayoutCount_ ) VULKAN_HPP_NOEXCEPT { copySrcLayoutCount = copySrcLayoutCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties & setPCopySrcLayouts( VULKAN_HPP_NAMESPACE::ImageLayout * pCopySrcLayouts_ ) VULKAN_HPP_NOEXCEPT { pCopySrcLayouts = pCopySrcLayouts_; @@ -72110,7 +72129,7 @@ namespace VULKAN_HPP_NAMESPACE } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PhysicalDeviceHostImageCopyPropertiesEXT & + PhysicalDeviceHostImageCopyProperties & setCopySrcLayouts( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & copySrcLayouts_ ) VULKAN_HPP_NOEXCEPT { copySrcLayoutCount = static_cast( 
copySrcLayouts_.size() ); @@ -72119,13 +72138,13 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT & setCopyDstLayoutCount( uint32_t copyDstLayoutCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties & setCopyDstLayoutCount( uint32_t copyDstLayoutCount_ ) VULKAN_HPP_NOEXCEPT { copyDstLayoutCount = copyDstLayoutCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties & setPCopyDstLayouts( VULKAN_HPP_NAMESPACE::ImageLayout * pCopyDstLayouts_ ) VULKAN_HPP_NOEXCEPT { pCopyDstLayouts = pCopyDstLayouts_; @@ -72133,7 +72152,7 @@ namespace VULKAN_HPP_NAMESPACE } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PhysicalDeviceHostImageCopyPropertiesEXT & + PhysicalDeviceHostImageCopyProperties & setCopyDstLayouts( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & copyDstLayouts_ ) VULKAN_HPP_NOEXCEPT { copyDstLayoutCount = static_cast( copyDstLayouts_.size() ); @@ -72142,14 +72161,14 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties & setOptimalTilingLayoutUUID( std::array optimalTilingLayoutUUID_ ) VULKAN_HPP_NOEXCEPT { optimalTilingLayoutUUID = optimalTilingLayoutUUID_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyPropertiesEXT & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceHostImageCopyProperties & setIdenticalMemoryTypeRequirements( VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements_ ) VULKAN_HPP_NOEXCEPT { identicalMemoryTypeRequirements = identicalMemoryTypeRequirements_; @@ -72157,14 +72176,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceHostImageCopyPropertiesEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceHostImageCopyProperties const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceHostImageCopyPropertiesEXT &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceHostImageCopyProperties &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -72188,9 +72207,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceHostImageCopyPropertiesEXT const & ) const = default; + auto operator<=>( PhysicalDeviceHostImageCopyProperties const & ) const = default; #else - bool operator==( PhysicalDeviceHostImageCopyPropertiesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceHostImageCopyProperties const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -72201,14 +72220,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PhysicalDeviceHostImageCopyPropertiesEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceHostImageCopyProperties const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceHostImageCopyPropertiesEXT; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceHostImageCopyProperties; void * pNext = {}; 
      uint32_t                            copySrcLayoutCount = {};
      VULKAN_HPP_NAMESPACE::ImageLayout * pCopySrcLayouts    = {};
@@ -72219,11 +72238,13 @@ namespace VULKAN_HPP_NAMESPACE
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceHostImageCopyPropertiesEXT>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceHostImageCopyProperties>
   {
-    using Type = PhysicalDeviceHostImageCopyPropertiesEXT;
+    using Type = PhysicalDeviceHostImageCopyProperties;
   };

+  using PhysicalDeviceHostImageCopyPropertiesEXT = PhysicalDeviceHostImageCopyProperties;
+
   struct PhysicalDeviceHostQueryResetFeatures
   {
     using NativeType = VkPhysicalDeviceHostQueryResetFeatures;
@@ -74125,59 +74146,58 @@ namespace VULKAN_HPP_NAMESPACE
   using PhysicalDeviceImagelessFramebufferFeaturesKHR = PhysicalDeviceImagelessFramebufferFeatures;

-  struct PhysicalDeviceIndexTypeUint8FeaturesKHR
+  struct PhysicalDeviceIndexTypeUint8Features
   {
-    using NativeType = VkPhysicalDeviceIndexTypeUint8FeaturesKHR;
+    using NativeType = VkPhysicalDeviceIndexTypeUint8Features;
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceIndexTypeUint8FeaturesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceIndexTypeUint8Features>
   {
-    using Type = PhysicalDeviceIndexTypeUint8FeaturesKHR;
+    using Type = PhysicalDeviceIndexTypeUint8Features;
   };

-  using PhysicalDeviceIndexTypeUint8FeaturesEXT = PhysicalDeviceIndexTypeUint8FeaturesKHR;
+  using PhysicalDeviceIndexTypeUint8FeaturesEXT = PhysicalDeviceIndexTypeUint8Features;
+  using PhysicalDeviceIndexTypeUint8FeaturesKHR = PhysicalDeviceIndexTypeUint8Features;

   struct PhysicalDeviceInheritedViewportScissorFeaturesNV
   {
@@ -76291,21 +76312,21 @@ namespace VULKAN_HPP_NAMESPACE
     using Type = PhysicalDeviceLegacyVertexAttributesPropertiesEXT;
   };

-  struct PhysicalDeviceLineRasterizationFeaturesKHR
+  struct PhysicalDeviceLineRasterizationFeatures
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceLineRasterizationFeaturesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLineRasterizationFeatures>
   {
-    using Type = PhysicalDeviceLineRasterizationFeaturesKHR;
+    using Type = PhysicalDeviceLineRasterizationFeatures;
   };

-  using PhysicalDeviceLineRasterizationFeaturesEXT = PhysicalDeviceLineRasterizationFeaturesKHR;
+  using PhysicalDeviceLineRasterizationFeaturesEXT = PhysicalDeviceLineRasterizationFeatures;
+  using PhysicalDeviceLineRasterizationFeaturesKHR = PhysicalDeviceLineRasterizationFeatures;

-  struct PhysicalDeviceLineRasterizationPropertiesKHR
+  struct PhysicalDeviceLineRasterizationProperties
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceLineRasterizationPropertiesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLineRasterizationProperties>
   {
-    using Type = PhysicalDeviceLineRasterizationPropertiesKHR;
+    using Type = PhysicalDeviceLineRasterizationProperties;
   };

-  using PhysicalDeviceLineRasterizationPropertiesEXT = PhysicalDeviceLineRasterizationPropertiesKHR;
+  using PhysicalDeviceLineRasterizationPropertiesEXT = PhysicalDeviceLineRasterizationProperties;
+  using PhysicalDeviceLineRasterizationPropertiesKHR = PhysicalDeviceLineRasterizationProperties;

   struct PhysicalDeviceLinearColorAttachmentFeaturesNV
   {
@@ -76903,58 +76925,58 @@ namespace VULKAN_HPP_NAMESPACE
   using PhysicalDeviceMaintenance4PropertiesKHR = PhysicalDeviceMaintenance4Properties;

-  struct PhysicalDeviceMaintenance5FeaturesKHR
+  struct PhysicalDeviceMaintenance5Features
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance5FeaturesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance5Features>
   {
-    using Type = PhysicalDeviceMaintenance5FeaturesKHR;
+    using Type = PhysicalDeviceMaintenance5Features;
   };

-  struct PhysicalDeviceMaintenance5PropertiesKHR
+  using PhysicalDeviceMaintenance5FeaturesKHR = PhysicalDeviceMaintenance5Features;
+
+  struct PhysicalDeviceMaintenance5Properties
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance5PropertiesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance5Properties>
   {
-    using Type = PhysicalDeviceMaintenance5PropertiesKHR;
+    using Type = PhysicalDeviceMaintenance5Properties;
   };

-  struct PhysicalDeviceMaintenance6FeaturesKHR
+  using PhysicalDeviceMaintenance5PropertiesKHR = PhysicalDeviceMaintenance5Properties;
+
+  struct PhysicalDeviceMaintenance6Features
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance6FeaturesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance6Features>
   {
-    using Type = PhysicalDeviceMaintenance6FeaturesKHR;
+    using Type = PhysicalDeviceMaintenance6Features;
   };

-  struct PhysicalDeviceMaintenance6PropertiesKHR
+  using PhysicalDeviceMaintenance6FeaturesKHR = PhysicalDeviceMaintenance6Features;
+
+  struct PhysicalDeviceMaintenance6Properties
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance6PropertiesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceMaintenance6Properties>
   {
-    using Type = PhysicalDeviceMaintenance6PropertiesKHR;
+    using Type = PhysicalDeviceMaintenance6Properties;
   };

+  using PhysicalDeviceMaintenance6PropertiesKHR = PhysicalDeviceMaintenance6Properties;
+
   struct PhysicalDeviceMaintenance7FeaturesKHR
   {
     using NativeType = VkPhysicalDeviceMaintenance7FeaturesKHR;
@@ -81725,46 +81755,46 @@ namespace VULKAN_HPP_NAMESPACE
     using Type = PhysicalDevicePipelinePropertiesFeaturesEXT;
   };

-  struct PhysicalDevicePipelineProtectedAccessFeaturesEXT
+  struct PhysicalDevicePipelineProtectedAccessFeatures
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineProtectedAccessFeaturesEXT>
+  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineProtectedAccessFeatures>
   {
-    using Type = PhysicalDevicePipelineProtectedAccessFeaturesEXT;
+    using Type = PhysicalDevicePipelineProtectedAccessFeatures;
   };

-  struct PhysicalDevicePipelineRobustnessFeaturesEXT
+  using PhysicalDevicePipelineProtectedAccessFeaturesEXT = PhysicalDevicePipelineProtectedAccessFeatures;
+
+  struct PhysicalDevicePipelineRobustnessFeatures
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineRobustnessFeaturesEXT>
+  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineRobustnessFeatures>
   {
-    using Type = PhysicalDevicePipelineRobustnessFeaturesEXT;
+    using Type = PhysicalDevicePipelineRobustnessFeatures;
   };

-  struct PhysicalDevicePipelineRobustnessPropertiesEXT
+  using PhysicalDevicePipelineRobustnessFeaturesEXT = PhysicalDevicePipelineRobustnessFeatures;
+
+  struct PhysicalDevicePipelineRobustnessProperties
   {
     ...
-    VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT defaultRobustnessStorageBuffers =
-      VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault;
-    VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT  defaultRobustnessImages =
-      VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT::eDeviceDefault;
+    VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior defaultRobustnessStorageBuffers =
+      VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault;
+    VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior  defaultRobustnessImages =
+      VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior::eDeviceDefault;
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineRobustnessPropertiesEXT>
+  struct CppType<StructureType, StructureType::ePhysicalDevicePipelineRobustnessProperties>
   {
-    using Type = PhysicalDevicePipelineRobustnessPropertiesEXT;
+    using Type = PhysicalDevicePipelineRobustnessProperties;
   };

+  using PhysicalDevicePipelineRobustnessPropertiesEXT = PhysicalDevicePipelineRobustnessProperties;
+
   struct PhysicalDevicePointClippingProperties
   {
     using NativeType = VkPhysicalDevicePointClippingProperties;
@@ -83593,44 +83627,44 @@ namespace VULKAN_HPP_NAMESPACE
     using Type = PhysicalDeviceProvokingVertexPropertiesEXT;
   };

-  struct PhysicalDevicePushDescriptorPropertiesKHR
+  struct PhysicalDevicePushDescriptorProperties
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDevicePushDescriptorPropertiesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDevicePushDescriptorProperties>
   {
-    using Type = PhysicalDevicePushDescriptorPropertiesKHR;
+    using Type = PhysicalDevicePushDescriptorProperties;
   };

+  using PhysicalDevicePushDescriptorPropertiesKHR = PhysicalDevicePushDescriptorProperties;
+
   struct PhysicalDeviceRGBA10X6FormatsFeaturesEXT
   {
     using NativeType = VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT;
@@ -88319,45 +88355,45 @@ namespace VULKAN_HPP_NAMESPACE
   };
 #endif /*VK_ENABLE_BETA_EXTENSIONS*/

-  struct PhysicalDeviceShaderExpectAssumeFeaturesKHR
+  struct PhysicalDeviceShaderExpectAssumeFeatures
   {
     ...
   };

   template <>
-  struct CppType<StructureType, StructureType::ePhysicalDeviceShaderExpectAssumeFeaturesKHR>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceShaderExpectAssumeFeatures>
   {
-    using Type = PhysicalDeviceShaderExpectAssumeFeaturesKHR;
+    using Type = PhysicalDeviceShaderExpectAssumeFeatures;
   };

+  using PhysicalDeviceShaderExpectAssumeFeaturesKHR = PhysicalDeviceShaderExpectAssumeFeatures;
+
   struct PhysicalDeviceShaderFloat16Int8Features
   {
     using NativeType = VkPhysicalDeviceShaderFloat16Int8Features;
@@ -88526,46 +88564,45 @@ namespace VULKAN_HPP_NAMESPACE
   using PhysicalDeviceFloat16Int8FeaturesKHR       = PhysicalDeviceShaderFloat16Int8Features;
   using PhysicalDeviceShaderFloat16Int8FeaturesKHR = PhysicalDeviceShaderFloat16Int8Features;

-  struct PhysicalDeviceShaderFloatControls2FeaturesKHR
+  struct PhysicalDeviceShaderFloatControls2Features
   {
-    using NativeType = VkPhysicalDeviceShaderFloatControls2FeaturesKHR;
+    using NativeType = VkPhysicalDeviceShaderFloatControls2Features;

     static const bool allowDuplicate = false;
-    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderFloatControls2FeaturesKHR;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderFloatControls2Features;
     ...
@@ -88607,24 +88644,26 @@ namespace VULKAN_HPP_NAMESPACE
 # endif
     }

-    bool operator!=(
PhysicalDeviceShaderFloatControls2FeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceShaderFloatControls2Features const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderFloatControls2FeaturesKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderFloatControls2Features; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 shaderFloatControls2 = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceShaderFloatControls2FeaturesKHR; + using Type = PhysicalDeviceShaderFloatControls2Features; }; + using PhysicalDeviceShaderFloatControls2FeaturesKHR = PhysicalDeviceShaderFloatControls2Features; + struct PhysicalDeviceShaderImageAtomicInt64FeaturesEXT { using NativeType = VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT; @@ -90358,55 +90397,54 @@ namespace VULKAN_HPP_NAMESPACE using PhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR = PhysicalDeviceShaderSubgroupExtendedTypesFeatures; - struct PhysicalDeviceShaderSubgroupRotateFeaturesKHR + struct PhysicalDeviceShaderSubgroupRotateFeatures { - using NativeType = VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR; + using NativeType = VkPhysicalDeviceShaderSubgroupRotateFeatures; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderSubgroupRotateFeaturesKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderSubgroupRotateFeatures; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupRotateFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupRotateFeatures( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , shaderSubgroupRotate{ shaderSubgroupRotate_ } , shaderSubgroupRotateClustered{ shaderSubgroupRotateClustered_ } { } - VULKAN_HPP_CONSTEXPR - PhysicalDeviceShaderSubgroupRotateFeaturesKHR( PhysicalDeviceShaderSubgroupRotateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupRotateFeatures( PhysicalDeviceShaderSubgroupRotateFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceShaderSubgroupRotateFeaturesKHR( VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceShaderSubgroupRotateFeaturesKHR( *reinterpret_cast( &rhs ) ) + PhysicalDeviceShaderSubgroupRotateFeatures( VkPhysicalDeviceShaderSubgroupRotateFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceShaderSubgroupRotateFeatures( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceShaderSubgroupRotateFeaturesKHR & operator=( PhysicalDeviceShaderSubgroupRotateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceShaderSubgroupRotateFeatures & operator=( PhysicalDeviceShaderSubgroupRotateFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceShaderSubgroupRotateFeaturesKHR & operator=( VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + 
PhysicalDeviceShaderSubgroupRotateFeatures & operator=( VkPhysicalDeviceShaderSubgroupRotateFeatures const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderSubgroupRotateFeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderSubgroupRotateFeatures & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderSubgroupRotateFeaturesKHR & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderSubgroupRotateFeatures & setShaderSubgroupRotate( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate_ ) VULKAN_HPP_NOEXCEPT { shaderSubgroupRotate = shaderSubgroupRotate_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderSubgroupRotateFeaturesKHR & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceShaderSubgroupRotateFeatures & setShaderSubgroupRotateClustered( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered_ ) VULKAN_HPP_NOEXCEPT { shaderSubgroupRotateClustered = shaderSubgroupRotateClustered_; @@ -90414,14 +90452,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceShaderSubgroupRotateFeatures const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceShaderSubgroupRotateFeatures &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -90437,9 +90475,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceShaderSubgroupRotateFeaturesKHR const & ) const = default; + auto operator<=>( PhysicalDeviceShaderSubgroupRotateFeatures const & ) const = default; #else - bool operator==( PhysicalDeviceShaderSubgroupRotateFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceShaderSubgroupRotateFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -90449,25 +90487,27 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PhysicalDeviceShaderSubgroupRotateFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceShaderSubgroupRotateFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderSubgroupRotateFeaturesKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderSubgroupRotateFeatures; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate = {}; VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceShaderSubgroupRotateFeaturesKHR; + using Type = PhysicalDeviceShaderSubgroupRotateFeatures; }; + using PhysicalDeviceShaderSubgroupRotateFeaturesKHR = PhysicalDeviceShaderSubgroupRotateFeatures; + struct PhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR { using NativeType = VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR; @@ -93282,55 +93322,54 @@ namespace 
VULKAN_HPP_NAMESPACE using PhysicalDeviceVariablePointerFeaturesKHR = PhysicalDeviceVariablePointersFeatures; using PhysicalDeviceVariablePointersFeaturesKHR = PhysicalDeviceVariablePointersFeatures; - struct PhysicalDeviceVertexAttributeDivisorFeaturesKHR + struct PhysicalDeviceVertexAttributeDivisorFeatures { - using NativeType = VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR; + using NativeType = VkPhysicalDeviceVertexAttributeDivisorFeatures; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVertexAttributeDivisorFeatures; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorFeaturesKHR( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorFeatures( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , vertexAttributeInstanceRateDivisor{ vertexAttributeInstanceRateDivisor_ } , vertexAttributeInstanceRateZeroDivisor{ vertexAttributeInstanceRateZeroDivisor_ } { } - VULKAN_HPP_CONSTEXPR - PhysicalDeviceVertexAttributeDivisorFeaturesKHR( PhysicalDeviceVertexAttributeDivisorFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorFeatures( PhysicalDeviceVertexAttributeDivisorFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PhysicalDeviceVertexAttributeDivisorFeaturesKHR( VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceVertexAttributeDivisorFeaturesKHR( *reinterpret_cast( &rhs ) ) + PhysicalDeviceVertexAttributeDivisorFeatures( VkPhysicalDeviceVertexAttributeDivisorFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceVertexAttributeDivisorFeatures( *reinterpret_cast( &rhs ) ) { } - PhysicalDeviceVertexAttributeDivisorFeaturesKHR & operator=( PhysicalDeviceVertexAttributeDivisorFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PhysicalDeviceVertexAttributeDivisorFeatures & operator=( PhysicalDeviceVertexAttributeDivisorFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PhysicalDeviceVertexAttributeDivisorFeaturesKHR & operator=( VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PhysicalDeviceVertexAttributeDivisorFeatures & operator=( VkPhysicalDeviceVertexAttributeDivisorFeatures const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVertexAttributeDivisorFeaturesKHR & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVertexAttributeDivisorFeatures & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVertexAttributeDivisorFeaturesKHR & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVertexAttributeDivisorFeatures & setVertexAttributeInstanceRateDivisor( VULKAN_HPP_NAMESPACE::Bool32 
vertexAttributeInstanceRateDivisor_ ) VULKAN_HPP_NOEXCEPT { vertexAttributeInstanceRateDivisor = vertexAttributeInstanceRateDivisor_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVertexAttributeDivisorFeaturesKHR & + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVertexAttributeDivisorFeatures & setVertexAttributeInstanceRateZeroDivisor( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ ) VULKAN_HPP_NOEXCEPT { vertexAttributeInstanceRateZeroDivisor = vertexAttributeInstanceRateZeroDivisor_; @@ -93338,14 +93377,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceVertexAttributeDivisorFeatures const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR &() VULKAN_HPP_NOEXCEPT + operator VkPhysicalDeviceVertexAttributeDivisorFeatures &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -93361,9 +93400,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceVertexAttributeDivisorFeaturesKHR const & ) const = default; + auto operator<=>( PhysicalDeviceVertexAttributeDivisorFeatures const & ) const = default; #else - bool operator==( PhysicalDeviceVertexAttributeDivisorFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PhysicalDeviceVertexAttributeDivisorFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -93373,26 +93412,117 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PhysicalDeviceVertexAttributeDivisorFeaturesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PhysicalDeviceVertexAttributeDivisorFeatures const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorFeatures; void * pNext = {}; VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor = {}; VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor = {}; }; template <> - struct CppType + struct CppType { - using Type = PhysicalDeviceVertexAttributeDivisorFeaturesKHR; + using Type = PhysicalDeviceVertexAttributeDivisorFeatures; }; - using PhysicalDeviceVertexAttributeDivisorFeaturesEXT = PhysicalDeviceVertexAttributeDivisorFeaturesKHR; + using PhysicalDeviceVertexAttributeDivisorFeaturesEXT = PhysicalDeviceVertexAttributeDivisorFeatures; + using PhysicalDeviceVertexAttributeDivisorFeaturesKHR = PhysicalDeviceVertexAttributeDivisorFeatures; + + struct PhysicalDeviceVertexAttributeDivisorProperties + { + using NativeType = VkPhysicalDeviceVertexAttributeDivisorProperties; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVertexAttributeDivisorProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorProperties( uint32_t maxVertexAttribDivisor_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 supportsNonZeroFirstInstance_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + 
: pNext{ pNext_ } + , maxVertexAttribDivisor{ maxVertexAttribDivisor_ } + , supportsNonZeroFirstInstance{ supportsNonZeroFirstInstance_ } + { + } + + VULKAN_HPP_CONSTEXPR + PhysicalDeviceVertexAttributeDivisorProperties( PhysicalDeviceVertexAttributeDivisorProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVertexAttributeDivisorProperties( VkPhysicalDeviceVertexAttributeDivisorProperties const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceVertexAttributeDivisorProperties( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceVertexAttributeDivisorProperties & operator=( PhysicalDeviceVertexAttributeDivisorProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceVertexAttributeDivisorProperties & operator=( VkPhysicalDeviceVertexAttributeDivisorProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + operator VkPhysicalDeviceVertexAttributeDivisorProperties const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVertexAttributeDivisorProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, pNext, maxVertexAttribDivisor, supportsNonZeroFirstInstance ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PhysicalDeviceVertexAttributeDivisorProperties const & ) const = default; +#else + bool operator==( PhysicalDeviceVertexAttributeDivisorProperties const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( maxVertexAttribDivisor == rhs.maxVertexAttribDivisor ) && + ( supportsNonZeroFirstInstance == rhs.supportsNonZeroFirstInstance ); +# endif + } + + bool operator!=( PhysicalDeviceVertexAttributeDivisorProperties const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorProperties; + void * pNext = {}; + uint32_t maxVertexAttribDivisor = {}; + VULKAN_HPP_NAMESPACE::Bool32 supportsNonZeroFirstInstance = {}; + }; + + template <> + struct CppType + { + using Type = PhysicalDeviceVertexAttributeDivisorProperties; + }; + + using PhysicalDeviceVertexAttributeDivisorPropertiesKHR = PhysicalDeviceVertexAttributeDivisorProperties; struct PhysicalDeviceVertexAttributeDivisorPropertiesEXT { @@ -93478,95 +93608,6 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceVertexAttributeDivisorPropertiesEXT; }; - struct PhysicalDeviceVertexAttributeDivisorPropertiesKHR - { - using NativeType = VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR; - - static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesKHR; - -#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorPropertiesKHR( uint32_t maxVertexAttribDivisor_ = {}, - VULKAN_HPP_NAMESPACE::Bool32 supportsNonZeroFirstInstance_ = {}, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT - : pNext{ pNext_ } - , maxVertexAttribDivisor{ maxVertexAttribDivisor_ } - , supportsNonZeroFirstInstance{ 
supportsNonZeroFirstInstance_ } - { - } - - VULKAN_HPP_CONSTEXPR - PhysicalDeviceVertexAttributeDivisorPropertiesKHR( PhysicalDeviceVertexAttributeDivisorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; - - PhysicalDeviceVertexAttributeDivisorPropertiesKHR( VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PhysicalDeviceVertexAttributeDivisorPropertiesKHR( *reinterpret_cast( &rhs ) ) - { - } - - PhysicalDeviceVertexAttributeDivisorPropertiesKHR & - operator=( PhysicalDeviceVertexAttributeDivisorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; -#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - - PhysicalDeviceVertexAttributeDivisorPropertiesKHR & operator=( VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT - { - *this = *reinterpret_cast( &rhs ); - return *this; - } - - operator VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR const &() const VULKAN_HPP_NOEXCEPT - { - return *reinterpret_cast( this ); - } - - operator VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR &() VULKAN_HPP_NOEXCEPT - { - return *reinterpret_cast( this ); - } - -#if defined( VULKAN_HPP_USE_REFLECT ) -# if 14 <= VULKAN_HPP_CPP_VERSION - auto -# else - std::tuple -# endif - reflect() const VULKAN_HPP_NOEXCEPT - { - return std::tie( sType, pNext, maxVertexAttribDivisor, supportsNonZeroFirstInstance ); - } -#endif - -#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PhysicalDeviceVertexAttributeDivisorPropertiesKHR const & ) const = default; -#else - bool operator==( PhysicalDeviceVertexAttributeDivisorPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { -# if defined( VULKAN_HPP_USE_REFLECT ) - return this->reflect() == rhs.reflect(); -# else - return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( maxVertexAttribDivisor == rhs.maxVertexAttribDivisor ) && - ( supportsNonZeroFirstInstance == rhs.supportsNonZeroFirstInstance ); -# endif - } - - bool operator!=( PhysicalDeviceVertexAttributeDivisorPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT - { - return !operator==( rhs ); - } -#endif - - public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesKHR; - void * pNext = {}; - uint32_t maxVertexAttribDivisor = {}; - VULKAN_HPP_NAMESPACE::Bool32 supportsNonZeroFirstInstance = {}; - }; - - template <> - struct CppType - { - using Type = PhysicalDeviceVertexAttributeDivisorPropertiesKHR; - }; - struct PhysicalDeviceVertexAttributeRobustnessFeaturesEXT { using NativeType = VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT; @@ -96588,6 +96629,575 @@ namespace VULKAN_HPP_NAMESPACE using Type = PhysicalDeviceVulkan13Properties; }; + struct PhysicalDeviceVulkan14Features + { + using NativeType = VkPhysicalDeviceVulkan14Features; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVulkan14Features; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkan14Features( VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 shaderFloatControls2_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 shaderExpectAssume_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 rectangularLines_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 bresenhamLines_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 smoothLines_ = 
{}, + VULKAN_HPP_NAMESPACE::Bool32 stippledRectangularLines_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 stippledBresenhamLines_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 stippledSmoothLines_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 indexTypeUint8_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 maintenance5_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 maintenance6_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 pipelineProtectedAccess_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 pipelineRobustness_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 pushDescriptor_ = {}, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , globalPriorityQuery{ globalPriorityQuery_ } + , shaderSubgroupRotate{ shaderSubgroupRotate_ } + , shaderSubgroupRotateClustered{ shaderSubgroupRotateClustered_ } + , shaderFloatControls2{ shaderFloatControls2_ } + , shaderExpectAssume{ shaderExpectAssume_ } + , rectangularLines{ rectangularLines_ } + , bresenhamLines{ bresenhamLines_ } + , smoothLines{ smoothLines_ } + , stippledRectangularLines{ stippledRectangularLines_ } + , stippledBresenhamLines{ stippledBresenhamLines_ } + , stippledSmoothLines{ stippledSmoothLines_ } + , vertexAttributeInstanceRateDivisor{ vertexAttributeInstanceRateDivisor_ } + , vertexAttributeInstanceRateZeroDivisor{ vertexAttributeInstanceRateZeroDivisor_ } + , indexTypeUint8{ indexTypeUint8_ } + , dynamicRenderingLocalRead{ dynamicRenderingLocalRead_ } + , maintenance5{ maintenance5_ } + , maintenance6{ maintenance6_ } + , pipelineProtectedAccess{ pipelineProtectedAccess_ } + , pipelineRobustness{ pipelineRobustness_ } + , hostImageCopy{ hostImageCopy_ } + , pushDescriptor{ pushDescriptor_ } + { + } + + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkan14Features( PhysicalDeviceVulkan14Features const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVulkan14Features( VkPhysicalDeviceVulkan14Features const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceVulkan14Features( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceVulkan14Features & operator=( PhysicalDeviceVulkan14Features const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceVulkan14Features & operator=( VkPhysicalDeviceVulkan14Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setPNext( void * pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setGlobalPriorityQuery( VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery_ ) VULKAN_HPP_NOEXCEPT + { + globalPriorityQuery = globalPriorityQuery_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setShaderSubgroupRotate( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate_ ) VULKAN_HPP_NOEXCEPT + { + shaderSubgroupRotate = shaderSubgroupRotate_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & + setShaderSubgroupRotateClustered( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered_ ) VULKAN_HPP_NOEXCEPT + { + shaderSubgroupRotateClustered = shaderSubgroupRotateClustered_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & 
setShaderFloatControls2( VULKAN_HPP_NAMESPACE::Bool32 shaderFloatControls2_ ) VULKAN_HPP_NOEXCEPT + { + shaderFloatControls2 = shaderFloatControls2_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setShaderExpectAssume( VULKAN_HPP_NAMESPACE::Bool32 shaderExpectAssume_ ) VULKAN_HPP_NOEXCEPT + { + shaderExpectAssume = shaderExpectAssume_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setRectangularLines( VULKAN_HPP_NAMESPACE::Bool32 rectangularLines_ ) VULKAN_HPP_NOEXCEPT + { + rectangularLines = rectangularLines_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setBresenhamLines( VULKAN_HPP_NAMESPACE::Bool32 bresenhamLines_ ) VULKAN_HPP_NOEXCEPT + { + bresenhamLines = bresenhamLines_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setSmoothLines( VULKAN_HPP_NAMESPACE::Bool32 smoothLines_ ) VULKAN_HPP_NOEXCEPT + { + smoothLines = smoothLines_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & + setStippledRectangularLines( VULKAN_HPP_NAMESPACE::Bool32 stippledRectangularLines_ ) VULKAN_HPP_NOEXCEPT + { + stippledRectangularLines = stippledRectangularLines_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & + setStippledBresenhamLines( VULKAN_HPP_NAMESPACE::Bool32 stippledBresenhamLines_ ) VULKAN_HPP_NOEXCEPT + { + stippledBresenhamLines = stippledBresenhamLines_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setStippledSmoothLines( VULKAN_HPP_NAMESPACE::Bool32 stippledSmoothLines_ ) VULKAN_HPP_NOEXCEPT + { + stippledSmoothLines = stippledSmoothLines_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & + setVertexAttributeInstanceRateDivisor( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor_ ) VULKAN_HPP_NOEXCEPT + { + vertexAttributeInstanceRateDivisor = vertexAttributeInstanceRateDivisor_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & + setVertexAttributeInstanceRateZeroDivisor( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ ) VULKAN_HPP_NOEXCEPT + { + vertexAttributeInstanceRateZeroDivisor = vertexAttributeInstanceRateZeroDivisor_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setIndexTypeUint8( VULKAN_HPP_NAMESPACE::Bool32 indexTypeUint8_ ) VULKAN_HPP_NOEXCEPT + { + indexTypeUint8 = indexTypeUint8_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & + setDynamicRenderingLocalRead( VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead_ ) VULKAN_HPP_NOEXCEPT + { + dynamicRenderingLocalRead = dynamicRenderingLocalRead_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setMaintenance5( VULKAN_HPP_NAMESPACE::Bool32 maintenance5_ ) VULKAN_HPP_NOEXCEPT + { + maintenance5 = maintenance5_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setMaintenance6( VULKAN_HPP_NAMESPACE::Bool32 maintenance6_ ) VULKAN_HPP_NOEXCEPT + { + maintenance6 = maintenance6_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & + setPipelineProtectedAccess( VULKAN_HPP_NAMESPACE::Bool32 pipelineProtectedAccess_ ) VULKAN_HPP_NOEXCEPT + { + pipelineProtectedAccess = pipelineProtectedAccess_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setPipelineRobustness( VULKAN_HPP_NAMESPACE::Bool32 
pipelineRobustness_ ) VULKAN_HPP_NOEXCEPT + { + pipelineRobustness = pipelineRobustness_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setHostImageCopy( VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy_ ) VULKAN_HPP_NOEXCEPT + { + hostImageCopy = hostImageCopy_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Features & setPushDescriptor( VULKAN_HPP_NAMESPACE::Bool32 pushDescriptor_ ) VULKAN_HPP_NOEXCEPT + { + pushDescriptor = pushDescriptor_; + return *this; + } +#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ + + operator VkPhysicalDeviceVulkan14Features const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVulkan14Features &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, + pNext, + globalPriorityQuery, + shaderSubgroupRotate, + shaderSubgroupRotateClustered, + shaderFloatControls2, + shaderExpectAssume, + rectangularLines, + bresenhamLines, + smoothLines, + stippledRectangularLines, + stippledBresenhamLines, + stippledSmoothLines, + vertexAttributeInstanceRateDivisor, + vertexAttributeInstanceRateZeroDivisor, + indexTypeUint8, + dynamicRenderingLocalRead, + maintenance5, + maintenance6, + pipelineProtectedAccess, + pipelineRobustness, + hostImageCopy, + pushDescriptor ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PhysicalDeviceVulkan14Features const & ) const = default; +#else + bool operator==( PhysicalDeviceVulkan14Features const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( globalPriorityQuery == rhs.globalPriorityQuery ) && + ( shaderSubgroupRotate == rhs.shaderSubgroupRotate ) && ( shaderSubgroupRotateClustered == rhs.shaderSubgroupRotateClustered ) && + ( shaderFloatControls2 == rhs.shaderFloatControls2 ) && ( shaderExpectAssume == rhs.shaderExpectAssume ) && + ( rectangularLines == rhs.rectangularLines ) && ( bresenhamLines == rhs.bresenhamLines ) && ( smoothLines == rhs.smoothLines ) && + ( stippledRectangularLines == rhs.stippledRectangularLines ) && ( stippledBresenhamLines == rhs.stippledBresenhamLines ) && + ( stippledSmoothLines == rhs.stippledSmoothLines ) && ( vertexAttributeInstanceRateDivisor == rhs.vertexAttributeInstanceRateDivisor ) && + ( vertexAttributeInstanceRateZeroDivisor == rhs.vertexAttributeInstanceRateZeroDivisor ) && ( indexTypeUint8 == rhs.indexTypeUint8 ) && + ( dynamicRenderingLocalRead == rhs.dynamicRenderingLocalRead ) && ( maintenance5 == rhs.maintenance5 ) && ( maintenance6 == rhs.maintenance6 ) && + ( pipelineProtectedAccess == rhs.pipelineProtectedAccess ) && ( pipelineRobustness == rhs.pipelineRobustness ) && + ( hostImageCopy == rhs.hostImageCopy ) && ( pushDescriptor == rhs.pushDescriptor ); +# endif + } + + bool operator!=( PhysicalDeviceVulkan14Features const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVulkan14Features; + void * pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 globalPriorityQuery = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotate = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupRotateClustered = {}; + 
VULKAN_HPP_NAMESPACE::Bool32 shaderFloatControls2 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderExpectAssume = {}; + VULKAN_HPP_NAMESPACE::Bool32 rectangularLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 bresenhamLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 smoothLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 stippledRectangularLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 stippledBresenhamLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 stippledSmoothLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor = {}; + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor = {}; + VULKAN_HPP_NAMESPACE::Bool32 indexTypeUint8 = {}; + VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalRead = {}; + VULKAN_HPP_NAMESPACE::Bool32 maintenance5 = {}; + VULKAN_HPP_NAMESPACE::Bool32 maintenance6 = {}; + VULKAN_HPP_NAMESPACE::Bool32 pipelineProtectedAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 pipelineRobustness = {}; + VULKAN_HPP_NAMESPACE::Bool32 hostImageCopy = {}; + VULKAN_HPP_NAMESPACE::Bool32 pushDescriptor = {}; + }; + + template <> + struct CppType + { + using Type = PhysicalDeviceVulkan14Features; + }; + + struct PhysicalDeviceVulkan14Properties + { + using NativeType = VkPhysicalDeviceVulkan14Properties; + + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVulkan14Properties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Properties( + uint32_t lineSubPixelPrecisionBits_ = {}, + uint32_t maxVertexAttribDivisor_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 supportsNonZeroFirstInstance_ = {}, + uint32_t maxPushDescriptors_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalReadDepthStencilAttachments_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalReadMultisampledAttachments_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 earlyFragmentMultisampleCoverageAfterSampleCounting_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 earlyFragmentSampleMaskTestBeforeSampleCounting_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 depthStencilSwizzleOneSupport_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 polygonModePointSize_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 nonStrictSinglePixelWideLinesUseParallelogram_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 nonStrictWideLinesUseParallelogram_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 blockTexelViewCompatibleMultipleLayers_ = {}, + uint32_t maxCombinedImageSamplerDescriptorCount_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateClampCombinerInputs_ = {}, + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior defaultRobustnessStorageBuffers_ = + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault, + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior defaultRobustnessUniformBuffers_ = + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault, + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior defaultRobustnessVertexInputs_ = + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault, + VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior defaultRobustnessImages_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior::eDeviceDefault, + uint32_t copySrcLayoutCount_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout * pCopySrcLayouts_ = {}, + uint32_t copyDstLayoutCount_ = {}, + VULKAN_HPP_NAMESPACE::ImageLayout * pCopyDstLayouts_ = {}, + std::array const & optimalTilingLayoutUUID_ = {}, + VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements_ = {}, + void * pNext_ = nullptr ) 
VULKAN_HPP_NOEXCEPT + : pNext{ pNext_ } + , lineSubPixelPrecisionBits{ lineSubPixelPrecisionBits_ } + , maxVertexAttribDivisor{ maxVertexAttribDivisor_ } + , supportsNonZeroFirstInstance{ supportsNonZeroFirstInstance_ } + , maxPushDescriptors{ maxPushDescriptors_ } + , dynamicRenderingLocalReadDepthStencilAttachments{ dynamicRenderingLocalReadDepthStencilAttachments_ } + , dynamicRenderingLocalReadMultisampledAttachments{ dynamicRenderingLocalReadMultisampledAttachments_ } + , earlyFragmentMultisampleCoverageAfterSampleCounting{ earlyFragmentMultisampleCoverageAfterSampleCounting_ } + , earlyFragmentSampleMaskTestBeforeSampleCounting{ earlyFragmentSampleMaskTestBeforeSampleCounting_ } + , depthStencilSwizzleOneSupport{ depthStencilSwizzleOneSupport_ } + , polygonModePointSize{ polygonModePointSize_ } + , nonStrictSinglePixelWideLinesUseParallelogram{ nonStrictSinglePixelWideLinesUseParallelogram_ } + , nonStrictWideLinesUseParallelogram{ nonStrictWideLinesUseParallelogram_ } + , blockTexelViewCompatibleMultipleLayers{ blockTexelViewCompatibleMultipleLayers_ } + , maxCombinedImageSamplerDescriptorCount{ maxCombinedImageSamplerDescriptorCount_ } + , fragmentShadingRateClampCombinerInputs{ fragmentShadingRateClampCombinerInputs_ } + , defaultRobustnessStorageBuffers{ defaultRobustnessStorageBuffers_ } + , defaultRobustnessUniformBuffers{ defaultRobustnessUniformBuffers_ } + , defaultRobustnessVertexInputs{ defaultRobustnessVertexInputs_ } + , defaultRobustnessImages{ defaultRobustnessImages_ } + , copySrcLayoutCount{ copySrcLayoutCount_ } + , pCopySrcLayouts{ pCopySrcLayouts_ } + , copyDstLayoutCount{ copyDstLayoutCount_ } + , pCopyDstLayouts{ pCopyDstLayouts_ } + , optimalTilingLayoutUUID{ optimalTilingLayoutUUID_ } + , identicalMemoryTypeRequirements{ identicalMemoryTypeRequirements_ } + { + } + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan14Properties( PhysicalDeviceVulkan14Properties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVulkan14Properties( VkPhysicalDeviceVulkan14Properties const & rhs ) VULKAN_HPP_NOEXCEPT + : PhysicalDeviceVulkan14Properties( *reinterpret_cast( &rhs ) ) + { + } + + PhysicalDeviceVulkan14Properties & operator=( PhysicalDeviceVulkan14Properties const & rhs ) VULKAN_HPP_NOEXCEPT = default; +#endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ + + PhysicalDeviceVulkan14Properties & operator=( VkPhysicalDeviceVulkan14Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + operator VkPhysicalDeviceVulkan14Properties const &() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVulkan14Properties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + +#if defined( VULKAN_HPP_USE_REFLECT ) +# if 14 <= VULKAN_HPP_CPP_VERSION + auto +# else + std::tuple const &, + VULKAN_HPP_NAMESPACE::Bool32 const &> +# endif + reflect() const VULKAN_HPP_NOEXCEPT + { + return std::tie( sType, + pNext, + lineSubPixelPrecisionBits, + maxVertexAttribDivisor, + supportsNonZeroFirstInstance, + maxPushDescriptors, + dynamicRenderingLocalReadDepthStencilAttachments, + dynamicRenderingLocalReadMultisampledAttachments, + earlyFragmentMultisampleCoverageAfterSampleCounting, + earlyFragmentSampleMaskTestBeforeSampleCounting, + depthStencilSwizzleOneSupport, + polygonModePointSize, + nonStrictSinglePixelWideLinesUseParallelogram, + nonStrictWideLinesUseParallelogram, + blockTexelViewCompatibleMultipleLayers, + maxCombinedImageSamplerDescriptorCount, + 
fragmentShadingRateClampCombinerInputs, + defaultRobustnessStorageBuffers, + defaultRobustnessUniformBuffers, + defaultRobustnessVertexInputs, + defaultRobustnessImages, + copySrcLayoutCount, + pCopySrcLayouts, + copyDstLayoutCount, + pCopyDstLayouts, + optimalTilingLayoutUUID, + identicalMemoryTypeRequirements ); + } +#endif + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>( PhysicalDeviceVulkan14Properties const & ) const = default; +#else + bool operator==( PhysicalDeviceVulkan14Properties const & rhs ) const VULKAN_HPP_NOEXCEPT + { +# if defined( VULKAN_HPP_USE_REFLECT ) + return this->reflect() == rhs.reflect(); +# else + return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( lineSubPixelPrecisionBits == rhs.lineSubPixelPrecisionBits ) && + ( maxVertexAttribDivisor == rhs.maxVertexAttribDivisor ) && ( supportsNonZeroFirstInstance == rhs.supportsNonZeroFirstInstance ) && + ( maxPushDescriptors == rhs.maxPushDescriptors ) && + ( dynamicRenderingLocalReadDepthStencilAttachments == rhs.dynamicRenderingLocalReadDepthStencilAttachments ) && + ( dynamicRenderingLocalReadMultisampledAttachments == rhs.dynamicRenderingLocalReadMultisampledAttachments ) && + ( earlyFragmentMultisampleCoverageAfterSampleCounting == rhs.earlyFragmentMultisampleCoverageAfterSampleCounting ) && + ( earlyFragmentSampleMaskTestBeforeSampleCounting == rhs.earlyFragmentSampleMaskTestBeforeSampleCounting ) && + ( depthStencilSwizzleOneSupport == rhs.depthStencilSwizzleOneSupport ) && ( polygonModePointSize == rhs.polygonModePointSize ) && + ( nonStrictSinglePixelWideLinesUseParallelogram == rhs.nonStrictSinglePixelWideLinesUseParallelogram ) && + ( nonStrictWideLinesUseParallelogram == rhs.nonStrictWideLinesUseParallelogram ) && + ( blockTexelViewCompatibleMultipleLayers == rhs.blockTexelViewCompatibleMultipleLayers ) && + ( maxCombinedImageSamplerDescriptorCount == rhs.maxCombinedImageSamplerDescriptorCount ) && + ( fragmentShadingRateClampCombinerInputs == rhs.fragmentShadingRateClampCombinerInputs ) && + ( defaultRobustnessStorageBuffers == rhs.defaultRobustnessStorageBuffers ) && + ( defaultRobustnessUniformBuffers == rhs.defaultRobustnessUniformBuffers ) && + ( defaultRobustnessVertexInputs == rhs.defaultRobustnessVertexInputs ) && ( defaultRobustnessImages == rhs.defaultRobustnessImages ) && + ( copySrcLayoutCount == rhs.copySrcLayoutCount ) && ( pCopySrcLayouts == rhs.pCopySrcLayouts ) && + ( copyDstLayoutCount == rhs.copyDstLayoutCount ) && ( pCopyDstLayouts == rhs.pCopyDstLayouts ) && + ( optimalTilingLayoutUUID == rhs.optimalTilingLayoutUUID ) && ( identicalMemoryTypeRequirements == rhs.identicalMemoryTypeRequirements ); +# endif + } + + bool operator!=( PhysicalDeviceVulkan14Properties const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVulkan14Properties; + void * pNext = {}; + uint32_t lineSubPixelPrecisionBits = {}; + uint32_t maxVertexAttribDivisor = {}; + VULKAN_HPP_NAMESPACE::Bool32 supportsNonZeroFirstInstance = {}; + uint32_t maxPushDescriptors = {}; + VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalReadDepthStencilAttachments = {}; + VULKAN_HPP_NAMESPACE::Bool32 dynamicRenderingLocalReadMultisampledAttachments = {}; + VULKAN_HPP_NAMESPACE::Bool32 earlyFragmentMultisampleCoverageAfterSampleCounting = {}; + VULKAN_HPP_NAMESPACE::Bool32 earlyFragmentSampleMaskTestBeforeSampleCounting = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthStencilSwizzleOneSupport = {}; 
+ VULKAN_HPP_NAMESPACE::Bool32 polygonModePointSize = {}; + VULKAN_HPP_NAMESPACE::Bool32 nonStrictSinglePixelWideLinesUseParallelogram = {}; + VULKAN_HPP_NAMESPACE::Bool32 nonStrictWideLinesUseParallelogram = {}; + VULKAN_HPP_NAMESPACE::Bool32 blockTexelViewCompatibleMultipleLayers = {}; + uint32_t maxCombinedImageSamplerDescriptorCount = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateClampCombinerInputs = {}; + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior defaultRobustnessStorageBuffers = + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault; + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior defaultRobustnessUniformBuffers = + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault; + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior defaultRobustnessVertexInputs = + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault; + VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior defaultRobustnessImages = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior::eDeviceDefault; + uint32_t copySrcLayoutCount = {}; + VULKAN_HPP_NAMESPACE::ImageLayout * pCopySrcLayouts = {}; + uint32_t copyDstLayoutCount = {}; + VULKAN_HPP_NAMESPACE::ImageLayout * pCopyDstLayouts = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D optimalTilingLayoutUUID = {}; + VULKAN_HPP_NAMESPACE::Bool32 identicalMemoryTypeRequirements = {}; + }; + + template <> + struct CppType + { + using Type = PhysicalDeviceVulkan14Properties; + }; + struct PhysicalDeviceVulkanMemoryModelFeatures { using NativeType = VkPhysicalDeviceVulkanMemoryModelFeatures; @@ -99173,66 +99783,66 @@ namespace VULKAN_HPP_NAMESPACE using Type = PipelineCoverageToColorStateCreateInfoNV; }; - struct PipelineCreateFlags2CreateInfoKHR + struct PipelineCreateFlags2CreateInfo { - using NativeType = VkPipelineCreateFlags2CreateInfoKHR; + using NativeType = VkPipelineCreateFlags2CreateInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCreateFlags2CreateInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCreateFlags2CreateInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PipelineCreateFlags2CreateInfoKHR( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2KHR flags_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PipelineCreateFlags2CreateInfo( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2 flags_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , flags{ flags_ } { } - VULKAN_HPP_CONSTEXPR PipelineCreateFlags2CreateInfoKHR( PipelineCreateFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PipelineCreateFlags2CreateInfo( PipelineCreateFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PipelineCreateFlags2CreateInfoKHR( VkPipelineCreateFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PipelineCreateFlags2CreateInfoKHR( *reinterpret_cast( &rhs ) ) + PipelineCreateFlags2CreateInfo( VkPipelineCreateFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineCreateFlags2CreateInfo( *reinterpret_cast( &rhs ) ) { } - PipelineCreateFlags2CreateInfoKHR & operator=( PipelineCreateFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PipelineCreateFlags2CreateInfo & operator=( PipelineCreateFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - 
PipelineCreateFlags2CreateInfoKHR & operator=( VkPipelineCreateFlags2CreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PipelineCreateFlags2CreateInfo & operator=( VkPipelineCreateFlags2CreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PipelineCreateFlags2CreateInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineCreateFlags2CreateInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineCreateFlags2CreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2KHR flags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineCreateFlags2CreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineCreateFlags2 flags_ ) VULKAN_HPP_NOEXCEPT { flags = flags_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPipelineCreateFlags2CreateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPipelineCreateFlags2CreateInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPipelineCreateFlags2CreateInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkPipelineCreateFlags2CreateInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) # if 14 <= VULKAN_HPP_CPP_VERSION auto # else - std::tuple + std::tuple # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -99241,9 +99851,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PipelineCreateFlags2CreateInfoKHR const & ) const = default; + auto operator<=>( PipelineCreateFlags2CreateInfo const & ) const = default; #else - bool operator==( PipelineCreateFlags2CreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PipelineCreateFlags2CreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -99252,24 +99862,26 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PipelineCreateFlags2CreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PipelineCreateFlags2CreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCreateFlags2CreateInfoKHR; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::PipelineCreateFlags2KHR flags = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCreateFlags2CreateInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCreateFlags2 flags = {}; }; template <> - struct CppType + struct CppType { - using Type = PipelineCreateFlags2CreateInfoKHR; + using Type = PipelineCreateFlags2CreateInfo; }; + using PipelineCreateFlags2CreateInfoKHR = PipelineCreateFlags2CreateInfo; + struct PipelineCreationFeedback { using NativeType = VkPipelineCreationFeedback; @@ -101074,20 +101686,20 @@ namespace VULKAN_HPP_NAMESPACE using Type = PipelineRasterizationDepthClipStateCreateInfoEXT; }; - struct PipelineRasterizationLineStateCreateInfoKHR + struct PipelineRasterizationLineStateCreateInfo { - using NativeType = VkPipelineRasterizationLineStateCreateInfoKHR; + using NativeType = VkPipelineRasterizationLineStateCreateInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR 
StructureType structureType = StructureType::ePipelineRasterizationLineStateCreateInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRasterizationLineStateCreateInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PipelineRasterizationLineStateCreateInfoKHR( - VULKAN_HPP_NAMESPACE::LineRasterizationModeKHR lineRasterizationMode_ = VULKAN_HPP_NAMESPACE::LineRasterizationModeKHR::eDefault, - VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable_ = {}, - uint32_t lineStippleFactor_ = {}, - uint16_t lineStipplePattern_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PipelineRasterizationLineStateCreateInfo( + VULKAN_HPP_NAMESPACE::LineRasterizationMode lineRasterizationMode_ = VULKAN_HPP_NAMESPACE::LineRasterizationMode::eDefault, + VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable_ = {}, + uint32_t lineStippleFactor_ = {}, + uint16_t lineStipplePattern_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , lineRasterizationMode{ lineRasterizationMode_ } , stippledLineEnable{ stippledLineEnable_ } @@ -101096,64 +101708,64 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR PipelineRasterizationLineStateCreateInfoKHR( PipelineRasterizationLineStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PipelineRasterizationLineStateCreateInfo( PipelineRasterizationLineStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PipelineRasterizationLineStateCreateInfoKHR( VkPipelineRasterizationLineStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PipelineRasterizationLineStateCreateInfoKHR( *reinterpret_cast( &rhs ) ) + PipelineRasterizationLineStateCreateInfo( VkPipelineRasterizationLineStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineRasterizationLineStateCreateInfo( *reinterpret_cast( &rhs ) ) { } - PipelineRasterizationLineStateCreateInfoKHR & operator=( PipelineRasterizationLineStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PipelineRasterizationLineStateCreateInfo & operator=( PipelineRasterizationLineStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PipelineRasterizationLineStateCreateInfoKHR & operator=( VkPipelineRasterizationLineStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PipelineRasterizationLineStateCreateInfo & operator=( VkPipelineRasterizationLineStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfoKHR & - setLineRasterizationMode( VULKAN_HPP_NAMESPACE::LineRasterizationModeKHR lineRasterizationMode_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfo & + setLineRasterizationMode( VULKAN_HPP_NAMESPACE::LineRasterizationMode lineRasterizationMode_ ) VULKAN_HPP_NOEXCEPT { lineRasterizationMode = lineRasterizationMode_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfoKHR & + VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfo & setStippledLineEnable( 
VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable_ ) VULKAN_HPP_NOEXCEPT { stippledLineEnable = stippledLineEnable_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfoKHR & setLineStippleFactor( uint32_t lineStippleFactor_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfo & setLineStippleFactor( uint32_t lineStippleFactor_ ) VULKAN_HPP_NOEXCEPT { lineStippleFactor = lineStippleFactor_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfoKHR & setLineStipplePattern( uint16_t lineStipplePattern_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRasterizationLineStateCreateInfo & setLineStipplePattern( uint16_t lineStipplePattern_ ) VULKAN_HPP_NOEXCEPT { lineStipplePattern = lineStipplePattern_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPipelineRasterizationLineStateCreateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPipelineRasterizationLineStateCreateInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPipelineRasterizationLineStateCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkPipelineRasterizationLineStateCreateInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -101162,7 +101774,7 @@ namespace VULKAN_HPP_NAMESPACE # else std::tuple @@ -101174,9 +101786,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PipelineRasterizationLineStateCreateInfoKHR const & ) const = default; + auto operator<=>( PipelineRasterizationLineStateCreateInfo const & ) const = default; #else - bool operator==( PipelineRasterizationLineStateCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PipelineRasterizationLineStateCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -101187,28 +101799,29 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PipelineRasterizationLineStateCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PipelineRasterizationLineStateCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationLineStateCreateInfoKHR; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::LineRasterizationModeKHR lineRasterizationMode = VULKAN_HPP_NAMESPACE::LineRasterizationModeKHR::eDefault; - VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable = {}; - uint32_t lineStippleFactor = {}; - uint16_t lineStipplePattern = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationLineStateCreateInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::LineRasterizationMode lineRasterizationMode = VULKAN_HPP_NAMESPACE::LineRasterizationMode::eDefault; + VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable = {}; + uint32_t lineStippleFactor = {}; + uint16_t lineStipplePattern = {}; }; template <> - struct CppType + struct CppType { - using Type = PipelineRasterizationLineStateCreateInfoKHR; + using Type = PipelineRasterizationLineStateCreateInfo; }; - using PipelineRasterizationLineStateCreateInfoEXT = PipelineRasterizationLineStateCreateInfoKHR; + using PipelineRasterizationLineStateCreateInfoEXT = PipelineRasterizationLineStateCreateInfo; + using 
PipelineRasterizationLineStateCreateInfoKHR = PipelineRasterizationLineStateCreateInfo; struct PipelineRasterizationProvokingVertexStateCreateInfoEXT { @@ -101794,20 +102407,20 @@ namespace VULKAN_HPP_NAMESPACE using Type = PipelineRepresentativeFragmentTestStateCreateInfoNV; }; - struct PipelineRobustnessCreateInfoEXT + struct PipelineRobustnessCreateInfo { - using NativeType = VkPipelineRobustnessCreateInfoEXT; + using NativeType = VkPipelineRobustnessCreateInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRobustnessCreateInfoEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRobustnessCreateInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PipelineRobustnessCreateInfoEXT( - VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT storageBuffers_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault, - VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT uniformBuffers_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault, - VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT vertexInputs_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault, - VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT images_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT::eDeviceDefault, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PipelineRobustnessCreateInfo( + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior storageBuffers_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault, + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior uniformBuffers_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault, + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior vertexInputs_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault, + VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior images_ = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior::eDeviceDefault, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , storageBuffers{ storageBuffers_ } , uniformBuffers{ uniformBuffers_ } @@ -101816,65 +102429,65 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR PipelineRobustnessCreateInfoEXT( PipelineRobustnessCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PipelineRobustnessCreateInfo( PipelineRobustnessCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PipelineRobustnessCreateInfoEXT( VkPipelineRobustnessCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : PipelineRobustnessCreateInfoEXT( *reinterpret_cast( &rhs ) ) + PipelineRobustnessCreateInfo( VkPipelineRobustnessCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineRobustnessCreateInfo( *reinterpret_cast( &rhs ) ) { } - PipelineRobustnessCreateInfoEXT & operator=( PipelineRobustnessCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PipelineRobustnessCreateInfo & operator=( PipelineRobustnessCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PipelineRobustnessCreateInfoEXT & operator=( VkPipelineRobustnessCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + PipelineRobustnessCreateInfo & operator=( VkPipelineRobustnessCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( 
VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfoEXT & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfoEXT & - setStorageBuffers( VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT storageBuffers_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfo & + setStorageBuffers( VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior storageBuffers_ ) VULKAN_HPP_NOEXCEPT { storageBuffers = storageBuffers_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfoEXT & - setUniformBuffers( VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT uniformBuffers_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfo & + setUniformBuffers( VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior uniformBuffers_ ) VULKAN_HPP_NOEXCEPT { uniformBuffers = uniformBuffers_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfoEXT & - setVertexInputs( VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT vertexInputs_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfo & + setVertexInputs( VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior vertexInputs_ ) VULKAN_HPP_NOEXCEPT { vertexInputs = vertexInputs_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfoEXT & setImages( VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT images_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineRobustnessCreateInfo & setImages( VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior images_ ) VULKAN_HPP_NOEXCEPT { images = images_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPipelineRobustnessCreateInfoEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkPipelineRobustnessCreateInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPipelineRobustnessCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + operator VkPipelineRobustnessCreateInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -101883,10 +102496,10 @@ namespace VULKAN_HPP_NAMESPACE # else std::tuple + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior const &, + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior const &, + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior const &, + VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior const &> # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -101895,9 +102508,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PipelineRobustnessCreateInfoEXT const & ) const = default; + auto operator<=>( PipelineRobustnessCreateInfo const & ) const = default; #else - bool operator==( PipelineRobustnessCreateInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PipelineRobustnessCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -101907,27 +102520,29 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PipelineRobustnessCreateInfoEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PipelineRobustnessCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif 
public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRobustnessCreateInfoEXT; - const void * pNext = {}; - VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT storageBuffers = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault; - VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT uniformBuffers = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault; - VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT vertexInputs = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehaviorEXT::eDeviceDefault; - VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT images = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehaviorEXT::eDeviceDefault; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRobustnessCreateInfo; + const void * pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior storageBuffers = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault; + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior uniformBuffers = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault; + VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior vertexInputs = VULKAN_HPP_NAMESPACE::PipelineRobustnessBufferBehavior::eDeviceDefault; + VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior images = VULKAN_HPP_NAMESPACE::PipelineRobustnessImageBehavior::eDeviceDefault; }; template <> - struct CppType + struct CppType { - using Type = PipelineRobustnessCreateInfoEXT; + using Type = PipelineRobustnessCreateInfo; }; + using PipelineRobustnessCreateInfoEXT = PipelineRobustnessCreateInfo; + struct PipelineSampleLocationsStateCreateInfoEXT { using NativeType = VkPipelineSampleLocationsStateCreateInfoEXT; @@ -102470,55 +103085,55 @@ namespace VULKAN_HPP_NAMESPACE using PipelineTessellationDomainOriginStateCreateInfoKHR = PipelineTessellationDomainOriginStateCreateInfo; - struct VertexInputBindingDivisorDescriptionKHR + struct VertexInputBindingDivisorDescription { - using NativeType = VkVertexInputBindingDivisorDescriptionKHR; + using NativeType = VkVertexInputBindingDivisorDescription; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR VertexInputBindingDivisorDescriptionKHR( uint32_t binding_ = {}, uint32_t divisor_ = {} ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR VertexInputBindingDivisorDescription( uint32_t binding_ = {}, uint32_t divisor_ = {} ) VULKAN_HPP_NOEXCEPT : binding{ binding_ } , divisor{ divisor_ } { } - VULKAN_HPP_CONSTEXPR VertexInputBindingDivisorDescriptionKHR( VertexInputBindingDivisorDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR VertexInputBindingDivisorDescription( VertexInputBindingDivisorDescription const & rhs ) VULKAN_HPP_NOEXCEPT = default; - VertexInputBindingDivisorDescriptionKHR( VkVertexInputBindingDivisorDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : VertexInputBindingDivisorDescriptionKHR( *reinterpret_cast( &rhs ) ) + VertexInputBindingDivisorDescription( VkVertexInputBindingDivisorDescription const & rhs ) VULKAN_HPP_NOEXCEPT + : VertexInputBindingDivisorDescription( *reinterpret_cast( &rhs ) ) { } - VertexInputBindingDivisorDescriptionKHR & operator=( VertexInputBindingDivisorDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VertexInputBindingDivisorDescription & operator=( VertexInputBindingDivisorDescription const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - VertexInputBindingDivisorDescriptionKHR & operator=( 
VkVertexInputBindingDivisorDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    VertexInputBindingDivisorDescription & operator=( VkVertexInputBindingDivisorDescription const & rhs ) VULKAN_HPP_NOEXCEPT
     {
-      *this = *reinterpret_cast<VertexInputBindingDivisorDescriptionKHR const *>( &rhs );
+      *this = *reinterpret_cast<VertexInputBindingDivisorDescription const *>( &rhs );
       return *this;
     }

#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS )
-    VULKAN_HPP_CONSTEXPR_14 VertexInputBindingDivisorDescriptionKHR & setBinding( uint32_t binding_ ) VULKAN_HPP_NOEXCEPT
+    VULKAN_HPP_CONSTEXPR_14 VertexInputBindingDivisorDescription & setBinding( uint32_t binding_ ) VULKAN_HPP_NOEXCEPT
     {
       binding = binding_;
       return *this;
     }

-    VULKAN_HPP_CONSTEXPR_14 VertexInputBindingDivisorDescriptionKHR & setDivisor( uint32_t divisor_ ) VULKAN_HPP_NOEXCEPT
+    VULKAN_HPP_CONSTEXPR_14 VertexInputBindingDivisorDescription & setDivisor( uint32_t divisor_ ) VULKAN_HPP_NOEXCEPT
     {
       divisor = divisor_;
       return *this;
     }
#endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/

-    operator VkVertexInputBindingDivisorDescriptionKHR const &() const VULKAN_HPP_NOEXCEPT
+    operator VkVertexInputBindingDivisorDescription const &() const VULKAN_HPP_NOEXCEPT
     {
-      return *reinterpret_cast<const VkVertexInputBindingDivisorDescriptionKHR *>( this );
+      return *reinterpret_cast<const VkVertexInputBindingDivisorDescription *>( this );
     }

-    operator VkVertexInputBindingDivisorDescriptionKHR &() VULKAN_HPP_NOEXCEPT
+    operator VkVertexInputBindingDivisorDescription &() VULKAN_HPP_NOEXCEPT
     {
-      return *reinterpret_cast<VkVertexInputBindingDivisorDescriptionKHR *>( this );
+      return *reinterpret_cast<VkVertexInputBindingDivisorDescription *>( this );
     }

#if defined( VULKAN_HPP_USE_REFLECT )
@@ -102534,9 +103149,9 @@
#endif

#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
-    auto operator<=>( VertexInputBindingDivisorDescriptionKHR const & ) const = default;
+    auto operator<=>( VertexInputBindingDivisorDescription const & ) const = default;
#else
-    bool operator==( VertexInputBindingDivisorDescriptionKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    bool operator==( VertexInputBindingDivisorDescription const & rhs ) const VULKAN_HPP_NOEXCEPT
     {
# if defined( VULKAN_HPP_USE_REFLECT )
       return this->reflect() == rhs.reflect();
@@ -102545,7 +103160,7 @@
# endif
     }

-    bool operator!=( VertexInputBindingDivisorDescriptionKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    bool operator!=( VertexInputBindingDivisorDescription const & rhs ) const VULKAN_HPP_NOEXCEPT
     {
       return !operator==( rhs );
     }
@@ -102556,37 +103171,38 @@
      uint32_t divisor = {};
    };

-  using VertexInputBindingDivisorDescriptionEXT = VertexInputBindingDivisorDescriptionKHR;
+  using VertexInputBindingDivisorDescriptionEXT = VertexInputBindingDivisorDescription;
+  using VertexInputBindingDivisorDescriptionKHR = VertexInputBindingDivisorDescription;

-  struct PipelineVertexInputDivisorStateCreateInfoKHR
+  struct PipelineVertexInputDivisorStateCreateInfo
   {
-    using NativeType = VkPipelineVertexInputDivisorStateCreateInfoKHR;
+    using NativeType = VkPipelineVertexInputDivisorStateCreateInfo;

     static const bool allowDuplicate = false;
-    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineVertexInputDivisorStateCreateInfoKHR;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineVertexInputDivisorStateCreateInfo;

#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
     VULKAN_HPP_CONSTEXPR
-      PipelineVertexInputDivisorStateCreateInfoKHR( uint32_t vertexBindingDivisorCount_ = {},
-                                                    const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR * pVertexBindingDivisors_ = {},
-                                                    const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT
+
PipelineVertexInputDivisorStateCreateInfo( uint32_t vertexBindingDivisorCount_ = {}, + const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescription * pVertexBindingDivisors_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , vertexBindingDivisorCount{ vertexBindingDivisorCount_ } , pVertexBindingDivisors{ pVertexBindingDivisors_ } { } - VULKAN_HPP_CONSTEXPR PipelineVertexInputDivisorStateCreateInfoKHR( PipelineVertexInputDivisorStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PipelineVertexInputDivisorStateCreateInfo( PipelineVertexInputDivisorStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PipelineVertexInputDivisorStateCreateInfoKHR( VkPipelineVertexInputDivisorStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PipelineVertexInputDivisorStateCreateInfoKHR( *reinterpret_cast( &rhs ) ) + PipelineVertexInputDivisorStateCreateInfo( VkPipelineVertexInputDivisorStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : PipelineVertexInputDivisorStateCreateInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PipelineVertexInputDivisorStateCreateInfoKHR( - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDivisors_, - const void * pNext_ = nullptr ) + PipelineVertexInputDivisorStateCreateInfo( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDivisors_, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , vertexBindingDivisorCount( static_cast( vertexBindingDivisors_.size() ) ) , pVertexBindingDivisors( vertexBindingDivisors_.data() ) @@ -102594,39 +103210,38 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PipelineVertexInputDivisorStateCreateInfoKHR & operator=( PipelineVertexInputDivisorStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PipelineVertexInputDivisorStateCreateInfo & operator=( PipelineVertexInputDivisorStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PipelineVertexInputDivisorStateCreateInfoKHR & operator=( VkPipelineVertexInputDivisorStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PipelineVertexInputDivisorStateCreateInfo & operator=( VkPipelineVertexInputDivisorStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PipelineVertexInputDivisorStateCreateInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineVertexInputDivisorStateCreateInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineVertexInputDivisorStateCreateInfoKHR & - setVertexBindingDivisorCount( uint32_t vertexBindingDivisorCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineVertexInputDivisorStateCreateInfo & setVertexBindingDivisorCount( uint32_t vertexBindingDivisorCount_ ) VULKAN_HPP_NOEXCEPT { vertexBindingDivisorCount = vertexBindingDivisorCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PipelineVertexInputDivisorStateCreateInfoKHR & - setPVertexBindingDivisors( const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR * pVertexBindingDivisors_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PipelineVertexInputDivisorStateCreateInfo & + setPVertexBindingDivisors( const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescription * 
pVertexBindingDivisors_ ) VULKAN_HPP_NOEXCEPT { pVertexBindingDivisors = pVertexBindingDivisors_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PipelineVertexInputDivisorStateCreateInfoKHR & setVertexBindingDivisors( - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDivisors_ ) + PipelineVertexInputDivisorStateCreateInfo & setVertexBindingDivisors( + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDivisors_ ) VULKAN_HPP_NOEXCEPT { vertexBindingDivisorCount = static_cast( vertexBindingDivisors_.size() ); @@ -102636,14 +103251,14 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPipelineVertexInputDivisorStateCreateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPipelineVertexInputDivisorStateCreateInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPipelineVertexInputDivisorStateCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkPipelineVertexInputDivisorStateCreateInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -102653,7 +103268,7 @@ namespace VULKAN_HPP_NAMESPACE std::tuple + const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescription * const &> # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -102662,9 +103277,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PipelineVertexInputDivisorStateCreateInfoKHR const & ) const = default; + auto operator<=>( PipelineVertexInputDivisorStateCreateInfo const & ) const = default; #else - bool operator==( PipelineVertexInputDivisorStateCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PipelineVertexInputDivisorStateCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -102674,26 +103289,27 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PipelineVertexInputDivisorStateCreateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PipelineVertexInputDivisorStateCreateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineVertexInputDivisorStateCreateInfoKHR; - const void * pNext = {}; - uint32_t vertexBindingDivisorCount = {}; - const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionKHR * pVertexBindingDivisors = {}; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineVertexInputDivisorStateCreateInfo; + const void * pNext = {}; + uint32_t vertexBindingDivisorCount = {}; + const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescription * pVertexBindingDivisors = {}; }; template <> - struct CppType + struct CppType { - using Type = PipelineVertexInputDivisorStateCreateInfoKHR; + using Type = PipelineVertexInputDivisorStateCreateInfo; }; - using PipelineVertexInputDivisorStateCreateInfoEXT = PipelineVertexInputDivisorStateCreateInfoKHR; + using PipelineVertexInputDivisorStateCreateInfoEXT = PipelineVertexInputDivisorStateCreateInfo; + using PipelineVertexInputDivisorStateCreateInfoKHR = PipelineVertexInputDivisorStateCreateInfo; struct PipelineViewportCoarseSampleOrderStateCreateInfoNV { @@ -105073,20 +105689,20 @@ namespace VULKAN_HPP_NAMESPACE using Type = ProtectedSubmitInfo; }; - struct 
PushConstantsInfoKHR + struct PushConstantsInfo { - using NativeType = VkPushConstantsInfoKHR; + using NativeType = VkPushConstantsInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePushConstantsInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePushConstantsInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PushConstantsInfoKHR( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, - VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, - uint32_t offset_ = {}, - uint32_t size_ = {}, - const void * pValues_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PushConstantsInfo( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, + VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, + uint32_t offset_ = {}, + uint32_t size_ = {}, + const void * pValues_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , layout{ layout_ } , stageFlags{ stageFlags_ } @@ -105096,20 +105712,17 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR PushConstantsInfoKHR( PushConstantsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PushConstantsInfo( PushConstantsInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PushConstantsInfoKHR( VkPushConstantsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PushConstantsInfoKHR( *reinterpret_cast( &rhs ) ) - { - } + PushConstantsInfo( VkPushConstantsInfo const & rhs ) VULKAN_HPP_NOEXCEPT : PushConstantsInfo( *reinterpret_cast( &rhs ) ) {} # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) template - PushConstantsInfoKHR( VULKAN_HPP_NAMESPACE::PipelineLayout layout_, - VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_, - uint32_t offset_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_, - const void * pNext_ = nullptr ) + PushConstantsInfo( VULKAN_HPP_NAMESPACE::PipelineLayout layout_, + VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_, + uint32_t offset_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , layout( layout_ ) , stageFlags( stageFlags_ ) @@ -105120,47 +105733,47 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PushConstantsInfoKHR & operator=( PushConstantsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PushConstantsInfo & operator=( PushConstantsInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PushConstantsInfoKHR & operator=( VkPushConstantsInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PushConstantsInfo & operator=( VkPushConstantsInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PushConstantsInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushConstantsInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushConstantsInfoKHR & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushConstantsInfo & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT { layout = layout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushConstantsInfoKHR & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) 
VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushConstantsInfo & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) VULKAN_HPP_NOEXCEPT { stageFlags = stageFlags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushConstantsInfoKHR & setOffset( uint32_t offset_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushConstantsInfo & setOffset( uint32_t offset_ ) VULKAN_HPP_NOEXCEPT { offset = offset_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushConstantsInfoKHR & setSize( uint32_t size_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushConstantsInfo & setSize( uint32_t size_ ) VULKAN_HPP_NOEXCEPT { size = size_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushConstantsInfoKHR & setPValues( const void * pValues_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushConstantsInfo & setPValues( const void * pValues_ ) VULKAN_HPP_NOEXCEPT { pValues = pValues_; return *this; @@ -105168,7 +105781,7 @@ namespace VULKAN_HPP_NAMESPACE # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) template - PushConstantsInfoKHR & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT + PushConstantsInfo & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT { size = static_cast( values_.size() * sizeof( T ) ); pValues = values_.data(); @@ -105177,14 +105790,14 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPushConstantsInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPushConstantsInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPushConstantsInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkPushConstantsInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -105206,9 +105819,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PushConstantsInfoKHR const & ) const = default; + auto operator<=>( PushConstantsInfo const & ) const = default; #else - bool operator==( PushConstantsInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PushConstantsInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -105218,14 +105831,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PushConstantsInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PushConstantsInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePushConstantsInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePushConstantsInfo; const void * pNext = {}; VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags = {}; @@ -105235,11 +105848,13 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = PushConstantsInfoKHR; + using Type = PushConstantsInfo; }; + using PushConstantsInfoKHR = PushConstantsInfo; + struct WriteDescriptorSet { using NativeType = VkWriteDescriptorSet; @@ -105471,20 +106086,20 @@ namespace VULKAN_HPP_NAMESPACE using Type = WriteDescriptorSet; }; - struct PushDescriptorSetInfoKHR + struct PushDescriptorSetInfo { - using NativeType = VkPushDescriptorSetInfoKHR; + using NativeType = VkPushDescriptorSetInfo; static const bool 
allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePushDescriptorSetInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePushDescriptorSetInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PushDescriptorSetInfoKHR( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, - VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, - uint32_t set_ = {}, - uint32_t descriptorWriteCount_ = {}, - const VULKAN_HPP_NAMESPACE::WriteDescriptorSet * pDescriptorWrites_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PushDescriptorSetInfo( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, + VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, + uint32_t set_ = {}, + uint32_t descriptorWriteCount_ = {}, + const VULKAN_HPP_NAMESPACE::WriteDescriptorSet * pDescriptorWrites_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , stageFlags{ stageFlags_ } , layout{ layout_ } @@ -105494,19 +106109,19 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR PushDescriptorSetInfoKHR( PushDescriptorSetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PushDescriptorSetInfo( PushDescriptorSetInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PushDescriptorSetInfoKHR( VkPushDescriptorSetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PushDescriptorSetInfoKHR( *reinterpret_cast( &rhs ) ) + PushDescriptorSetInfo( VkPushDescriptorSetInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : PushDescriptorSetInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PushDescriptorSetInfoKHR( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_, - VULKAN_HPP_NAMESPACE::PipelineLayout layout_, - uint32_t set_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorWrites_, - const void * pNext_ = nullptr ) + PushDescriptorSetInfo( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_, + VULKAN_HPP_NAMESPACE::PipelineLayout layout_, + uint32_t set_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorWrites_, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , stageFlags( stageFlags_ ) , layout( layout_ ) @@ -105517,47 +106132,47 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - PushDescriptorSetInfoKHR & operator=( PushDescriptorSetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PushDescriptorSetInfo & operator=( PushDescriptorSetInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PushDescriptorSetInfoKHR & operator=( VkPushDescriptorSetInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PushDescriptorSetInfo & operator=( VkPushDescriptorSetInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfoKHR & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfo & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) VULKAN_HPP_NOEXCEPT { stageFlags = stageFlags_; return *this; } - VULKAN_HPP_CONSTEXPR_14 
PushDescriptorSetInfoKHR & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfo & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT { layout = layout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfoKHR & setSet( uint32_t set_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfo & setSet( uint32_t set_ ) VULKAN_HPP_NOEXCEPT { set = set_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfoKHR & setDescriptorWriteCount( uint32_t descriptorWriteCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfo & setDescriptorWriteCount( uint32_t descriptorWriteCount_ ) VULKAN_HPP_NOEXCEPT { descriptorWriteCount = descriptorWriteCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfoKHR & + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetInfo & setPDescriptorWrites( const VULKAN_HPP_NAMESPACE::WriteDescriptorSet * pDescriptorWrites_ ) VULKAN_HPP_NOEXCEPT { pDescriptorWrites = pDescriptorWrites_; @@ -105565,7 +106180,7 @@ namespace VULKAN_HPP_NAMESPACE } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - PushDescriptorSetInfoKHR & setDescriptorWrites( + PushDescriptorSetInfo & setDescriptorWrites( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorWrites_ ) VULKAN_HPP_NOEXCEPT { descriptorWriteCount = static_cast( descriptorWrites_.size() ); @@ -105575,14 +106190,14 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPushDescriptorSetInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPushDescriptorSetInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPushDescriptorSetInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkPushDescriptorSetInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -105604,9 +106219,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PushDescriptorSetInfoKHR const & ) const = default; + auto operator<=>( PushDescriptorSetInfo const & ) const = default; #else - bool operator==( PushDescriptorSetInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PushDescriptorSetInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -105616,14 +106231,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PushDescriptorSetInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PushDescriptorSetInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePushDescriptorSetInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePushDescriptorSetInfo; const void * pNext = {}; VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags = {}; VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; @@ -105633,24 +106248,26 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = PushDescriptorSetInfoKHR; + using Type = PushDescriptorSetInfo; }; - struct PushDescriptorSetWithTemplateInfoKHR + using PushDescriptorSetInfoKHR = PushDescriptorSetInfo; + + struct PushDescriptorSetWithTemplateInfo { - using NativeType = VkPushDescriptorSetWithTemplateInfoKHR; + using NativeType = 
VkPushDescriptorSetWithTemplateInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePushDescriptorSetWithTemplateInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePushDescriptorSetWithTemplateInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR PushDescriptorSetWithTemplateInfoKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate_ = {}, - VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, - uint32_t set_ = {}, - const void * pData_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR PushDescriptorSetWithTemplateInfo( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate_ = {}, + VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, + uint32_t set_ = {}, + const void * pData_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , descriptorUpdateTemplate{ descriptorUpdateTemplate_ } , layout{ layout_ } @@ -105659,63 +106276,63 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR PushDescriptorSetWithTemplateInfoKHR( PushDescriptorSetWithTemplateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR PushDescriptorSetWithTemplateInfo( PushDescriptorSetWithTemplateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - PushDescriptorSetWithTemplateInfoKHR( VkPushDescriptorSetWithTemplateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : PushDescriptorSetWithTemplateInfoKHR( *reinterpret_cast( &rhs ) ) + PushDescriptorSetWithTemplateInfo( VkPushDescriptorSetWithTemplateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : PushDescriptorSetWithTemplateInfo( *reinterpret_cast( &rhs ) ) { } - PushDescriptorSetWithTemplateInfoKHR & operator=( PushDescriptorSetWithTemplateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + PushDescriptorSetWithTemplateInfo & operator=( PushDescriptorSetWithTemplateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - PushDescriptorSetWithTemplateInfoKHR & operator=( VkPushDescriptorSetWithTemplateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + PushDescriptorSetWithTemplateInfo & operator=( VkPushDescriptorSetWithTemplateInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfoKHR & + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfo & setDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate_ ) VULKAN_HPP_NOEXCEPT { descriptorUpdateTemplate = descriptorUpdateTemplate_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfoKHR & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfo & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT { layout = layout_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfoKHR & setSet( uint32_t set_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfo & setSet( uint32_t set_ ) 
VULKAN_HPP_NOEXCEPT { set = set_; return *this; } - VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfoKHR & setPData( const void * pData_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 PushDescriptorSetWithTemplateInfo & setPData( const void * pData_ ) VULKAN_HPP_NOEXCEPT { pData = pData_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkPushDescriptorSetWithTemplateInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkPushDescriptorSetWithTemplateInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkPushDescriptorSetWithTemplateInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkPushDescriptorSetWithTemplateInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -105736,9 +106353,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( PushDescriptorSetWithTemplateInfoKHR const & ) const = default; + auto operator<=>( PushDescriptorSetWithTemplateInfo const & ) const = default; #else - bool operator==( PushDescriptorSetWithTemplateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( PushDescriptorSetWithTemplateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -105748,14 +106365,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( PushDescriptorSetWithTemplateInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( PushDescriptorSetWithTemplateInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePushDescriptorSetWithTemplateInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePushDescriptorSetWithTemplateInfo; const void * pNext = {}; VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate = {}; VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; @@ -105764,11 +106381,13 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = PushDescriptorSetWithTemplateInfoKHR; + using Type = PushDescriptorSetWithTemplateInfo; }; + using PushDescriptorSetWithTemplateInfoKHR = PushDescriptorSetWithTemplateInfo; + struct QueryLowLatencySupportNV { using NativeType = VkQueryLowLatencySupportNV; @@ -106498,64 +107117,63 @@ namespace VULKAN_HPP_NAMESPACE using Type = QueueFamilyCheckpointPropertiesNV; }; - struct QueueFamilyGlobalPriorityPropertiesKHR + struct QueueFamilyGlobalPriorityProperties { - using NativeType = VkQueueFamilyGlobalPriorityPropertiesKHR; + using NativeType = VkQueueFamilyGlobalPriorityProperties; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eQueueFamilyGlobalPriorityPropertiesKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eQueueFamilyGlobalPriorityProperties; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR_14 - QueueFamilyGlobalPriorityPropertiesKHR( uint32_t priorityCount_ = {}, - std::array const & - priorities_ = { { VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - 
VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow, - VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR::eLow } }, - void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 QueueFamilyGlobalPriorityProperties( uint32_t priorityCount_ = {}, + std::array const & + priorities_ = { { VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow, + VULKAN_HPP_NAMESPACE::QueueGlobalPriority::eLow } }, + void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , priorityCount{ priorityCount_ } , priorities{ priorities_ } { } - VULKAN_HPP_CONSTEXPR_14 QueueFamilyGlobalPriorityPropertiesKHR( QueueFamilyGlobalPriorityPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR_14 QueueFamilyGlobalPriorityProperties( QueueFamilyGlobalPriorityProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; - QueueFamilyGlobalPriorityPropertiesKHR( VkQueueFamilyGlobalPriorityPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : QueueFamilyGlobalPriorityPropertiesKHR( *reinterpret_cast( &rhs ) ) + QueueFamilyGlobalPriorityProperties( VkQueueFamilyGlobalPriorityProperties const & rhs ) VULKAN_HPP_NOEXCEPT + : QueueFamilyGlobalPriorityProperties( *reinterpret_cast( &rhs ) ) { } - QueueFamilyGlobalPriorityPropertiesKHR & operator=( QueueFamilyGlobalPriorityPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + QueueFamilyGlobalPriorityProperties & operator=( QueueFamilyGlobalPriorityProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - QueueFamilyGlobalPriorityPropertiesKHR & operator=( VkQueueFamilyGlobalPriorityPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + QueueFamilyGlobalPriorityProperties & operator=( VkQueueFamilyGlobalPriorityProperties const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } - operator VkQueueFamilyGlobalPriorityPropertiesKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkQueueFamilyGlobalPriorityProperties const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkQueueFamilyGlobalPriorityPropertiesKHR &() VULKAN_HPP_NOEXCEPT + operator VkQueueFamilyGlobalPriorityProperties &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( 
VULKAN_HPP_USE_REFLECT ) @@ -106565,7 +107183,7 @@ namespace VULKAN_HPP_NAMESPACE std::tuple const &> + VULKAN_HPP_NAMESPACE::ArrayWrapper1D const &> # endif reflect() const VULKAN_HPP_NOEXCEPT { @@ -106574,7 +107192,7 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - std::strong_ordering operator<=>( QueueFamilyGlobalPriorityPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + std::strong_ordering operator<=>( QueueFamilyGlobalPriorityProperties const & rhs ) const VULKAN_HPP_NOEXCEPT { if ( auto cmp = sType <=> rhs.sType; cmp != 0 ) return cmp; @@ -106592,31 +107210,32 @@ namespace VULKAN_HPP_NAMESPACE } #endif - bool operator==( QueueFamilyGlobalPriorityPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( QueueFamilyGlobalPriorityProperties const & rhs ) const VULKAN_HPP_NOEXCEPT { return ( sType == rhs.sType ) && ( pNext == rhs.pNext ) && ( priorityCount == rhs.priorityCount ) && - ( memcmp( priorities, rhs.priorities, priorityCount * sizeof( VULKAN_HPP_NAMESPACE::QueueGlobalPriorityKHR ) ) == 0 ); + ( memcmp( priorities, rhs.priorities, priorityCount * sizeof( VULKAN_HPP_NAMESPACE::QueueGlobalPriority ) ) == 0 ); } - bool operator!=( QueueFamilyGlobalPriorityPropertiesKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( QueueFamilyGlobalPriorityProperties const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eQueueFamilyGlobalPriorityPropertiesKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eQueueFamilyGlobalPriorityProperties; void * pNext = {}; uint32_t priorityCount = {}; - VULKAN_HPP_NAMESPACE::ArrayWrapper1D priorities = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D priorities = {}; }; template <> - struct CppType + struct CppType { - using Type = QueueFamilyGlobalPriorityPropertiesKHR; + using Type = QueueFamilyGlobalPriorityProperties; }; - using QueueFamilyGlobalPriorityPropertiesEXT = QueueFamilyGlobalPriorityPropertiesKHR; + using QueueFamilyGlobalPriorityPropertiesEXT = QueueFamilyGlobalPriorityProperties; + using QueueFamilyGlobalPriorityPropertiesKHR = QueueFamilyGlobalPriorityProperties; struct QueueFamilyProperties { @@ -111484,20 +112103,20 @@ namespace VULKAN_HPP_NAMESPACE using Type = RenderPassTransformBeginInfoQCOM; }; - struct RenderingAreaInfoKHR + struct RenderingAreaInfo { - using NativeType = VkRenderingAreaInfoKHR; + using NativeType = VkRenderingAreaInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderingAreaInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderingAreaInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR RenderingAreaInfoKHR( uint32_t viewMask_ = {}, - uint32_t colorAttachmentCount_ = {}, - const VULKAN_HPP_NAMESPACE::Format * pColorAttachmentFormats_ = {}, - VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, - VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR RenderingAreaInfo( uint32_t viewMask_ = {}, + uint32_t colorAttachmentCount_ = {}, + const VULKAN_HPP_NAMESPACE::Format * pColorAttachmentFormats_ = {}, + VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, + VULKAN_HPP_NAMESPACE::Format 
stencilAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , viewMask{ viewMask_ } , colorAttachmentCount{ colorAttachmentCount_ } @@ -111507,19 +112126,16 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR RenderingAreaInfoKHR( RenderingAreaInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR RenderingAreaInfo( RenderingAreaInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - RenderingAreaInfoKHR( VkRenderingAreaInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : RenderingAreaInfoKHR( *reinterpret_cast( &rhs ) ) - { - } + RenderingAreaInfo( VkRenderingAreaInfo const & rhs ) VULKAN_HPP_NOEXCEPT : RenderingAreaInfo( *reinterpret_cast( &rhs ) ) {} # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - RenderingAreaInfoKHR( uint32_t viewMask_, - VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentFormats_, - VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, - VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, - const void * pNext_ = nullptr ) + RenderingAreaInfo( uint32_t viewMask_, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentFormats_, + VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, + VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , viewMask( viewMask_ ) , colorAttachmentCount( static_cast( colorAttachmentFormats_.size() ) ) @@ -111530,43 +112146,42 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - RenderingAreaInfoKHR & operator=( RenderingAreaInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + RenderingAreaInfo & operator=( RenderingAreaInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - RenderingAreaInfoKHR & operator=( VkRenderingAreaInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + RenderingAreaInfo & operator=( VkRenderingAreaInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfoKHR & setViewMask( uint32_t viewMask_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfo & setViewMask( uint32_t viewMask_ ) VULKAN_HPP_NOEXCEPT { viewMask = viewMask_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfoKHR & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfo & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT { colorAttachmentCount = colorAttachmentCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfoKHR & - setPColorAttachmentFormats( const VULKAN_HPP_NAMESPACE::Format * pColorAttachmentFormats_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfo & setPColorAttachmentFormats( const VULKAN_HPP_NAMESPACE::Format * pColorAttachmentFormats_ ) VULKAN_HPP_NOEXCEPT { pColorAttachmentFormats = pColorAttachmentFormats_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - 
RenderingAreaInfoKHR & setColorAttachmentFormats( + RenderingAreaInfo & setColorAttachmentFormats( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentFormats_ ) VULKAN_HPP_NOEXCEPT { colorAttachmentCount = static_cast( colorAttachmentFormats_.size() ); @@ -111575,27 +112190,27 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfoKHR & setDepthAttachmentFormat( VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfo & setDepthAttachmentFormat( VULKAN_HPP_NAMESPACE::Format depthAttachmentFormat_ ) VULKAN_HPP_NOEXCEPT { depthAttachmentFormat = depthAttachmentFormat_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfoKHR & setStencilAttachmentFormat( VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAreaInfo & setStencilAttachmentFormat( VULKAN_HPP_NAMESPACE::Format stencilAttachmentFormat_ ) VULKAN_HPP_NOEXCEPT { stencilAttachmentFormat = stencilAttachmentFormat_; return *this; } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkRenderingAreaInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkRenderingAreaInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkRenderingAreaInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkRenderingAreaInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -111617,9 +112232,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( RenderingAreaInfoKHR const & ) const = default; + auto operator<=>( RenderingAreaInfo const & ) const = default; #else - bool operator==( RenderingAreaInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( RenderingAreaInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -111630,14 +112245,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( RenderingAreaInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( RenderingAreaInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderingAreaInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderingAreaInfo; const void * pNext = {}; uint32_t viewMask = {}; uint32_t colorAttachmentCount = {}; @@ -111647,11 +112262,13 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = RenderingAreaInfoKHR; + using Type = RenderingAreaInfo; }; + using RenderingAreaInfoKHR = RenderingAreaInfo; + struct RenderingAttachmentInfo { using NativeType = VkRenderingAttachmentInfo; @@ -111805,33 +112422,33 @@ namespace VULKAN_HPP_NAMESPACE using RenderingAttachmentInfoKHR = RenderingAttachmentInfo; - struct RenderingAttachmentLocationInfoKHR + struct RenderingAttachmentLocationInfo { - using NativeType = VkRenderingAttachmentLocationInfoKHR; + using NativeType = VkRenderingAttachmentLocationInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderingAttachmentLocationInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderingAttachmentLocationInfo; #if !defined( 
VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR RenderingAttachmentLocationInfoKHR( uint32_t colorAttachmentCount_ = {}, - const uint32_t * pColorAttachmentLocations_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR RenderingAttachmentLocationInfo( uint32_t colorAttachmentCount_ = {}, + const uint32_t * pColorAttachmentLocations_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , colorAttachmentCount{ colorAttachmentCount_ } , pColorAttachmentLocations{ pColorAttachmentLocations_ } { } - VULKAN_HPP_CONSTEXPR RenderingAttachmentLocationInfoKHR( RenderingAttachmentLocationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR RenderingAttachmentLocationInfo( RenderingAttachmentLocationInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - RenderingAttachmentLocationInfoKHR( VkRenderingAttachmentLocationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : RenderingAttachmentLocationInfoKHR( *reinterpret_cast( &rhs ) ) + RenderingAttachmentLocationInfo( VkRenderingAttachmentLocationInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : RenderingAttachmentLocationInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - RenderingAttachmentLocationInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentLocations_, - const void * pNext_ = nullptr ) + RenderingAttachmentLocationInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentLocations_, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , colorAttachmentCount( static_cast( colorAttachmentLocations_.size() ) ) , pColorAttachmentLocations( colorAttachmentLocations_.data() ) @@ -111839,36 +112456,36 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - RenderingAttachmentLocationInfoKHR & operator=( RenderingAttachmentLocationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + RenderingAttachmentLocationInfo & operator=( RenderingAttachmentLocationInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - RenderingAttachmentLocationInfoKHR & operator=( VkRenderingAttachmentLocationInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + RenderingAttachmentLocationInfo & operator=( VkRenderingAttachmentLocationInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 RenderingAttachmentLocationInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAttachmentLocationInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingAttachmentLocationInfoKHR & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAttachmentLocationInfo & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT { colorAttachmentCount = colorAttachmentCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingAttachmentLocationInfoKHR & setPColorAttachmentLocations( const uint32_t * pColorAttachmentLocations_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingAttachmentLocationInfo & setPColorAttachmentLocations( const uint32_t * pColorAttachmentLocations_ ) VULKAN_HPP_NOEXCEPT { pColorAttachmentLocations = pColorAttachmentLocations_; return *this; } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - RenderingAttachmentLocationInfoKHR & 
+ RenderingAttachmentLocationInfo & setColorAttachmentLocations( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentLocations_ ) VULKAN_HPP_NOEXCEPT { colorAttachmentCount = static_cast( colorAttachmentLocations_.size() ); @@ -111878,14 +112495,14 @@ namespace VULKAN_HPP_NAMESPACE # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkRenderingAttachmentLocationInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkRenderingAttachmentLocationInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkRenderingAttachmentLocationInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkRenderingAttachmentLocationInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -111901,9 +112518,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( RenderingAttachmentLocationInfoKHR const & ) const = default; + auto operator<=>( RenderingAttachmentLocationInfo const & ) const = default; #else - bool operator==( RenderingAttachmentLocationInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( RenderingAttachmentLocationInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -111913,25 +112530,27 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( RenderingAttachmentLocationInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( RenderingAttachmentLocationInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderingAttachmentLocationInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderingAttachmentLocationInfo; const void * pNext = {}; uint32_t colorAttachmentCount = {}; const uint32_t * pColorAttachmentLocations = {}; }; template <> - struct CppType + struct CppType { - using Type = RenderingAttachmentLocationInfoKHR; + using Type = RenderingAttachmentLocationInfo; }; + using RenderingAttachmentLocationInfoKHR = RenderingAttachmentLocationInfo; + struct RenderingFragmentDensityMapAttachmentInfoEXT { using NativeType = VkRenderingFragmentDensityMapAttachmentInfoEXT; @@ -112368,19 +112987,19 @@ namespace VULKAN_HPP_NAMESPACE using RenderingInfoKHR = RenderingInfo; - struct RenderingInputAttachmentIndexInfoKHR + struct RenderingInputAttachmentIndexInfo { - using NativeType = VkRenderingInputAttachmentIndexInfoKHR; + using NativeType = VkRenderingInputAttachmentIndexInfo; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderingInputAttachmentIndexInfoKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderingInputAttachmentIndexInfo; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR RenderingInputAttachmentIndexInfoKHR( uint32_t colorAttachmentCount_ = {}, - const uint32_t * pColorAttachmentInputIndices_ = {}, - const uint32_t * pDepthInputAttachmentIndex_ = {}, - const uint32_t * pStencilInputAttachmentIndex_ = {}, - const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR RenderingInputAttachmentIndexInfo( uint32_t colorAttachmentCount_ = {}, + const uint32_t * pColorAttachmentInputIndices_ = {}, + const uint32_t * pDepthInputAttachmentIndex_ = {}, + const 
uint32_t * pStencilInputAttachmentIndex_ = {}, + const void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , colorAttachmentCount{ colorAttachmentCount_ } , pColorAttachmentInputIndices{ pColorAttachmentInputIndices_ } @@ -112389,18 +113008,18 @@ namespace VULKAN_HPP_NAMESPACE { } - VULKAN_HPP_CONSTEXPR RenderingInputAttachmentIndexInfoKHR( RenderingInputAttachmentIndexInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR RenderingInputAttachmentIndexInfo( RenderingInputAttachmentIndexInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; - RenderingInputAttachmentIndexInfoKHR( VkRenderingInputAttachmentIndexInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT - : RenderingInputAttachmentIndexInfoKHR( *reinterpret_cast( &rhs ) ) + RenderingInputAttachmentIndexInfo( VkRenderingInputAttachmentIndexInfo const & rhs ) VULKAN_HPP_NOEXCEPT + : RenderingInputAttachmentIndexInfo( *reinterpret_cast( &rhs ) ) { } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - RenderingInputAttachmentIndexInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentInputIndices_, - const uint32_t * pDepthInputAttachmentIndex_ = {}, - const uint32_t * pStencilInputAttachmentIndex_ = {}, - const void * pNext_ = nullptr ) + RenderingInputAttachmentIndexInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentInputIndices_, + const uint32_t * pDepthInputAttachmentIndex_ = {}, + const uint32_t * pStencilInputAttachmentIndex_ = {}, + const void * pNext_ = nullptr ) : pNext( pNext_ ) , colorAttachmentCount( static_cast( colorAttachmentInputIndices_.size() ) ) , pColorAttachmentInputIndices( colorAttachmentInputIndices_.data() ) @@ -112410,29 +113029,29 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - RenderingInputAttachmentIndexInfoKHR & operator=( RenderingInputAttachmentIndexInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + RenderingInputAttachmentIndexInfo & operator=( RenderingInputAttachmentIndexInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - RenderingInputAttachmentIndexInfoKHR & operator=( VkRenderingInputAttachmentIndexInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + RenderingInputAttachmentIndexInfo & operator=( VkRenderingInputAttachmentIndexInfo const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } #if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) - VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfoKHR & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfo & setPNext( const void * pNext_ ) VULKAN_HPP_NOEXCEPT { pNext = pNext_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfoKHR & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfo & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT { colorAttachmentCount = colorAttachmentCount_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfoKHR & + VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfo & setPColorAttachmentInputIndices( const uint32_t * pColorAttachmentInputIndices_ ) VULKAN_HPP_NOEXCEPT { pColorAttachmentInputIndices = pColorAttachmentInputIndices_; @@ -112440,7 +113059,7 @@ namespace VULKAN_HPP_NAMESPACE } # if !defined( VULKAN_HPP_DISABLE_ENHANCED_MODE ) - RenderingInputAttachmentIndexInfoKHR & + 
RenderingInputAttachmentIndexInfo & setColorAttachmentInputIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachmentInputIndices_ ) VULKAN_HPP_NOEXCEPT { colorAttachmentCount = static_cast( colorAttachmentInputIndices_.size() ); @@ -112449,14 +113068,14 @@ namespace VULKAN_HPP_NAMESPACE } # endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ - VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfoKHR & + VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfo & setPDepthInputAttachmentIndex( const uint32_t * pDepthInputAttachmentIndex_ ) VULKAN_HPP_NOEXCEPT { pDepthInputAttachmentIndex = pDepthInputAttachmentIndex_; return *this; } - VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfoKHR & + VULKAN_HPP_CONSTEXPR_14 RenderingInputAttachmentIndexInfo & setPStencilInputAttachmentIndex( const uint32_t * pStencilInputAttachmentIndex_ ) VULKAN_HPP_NOEXCEPT { pStencilInputAttachmentIndex = pStencilInputAttachmentIndex_; @@ -112464,14 +113083,14 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VULKAN_HPP_NO_STRUCT_SETTERS*/ - operator VkRenderingInputAttachmentIndexInfoKHR const &() const VULKAN_HPP_NOEXCEPT + operator VkRenderingInputAttachmentIndexInfo const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkRenderingInputAttachmentIndexInfoKHR &() VULKAN_HPP_NOEXCEPT + operator VkRenderingInputAttachmentIndexInfo &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -112492,9 +113111,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( RenderingInputAttachmentIndexInfoKHR const & ) const = default; + auto operator<=>( RenderingInputAttachmentIndexInfo const & ) const = default; #else - bool operator==( RenderingInputAttachmentIndexInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( RenderingInputAttachmentIndexInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -112505,14 +113124,14 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( RenderingInputAttachmentIndexInfoKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( RenderingInputAttachmentIndexInfo const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderingInputAttachmentIndexInfoKHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderingInputAttachmentIndexInfo; const void * pNext = {}; uint32_t colorAttachmentCount = {}; const uint32_t * pColorAttachmentInputIndices = {}; @@ -112521,11 +113140,13 @@ namespace VULKAN_HPP_NAMESPACE }; template <> - struct CppType + struct CppType { - using Type = RenderingInputAttachmentIndexInfoKHR; + using Type = RenderingInputAttachmentIndexInfo; }; + using RenderingInputAttachmentIndexInfoKHR = RenderingInputAttachmentIndexInfo; + struct ResolveImageInfo2 { using NativeType = VkResolveImageInfo2; @@ -118117,44 +118738,44 @@ namespace VULKAN_HPP_NAMESPACE using Type = SubpassShadingPipelineCreateInfoHUAWEI; }; - struct SubresourceHostMemcpySizeEXT + struct SubresourceHostMemcpySize { - using NativeType = VkSubresourceHostMemcpySizeEXT; + using NativeType = VkSubresourceHostMemcpySize; static const bool allowDuplicate = false; - static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubresourceHostMemcpySizeEXT; + static 
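
The hunks above promote the dynamic-rendering-local-read structs to core: the `ArrayProxyNoTemporaries` setter fills the count and pointer members together, and every setter returns `*this` for chaining. A small sketch of that usage (variable names and the attachment order are made up; the command-buffer call mentioned in the trailing comment is promoted alongside these structs but is not part of the hunks shown here):

```cpp
// Sketch only: fill the promoted RenderingAttachmentLocationInfo fluently.
#include <array>
#include <cassert>
#include <vulkan/vulkan.hpp>

void remap_color_attachments()
{
    // Swap the shader's color outputs 0 and 1 for the current rendering scope.
    std::array<uint32_t, 2> locations{ 1, 0 };

    // The ArrayProxy overload sets colorAttachmentCount and
    // pColorAttachmentLocations in one call; setters return *this.
    auto info = vk::RenderingAttachmentLocationInfo{}
                    .setColorAttachmentLocations( locations );

    assert( info.colorAttachmentCount == locations.size() );
    assert( info.pColorAttachmentLocations == locations.data() );
    // This struct is what vkCmdSetRenderingAttachmentLocations (promoted to core
    // together with it, not shown in this hunk) consumes.
}
```
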
VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubresourceHostMemcpySize; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR SubresourceHostMemcpySizeEXT( VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR SubresourceHostMemcpySize( VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , size{ size_ } { } - VULKAN_HPP_CONSTEXPR SubresourceHostMemcpySizeEXT( SubresourceHostMemcpySizeEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR SubresourceHostMemcpySize( SubresourceHostMemcpySize const & rhs ) VULKAN_HPP_NOEXCEPT = default; - SubresourceHostMemcpySizeEXT( VkSubresourceHostMemcpySizeEXT const & rhs ) VULKAN_HPP_NOEXCEPT - : SubresourceHostMemcpySizeEXT( *reinterpret_cast( &rhs ) ) + SubresourceHostMemcpySize( VkSubresourceHostMemcpySize const & rhs ) VULKAN_HPP_NOEXCEPT + : SubresourceHostMemcpySize( *reinterpret_cast( &rhs ) ) { } - SubresourceHostMemcpySizeEXT & operator=( SubresourceHostMemcpySizeEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + SubresourceHostMemcpySize & operator=( SubresourceHostMemcpySize const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - SubresourceHostMemcpySizeEXT & operator=( VkSubresourceHostMemcpySizeEXT const & rhs ) VULKAN_HPP_NOEXCEPT + SubresourceHostMemcpySize & operator=( VkSubresourceHostMemcpySize const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } - operator VkSubresourceHostMemcpySizeEXT const &() const VULKAN_HPP_NOEXCEPT + operator VkSubresourceHostMemcpySize const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkSubresourceHostMemcpySizeEXT &() VULKAN_HPP_NOEXCEPT + operator VkSubresourceHostMemcpySize &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -118170,9 +118791,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( SubresourceHostMemcpySizeEXT const & ) const = default; + auto operator<=>( SubresourceHostMemcpySize const & ) const = default; #else - bool operator==( SubresourceHostMemcpySizeEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( SubresourceHostMemcpySize const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -118181,62 +118802,61 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( SubresourceHostMemcpySizeEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( SubresourceHostMemcpySize const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubresourceHostMemcpySizeEXT; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubresourceHostMemcpySize; void * pNext = {}; VULKAN_HPP_NAMESPACE::DeviceSize size = {}; }; template <> - struct CppType + struct CppType { - using Type = SubresourceHostMemcpySizeEXT; + using Type = SubresourceHostMemcpySize; }; - struct SubresourceLayout2KHR + using SubresourceHostMemcpySizeEXT = SubresourceHostMemcpySize; + + struct SubresourceLayout2 { - using NativeType = VkSubresourceLayout2KHR; + using NativeType = VkSubresourceLayout2; static const bool allowDuplicate = false; - static 
VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubresourceLayout2KHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubresourceLayout2; #if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) - VULKAN_HPP_CONSTEXPR SubresourceLayout2KHR( VULKAN_HPP_NAMESPACE::SubresourceLayout subresourceLayout_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT + VULKAN_HPP_CONSTEXPR SubresourceLayout2( VULKAN_HPP_NAMESPACE::SubresourceLayout subresourceLayout_ = {}, void * pNext_ = nullptr ) VULKAN_HPP_NOEXCEPT : pNext{ pNext_ } , subresourceLayout{ subresourceLayout_ } { } - VULKAN_HPP_CONSTEXPR SubresourceLayout2KHR( SubresourceLayout2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + VULKAN_HPP_CONSTEXPR SubresourceLayout2( SubresourceLayout2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; - SubresourceLayout2KHR( VkSubresourceLayout2KHR const & rhs ) VULKAN_HPP_NOEXCEPT - : SubresourceLayout2KHR( *reinterpret_cast( &rhs ) ) - { - } + SubresourceLayout2( VkSubresourceLayout2 const & rhs ) VULKAN_HPP_NOEXCEPT : SubresourceLayout2( *reinterpret_cast( &rhs ) ) {} - SubresourceLayout2KHR & operator=( SubresourceLayout2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + SubresourceLayout2 & operator=( SubresourceLayout2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; #endif /*VULKAN_HPP_NO_STRUCT_CONSTRUCTORS*/ - SubresourceLayout2KHR & operator=( VkSubresourceLayout2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + SubresourceLayout2 & operator=( VkSubresourceLayout2 const & rhs ) VULKAN_HPP_NOEXCEPT { - *this = *reinterpret_cast( &rhs ); + *this = *reinterpret_cast( &rhs ); return *this; } - operator VkSubresourceLayout2KHR const &() const VULKAN_HPP_NOEXCEPT + operator VkSubresourceLayout2 const &() const VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } - operator VkSubresourceLayout2KHR &() VULKAN_HPP_NOEXCEPT + operator VkSubresourceLayout2 &() VULKAN_HPP_NOEXCEPT { - return *reinterpret_cast( this ); + return *reinterpret_cast( this ); } #if defined( VULKAN_HPP_USE_REFLECT ) @@ -118252,9 +118872,9 @@ namespace VULKAN_HPP_NAMESPACE #endif #if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) - auto operator<=>( SubresourceLayout2KHR const & ) const = default; + auto operator<=>( SubresourceLayout2 const & ) const = default; #else - bool operator==( SubresourceLayout2KHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator==( SubresourceLayout2 const & rhs ) const VULKAN_HPP_NOEXCEPT { # if defined( VULKAN_HPP_USE_REFLECT ) return this->reflect() == rhs.reflect(); @@ -118263,25 +118883,26 @@ namespace VULKAN_HPP_NAMESPACE # endif } - bool operator!=( SubresourceLayout2KHR const & rhs ) const VULKAN_HPP_NOEXCEPT + bool operator!=( SubresourceLayout2 const & rhs ) const VULKAN_HPP_NOEXCEPT { return !operator==( rhs ); } #endif public: - VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubresourceLayout2KHR; + VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubresourceLayout2; void * pNext = {}; VULKAN_HPP_NAMESPACE::SubresourceLayout subresourceLayout = {}; }; template <> - struct CppType + struct CppType { - using Type = SubresourceLayout2KHR; + using Type = SubresourceLayout2; }; - using SubresourceLayout2EXT = SubresourceLayout2KHR; + using SubresourceLayout2EXT = SubresourceLayout2; + using SubresourceLayout2KHR = SubresourceLayout2; struct SurfaceCapabilities2EXT { diff --git a/third_party/vulkan/vulkan_to_string.hpp b/third_party/vulkan/vulkan_to_string.hpp index d8f29ee..971dc25 100644 
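
Each promoted struct keeps its old spelling as a `using` alias (`RenderingAreaInfoKHR`, `SubresourceLayout2KHR`, `SubresourceHostMemcpySizeEXT`, …) and its `sType` now defaults to the core `StructureType` value. A minimal compatibility sketch, not part of the diff itself, showing that call sites written against the KHR names keep building:

```cpp
// Compatibility sketch: the KHR spellings are plain aliases of the core structs.
#include <cstdint>
#include <type_traits>
#include <vulkan/vulkan.hpp>

static_assert( std::is_same_v<vk::RenderingAreaInfoKHR, vk::RenderingAreaInfo>,
               "KHR name is an alias of the promoted core struct" );
static_assert( std::is_same_v<vk::SubresourceLayout2KHR, vk::SubresourceLayout2>,
               "same for SubresourceLayout2" );

// sType already defaults to StructureType::eRenderingAreaInfo here.
inline vk::RenderingAreaInfo make_area_info( uint32_t viewMask )
{
    return vk::RenderingAreaInfo{}.setViewMask( viewMask );
}
```
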
--- a/third_party/vulkan/vulkan_to_string.hpp +++ b/third_party/vulkan/vulkan_to_string.hpp @@ -182,6 +182,8 @@ namespace VULKAN_HPP_NAMESPACE result += "TransientAttachment | "; if ( value & ImageUsageFlagBits::eInputAttachment ) result += "InputAttachment | "; + if ( value & ImageUsageFlagBits::eHostTransfer ) + result += "HostTransfer | "; if ( value & ImageUsageFlagBits::eVideoDecodeDstKHR ) result += "VideoDecodeDstKHR | "; if ( value & ImageUsageFlagBits::eVideoDecodeSrcKHR ) @@ -192,8 +194,6 @@ namespace VULKAN_HPP_NAMESPACE result += "FragmentDensityMapEXT | "; if ( value & ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR ) result += "FragmentShadingRateAttachmentKHR | "; - if ( value & ImageUsageFlagBits::eHostTransferEXT ) - result += "HostTransferEXT | "; if ( value & ImageUsageFlagBits::eVideoEncodeDstKHR ) result += "VideoEncodeDstKHR | "; if ( value & ImageUsageFlagBits::eVideoEncodeSrcKHR ) @@ -753,6 +753,10 @@ namespace VULKAN_HPP_NAMESPACE result += "FailOnPipelineCompileRequired | "; if ( value & PipelineCreateFlagBits::eEarlyReturnOnFailure ) result += "EarlyReturnOnFailure | "; + if ( value & PipelineCreateFlagBits::eNoProtectedAccess ) + result += "NoProtectedAccess | "; + if ( value & PipelineCreateFlagBits::eProtectedAccessOnly ) + result += "ProtectedAccessOnly | "; if ( value & PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR ) result += "RayTracingNoNullAnyHitShadersKHR | "; if ( value & PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR ) @@ -799,10 +803,6 @@ namespace VULKAN_HPP_NAMESPACE if ( value & PipelineCreateFlagBits::eRayTracingDisplacementMicromapNV ) result += "RayTracingDisplacementMicromapNV | "; #endif /*VK_ENABLE_BETA_EXTENSIONS*/ - if ( value & PipelineCreateFlagBits::eNoProtectedAccessEXT ) - result += "NoProtectedAccessEXT | "; - if ( value & PipelineCreateFlagBits::eProtectedAccessOnlyEXT ) - result += "ProtectedAccessOnlyEXT | "; return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } @@ -977,8 +977,8 @@ namespace VULKAN_HPP_NAMESPACE std::string result; if ( value & DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool ) result += "UpdateAfterBindPool | "; - if ( value & DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR ) - result += "PushDescriptorKHR | "; + if ( value & DescriptorSetLayoutCreateFlagBits::ePushDescriptor ) + result += "PushDescriptor | "; if ( value & DescriptorSetLayoutCreateFlagBits::eDescriptorBufferEXT ) result += "DescriptorBufferEXT | "; if ( value & DescriptorSetLayoutCreateFlagBits::eEmbeddedImmutableSamplersEXT ) @@ -1245,12 +1245,12 @@ namespace VULKAN_HPP_NAMESPACE result += "Clustered | "; if ( value & SubgroupFeatureFlagBits::eQuad ) result += "Quad | "; + if ( value & SubgroupFeatureFlagBits::eRotate ) + result += "Rotate | "; + if ( value & SubgroupFeatureFlagBits::eRotateClustered ) + result += "RotateClustered | "; if ( value & SubgroupFeatureFlagBits::ePartitionedNV ) result += "PartitionedNV | "; - if ( value & SubgroupFeatureFlagBits::eRotateKHR ) - result += "RotateKHR | "; - if ( value & SubgroupFeatureFlagBits::eRotateClusteredKHR ) - result += "RotateClusteredKHR | "; return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } @@ -1804,8 +1804,6 @@ namespace VULKAN_HPP_NAMESPACE result += "BlitDst | "; if ( value & FormatFeatureFlagBits2::eSampledImageFilterLinear ) result += "SampledImageFilterLinear | "; - if ( value & FormatFeatureFlagBits2::eSampledImageFilterCubic ) - result += "SampledImageFilterCubic | "; if ( value & FormatFeatureFlagBits2::eTransferSrc ) 
result += "TransferSrc | "; if ( value & FormatFeatureFlagBits2::eTransferDst ) @@ -1832,6 +1830,10 @@ namespace VULKAN_HPP_NAMESPACE result += "StorageWriteWithoutFormat | "; if ( value & FormatFeatureFlagBits2::eSampledImageDepthComparison ) result += "SampledImageDepthComparison | "; + if ( value & FormatFeatureFlagBits2::eSampledImageFilterCubic ) + result += "SampledImageFilterCubic | "; + if ( value & FormatFeatureFlagBits2::eHostImageTransfer ) + result += "HostImageTransfer | "; if ( value & FormatFeatureFlagBits2::eVideoDecodeOutputKHR ) result += "VideoDecodeOutputKHR | "; if ( value & FormatFeatureFlagBits2::eVideoDecodeDpbKHR ) @@ -1842,8 +1844,6 @@ namespace VULKAN_HPP_NAMESPACE result += "FragmentDensityMapEXT | "; if ( value & FormatFeatureFlagBits2::eFragmentShadingRateAttachmentKHR ) result += "FragmentShadingRateAttachmentKHR | "; - if ( value & FormatFeatureFlagBits2::eHostImageTransferEXT ) - result += "HostImageTransferEXT | "; if ( value & FormatFeatureFlagBits2::eVideoEncodeInputKHR ) result += "VideoEncodeInputKHR | "; if ( value & FormatFeatureFlagBits2::eVideoEncodeDpbKHR ) @@ -1872,6 +1872,180 @@ namespace VULKAN_HPP_NAMESPACE return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } + //=== VK_VERSION_1_4 === + + VULKAN_HPP_INLINE std::string to_string( MemoryUnmapFlags value ) + { + if ( !value ) + return "{}"; + + std::string result; + if ( value & MemoryUnmapFlagBits::eReserveEXT ) + result += "ReserveEXT | "; + + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } + + VULKAN_HPP_INLINE std::string to_string( PipelineCreateFlags2 value ) + { + if ( !value ) + return "{}"; + + std::string result; + if ( value & PipelineCreateFlagBits2::eDisableOptimization ) + result += "DisableOptimization | "; + if ( value & PipelineCreateFlagBits2::eAllowDerivatives ) + result += "AllowDerivatives | "; + if ( value & PipelineCreateFlagBits2::eDerivative ) + result += "Derivative | "; + if ( value & PipelineCreateFlagBits2::eViewIndexFromDeviceIndex ) + result += "ViewIndexFromDeviceIndex | "; + if ( value & PipelineCreateFlagBits2::eDispatchBase ) + result += "DispatchBase | "; + if ( value & PipelineCreateFlagBits2::eFailOnPipelineCompileRequired ) + result += "FailOnPipelineCompileRequired | "; + if ( value & PipelineCreateFlagBits2::eEarlyReturnOnFailure ) + result += "EarlyReturnOnFailure | "; + if ( value & PipelineCreateFlagBits2::eNoProtectedAccess ) + result += "NoProtectedAccess | "; + if ( value & PipelineCreateFlagBits2::eProtectedAccessOnly ) + result += "ProtectedAccessOnly | "; +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + if ( value & PipelineCreateFlagBits2::eExecutionGraphAMDX ) + result += "ExecutionGraphAMDX | "; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + if ( value & PipelineCreateFlagBits2::eEnableLegacyDitheringEXT ) + result += "EnableLegacyDitheringEXT | "; + if ( value & PipelineCreateFlagBits2::eDeferCompileNV ) + result += "DeferCompileNV | "; + if ( value & PipelineCreateFlagBits2::eCaptureStatisticsKHR ) + result += "CaptureStatisticsKHR | "; + if ( value & PipelineCreateFlagBits2::eCaptureInternalRepresentationsKHR ) + result += "CaptureInternalRepresentationsKHR | "; + if ( value & PipelineCreateFlagBits2::eLinkTimeOptimizationEXT ) + result += "LinkTimeOptimizationEXT | "; + if ( value & PipelineCreateFlagBits2::eRetainLinkTimeOptimizationInfoEXT ) + result += "RetainLinkTimeOptimizationInfoEXT | "; + if ( value & PipelineCreateFlagBits2::eLibraryKHR ) + result += "LibraryKHR | "; + if ( value & 
PipelineCreateFlagBits2::eRayTracingSkipTrianglesKHR ) + result += "RayTracingSkipTrianglesKHR | "; + if ( value & PipelineCreateFlagBits2::eRayTracingSkipAabbsKHR ) + result += "RayTracingSkipAabbsKHR | "; + if ( value & PipelineCreateFlagBits2::eRayTracingNoNullAnyHitShadersKHR ) + result += "RayTracingNoNullAnyHitShadersKHR | "; + if ( value & PipelineCreateFlagBits2::eRayTracingNoNullClosestHitShadersKHR ) + result += "RayTracingNoNullClosestHitShadersKHR | "; + if ( value & PipelineCreateFlagBits2::eRayTracingNoNullMissShadersKHR ) + result += "RayTracingNoNullMissShadersKHR | "; + if ( value & PipelineCreateFlagBits2::eRayTracingNoNullIntersectionShadersKHR ) + result += "RayTracingNoNullIntersectionShadersKHR | "; + if ( value & PipelineCreateFlagBits2::eRayTracingShaderGroupHandleCaptureReplayKHR ) + result += "RayTracingShaderGroupHandleCaptureReplayKHR | "; + if ( value & PipelineCreateFlagBits2::eIndirectBindableNV ) + result += "IndirectBindableNV | "; + if ( value & PipelineCreateFlagBits2::eRayTracingAllowMotionNV ) + result += "RayTracingAllowMotionNV | "; + if ( value & PipelineCreateFlagBits2::eRenderingFragmentShadingRateAttachmentKHR ) + result += "RenderingFragmentShadingRateAttachmentKHR | "; + if ( value & PipelineCreateFlagBits2::eRenderingFragmentDensityMapAttachmentEXT ) + result += "RenderingFragmentDensityMapAttachmentEXT | "; + if ( value & PipelineCreateFlagBits2::eRayTracingOpacityMicromapEXT ) + result += "RayTracingOpacityMicromapEXT | "; + if ( value & PipelineCreateFlagBits2::eColorAttachmentFeedbackLoopEXT ) + result += "ColorAttachmentFeedbackLoopEXT | "; + if ( value & PipelineCreateFlagBits2::eDepthStencilAttachmentFeedbackLoopEXT ) + result += "DepthStencilAttachmentFeedbackLoopEXT | "; + if ( value & PipelineCreateFlagBits2::eRayTracingDisplacementMicromapNV ) + result += "RayTracingDisplacementMicromapNV | "; + if ( value & PipelineCreateFlagBits2::eDescriptorBufferEXT ) + result += "DescriptorBufferEXT | "; + if ( value & PipelineCreateFlagBits2::eCaptureDataKHR ) + result += "CaptureDataKHR | "; + if ( value & PipelineCreateFlagBits2::eIndirectBindableEXT ) + result += "IndirectBindableEXT | "; + + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } + + VULKAN_HPP_INLINE std::string to_string( BufferUsageFlags2 value ) + { + if ( !value ) + return "{}"; + + std::string result; + if ( value & BufferUsageFlagBits2::eTransferSrc ) + result += "TransferSrc | "; + if ( value & BufferUsageFlagBits2::eTransferDst ) + result += "TransferDst | "; + if ( value & BufferUsageFlagBits2::eUniformTexelBuffer ) + result += "UniformTexelBuffer | "; + if ( value & BufferUsageFlagBits2::eStorageTexelBuffer ) + result += "StorageTexelBuffer | "; + if ( value & BufferUsageFlagBits2::eUniformBuffer ) + result += "UniformBuffer | "; + if ( value & BufferUsageFlagBits2::eStorageBuffer ) + result += "StorageBuffer | "; + if ( value & BufferUsageFlagBits2::eIndexBuffer ) + result += "IndexBuffer | "; + if ( value & BufferUsageFlagBits2::eVertexBuffer ) + result += "VertexBuffer | "; + if ( value & BufferUsageFlagBits2::eIndirectBuffer ) + result += "IndirectBuffer | "; + if ( value & BufferUsageFlagBits2::eShaderDeviceAddress ) + result += "ShaderDeviceAddress | "; +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + if ( value & BufferUsageFlagBits2::eExecutionGraphScratchAMDX ) + result += "ExecutionGraphScratchAMDX | "; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + if ( value & BufferUsageFlagBits2::eConditionalRenderingEXT ) + result += "ConditionalRenderingEXT | "; + if 
( value & BufferUsageFlagBits2::eShaderBindingTableKHR ) + result += "ShaderBindingTableKHR | "; + if ( value & BufferUsageFlagBits2::eTransformFeedbackBufferEXT ) + result += "TransformFeedbackBufferEXT | "; + if ( value & BufferUsageFlagBits2::eTransformFeedbackCounterBufferEXT ) + result += "TransformFeedbackCounterBufferEXT | "; + if ( value & BufferUsageFlagBits2::eVideoDecodeSrcKHR ) + result += "VideoDecodeSrcKHR | "; + if ( value & BufferUsageFlagBits2::eVideoDecodeDstKHR ) + result += "VideoDecodeDstKHR | "; + if ( value & BufferUsageFlagBits2::eVideoEncodeDstKHR ) + result += "VideoEncodeDstKHR | "; + if ( value & BufferUsageFlagBits2::eVideoEncodeSrcKHR ) + result += "VideoEncodeSrcKHR | "; + if ( value & BufferUsageFlagBits2::eAccelerationStructureBuildInputReadOnlyKHR ) + result += "AccelerationStructureBuildInputReadOnlyKHR | "; + if ( value & BufferUsageFlagBits2::eAccelerationStructureStorageKHR ) + result += "AccelerationStructureStorageKHR | "; + if ( value & BufferUsageFlagBits2::eSamplerDescriptorBufferEXT ) + result += "SamplerDescriptorBufferEXT | "; + if ( value & BufferUsageFlagBits2::eResourceDescriptorBufferEXT ) + result += "ResourceDescriptorBufferEXT | "; + if ( value & BufferUsageFlagBits2::ePushDescriptorsDescriptorBufferEXT ) + result += "PushDescriptorsDescriptorBufferEXT | "; + if ( value & BufferUsageFlagBits2::eMicromapBuildInputReadOnlyEXT ) + result += "MicromapBuildInputReadOnlyEXT | "; + if ( value & BufferUsageFlagBits2::eMicromapStorageEXT ) + result += "MicromapStorageEXT | "; + if ( value & BufferUsageFlagBits2::ePreprocessBufferEXT ) + result += "PreprocessBufferEXT | "; + + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } + + VULKAN_HPP_INLINE std::string to_string( HostImageCopyFlags value ) + { + if ( !value ) + return "{}"; + + std::string result; + if ( value & HostImageCopyFlagBits::eMemcpy ) + result += "Memcpy | "; + + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } + //=== VK_KHR_surface === VULKAN_HPP_INLINE std::string to_string( CompositeAlphaFlagsKHR value ) @@ -2839,34 +3013,6 @@ namespace VULKAN_HPP_NAMESPACE return "{}"; } - //=== VK_EXT_host_image_copy === - - VULKAN_HPP_INLINE std::string to_string( HostImageCopyFlagsEXT value ) - { - if ( !value ) - return "{}"; - - std::string result; - if ( value & HostImageCopyFlagBitsEXT::eMemcpy ) - result += "Memcpy | "; - - return "{ " + result.substr( 0, result.size() - 3 ) + " }"; - } - - //=== VK_KHR_map_memory2 === - - VULKAN_HPP_INLINE std::string to_string( MemoryUnmapFlagsKHR value ) - { - if ( !value ) - return "{}"; - - std::string result; - if ( value & MemoryUnmapFlagBitsKHR::eReserveEXT ) - result += "ReserveEXT | "; - - return "{ " + result.substr( 0, result.size() - 3 ) + " }"; - } - //=== VK_EXT_surface_maintenance1 === VULKAN_HPP_INLINE std::string to_string( PresentScalingFlagsEXT value ) @@ -3409,156 +3555,6 @@ namespace VULKAN_HPP_NAMESPACE return "{ " + result.substr( 0, result.size() - 3 ) + " }"; } - //=== VK_KHR_maintenance5 === - - VULKAN_HPP_INLINE std::string to_string( PipelineCreateFlags2KHR value ) - { - if ( !value ) - return "{}"; - - std::string result; - if ( value & PipelineCreateFlagBits2KHR::eDisableOptimization ) - result += "DisableOptimization | "; - if ( value & PipelineCreateFlagBits2KHR::eAllowDerivatives ) - result += "AllowDerivatives | "; - if ( value & PipelineCreateFlagBits2KHR::eDerivative ) - result += "Derivative | "; -#if defined( VK_ENABLE_BETA_EXTENSIONS ) - if ( value & 
PipelineCreateFlagBits2KHR::eExecutionGraphAMDX ) - result += "ExecutionGraphAMDX | "; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - if ( value & PipelineCreateFlagBits2KHR::eEnableLegacyDitheringEXT ) - result += "EnableLegacyDitheringEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eViewIndexFromDeviceIndex ) - result += "ViewIndexFromDeviceIndex | "; - if ( value & PipelineCreateFlagBits2KHR::eDispatchBase ) - result += "DispatchBase | "; - if ( value & PipelineCreateFlagBits2KHR::eDeferCompileNV ) - result += "DeferCompileNV | "; - if ( value & PipelineCreateFlagBits2KHR::eCaptureStatistics ) - result += "CaptureStatistics | "; - if ( value & PipelineCreateFlagBits2KHR::eCaptureInternalRepresentations ) - result += "CaptureInternalRepresentations | "; - if ( value & PipelineCreateFlagBits2KHR::eFailOnPipelineCompileRequired ) - result += "FailOnPipelineCompileRequired | "; - if ( value & PipelineCreateFlagBits2KHR::eEarlyReturnOnFailure ) - result += "EarlyReturnOnFailure | "; - if ( value & PipelineCreateFlagBits2KHR::eLinkTimeOptimizationEXT ) - result += "LinkTimeOptimizationEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eRetainLinkTimeOptimizationInfoEXT ) - result += "RetainLinkTimeOptimizationInfoEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eLibrary ) - result += "Library | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingSkipTriangles ) - result += "RayTracingSkipTriangles | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingSkipAabbs ) - result += "RayTracingSkipAabbs | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingNoNullAnyHitShaders ) - result += "RayTracingNoNullAnyHitShaders | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingNoNullClosestHitShaders ) - result += "RayTracingNoNullClosestHitShaders | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingNoNullMissShaders ) - result += "RayTracingNoNullMissShaders | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingNoNullIntersectionShaders ) - result += "RayTracingNoNullIntersectionShaders | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingShaderGroupHandleCaptureReplay ) - result += "RayTracingShaderGroupHandleCaptureReplay | "; - if ( value & PipelineCreateFlagBits2KHR::eIndirectBindableNV ) - result += "IndirectBindableNV | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingAllowMotionNV ) - result += "RayTracingAllowMotionNV | "; - if ( value & PipelineCreateFlagBits2KHR::eRenderingFragmentShadingRateAttachment ) - result += "RenderingFragmentShadingRateAttachment | "; - if ( value & PipelineCreateFlagBits2KHR::eRenderingFragmentDensityMapAttachmentEXT ) - result += "RenderingFragmentDensityMapAttachmentEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingOpacityMicromapEXT ) - result += "RayTracingOpacityMicromapEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eColorAttachmentFeedbackLoopEXT ) - result += "ColorAttachmentFeedbackLoopEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eDepthStencilAttachmentFeedbackLoopEXT ) - result += "DepthStencilAttachmentFeedbackLoopEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eNoProtectedAccessEXT ) - result += "NoProtectedAccessEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eProtectedAccessOnlyEXT ) - result += "ProtectedAccessOnlyEXT | "; - if ( value & PipelineCreateFlagBits2KHR::eRayTracingDisplacementMicromapNV ) - result += "RayTracingDisplacementMicromapNV | "; - if ( value & PipelineCreateFlagBits2KHR::eDescriptorBufferEXT ) - result += "DescriptorBufferEXT | "; - if ( value & 
PipelineCreateFlagBits2KHR::eCaptureData ) - result += "CaptureData | "; - if ( value & PipelineCreateFlagBits2KHR::eIndirectBindableEXT ) - result += "IndirectBindableEXT | "; - - return "{ " + result.substr( 0, result.size() - 3 ) + " }"; - } - - VULKAN_HPP_INLINE std::string to_string( BufferUsageFlags2KHR value ) - { - if ( !value ) - return "{}"; - - std::string result; - if ( value & BufferUsageFlagBits2KHR::eTransferSrc ) - result += "TransferSrc | "; - if ( value & BufferUsageFlagBits2KHR::eTransferDst ) - result += "TransferDst | "; - if ( value & BufferUsageFlagBits2KHR::eUniformTexelBuffer ) - result += "UniformTexelBuffer | "; - if ( value & BufferUsageFlagBits2KHR::eStorageTexelBuffer ) - result += "StorageTexelBuffer | "; - if ( value & BufferUsageFlagBits2KHR::eUniformBuffer ) - result += "UniformBuffer | "; - if ( value & BufferUsageFlagBits2KHR::eStorageBuffer ) - result += "StorageBuffer | "; - if ( value & BufferUsageFlagBits2KHR::eIndexBuffer ) - result += "IndexBuffer | "; - if ( value & BufferUsageFlagBits2KHR::eVertexBuffer ) - result += "VertexBuffer | "; - if ( value & BufferUsageFlagBits2KHR::eIndirectBuffer ) - result += "IndirectBuffer | "; -#if defined( VK_ENABLE_BETA_EXTENSIONS ) - if ( value & BufferUsageFlagBits2KHR::eExecutionGraphScratchAMDX ) - result += "ExecutionGraphScratchAMDX | "; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - if ( value & BufferUsageFlagBits2KHR::eConditionalRenderingEXT ) - result += "ConditionalRenderingEXT | "; - if ( value & BufferUsageFlagBits2KHR::eShaderBindingTable ) - result += "ShaderBindingTable | "; - if ( value & BufferUsageFlagBits2KHR::eTransformFeedbackBufferEXT ) - result += "TransformFeedbackBufferEXT | "; - if ( value & BufferUsageFlagBits2KHR::eTransformFeedbackCounterBufferEXT ) - result += "TransformFeedbackCounterBufferEXT | "; - if ( value & BufferUsageFlagBits2KHR::eVideoDecodeSrc ) - result += "VideoDecodeSrc | "; - if ( value & BufferUsageFlagBits2KHR::eVideoDecodeDst ) - result += "VideoDecodeDst | "; - if ( value & BufferUsageFlagBits2KHR::eVideoEncodeDst ) - result += "VideoEncodeDst | "; - if ( value & BufferUsageFlagBits2KHR::eVideoEncodeSrc ) - result += "VideoEncodeSrc | "; - if ( value & BufferUsageFlagBits2KHR::eShaderDeviceAddress ) - result += "ShaderDeviceAddress | "; - if ( value & BufferUsageFlagBits2KHR::eAccelerationStructureBuildInputReadOnly ) - result += "AccelerationStructureBuildInputReadOnly | "; - if ( value & BufferUsageFlagBits2KHR::eAccelerationStructureStorage ) - result += "AccelerationStructureStorage | "; - if ( value & BufferUsageFlagBits2KHR::eSamplerDescriptorBufferEXT ) - result += "SamplerDescriptorBufferEXT | "; - if ( value & BufferUsageFlagBits2KHR::eResourceDescriptorBufferEXT ) - result += "ResourceDescriptorBufferEXT | "; - if ( value & BufferUsageFlagBits2KHR::ePushDescriptorsDescriptorBufferEXT ) - result += "PushDescriptorsDescriptorBufferEXT | "; - if ( value & BufferUsageFlagBits2KHR::eMicromapBuildInputReadOnlyEXT ) - result += "MicromapBuildInputReadOnlyEXT | "; - if ( value & BufferUsageFlagBits2KHR::eMicromapStorageEXT ) - result += "MicromapStorageEXT | "; - if ( value & BufferUsageFlagBits2KHR::ePreprocessBufferEXT ) - result += "PreprocessBufferEXT | "; - - return "{ " + result.substr( 0, result.size() - 3 ) + " }"; - } - //=== VK_EXT_shader_object === VULKAN_HPP_INLINE std::string to_string( ShaderCreateFlagsEXT value ) @@ -3734,6 +3730,7 @@ namespace VULKAN_HPP_NAMESPACE case Result::eErrorFragmentation: return "ErrorFragmentation"; case 
Result::eErrorInvalidOpaqueCaptureAddress: return "ErrorInvalidOpaqueCaptureAddress"; case Result::ePipelineCompileRequired: return "PipelineCompileRequired"; + case Result::eErrorNotPermitted: return "ErrorNotPermitted"; case Result::eErrorSurfaceLostKHR: return "ErrorSurfaceLostKHR"; case Result::eErrorNativeWindowInUseKHR: return "ErrorNativeWindowInUseKHR"; case Result::eSuboptimalKHR: return "SuboptimalKHR"; @@ -3748,7 +3745,6 @@ namespace VULKAN_HPP_NAMESPACE case Result::eErrorVideoProfileCodecNotSupportedKHR: return "ErrorVideoProfileCodecNotSupportedKHR"; case Result::eErrorVideoStdVersionNotSupportedKHR: return "ErrorVideoStdVersionNotSupportedKHR"; case Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT: return "ErrorInvalidDrmFormatModifierPlaneLayoutEXT"; - case Result::eErrorNotPermittedKHR: return "ErrorNotPermittedKHR"; #if defined( VK_USE_PLATFORM_WIN32_KHR ) case Result::eErrorFullScreenExclusiveModeLostEXT: return "ErrorFullScreenExclusiveModeLostEXT"; #endif /*VK_USE_PLATFORM_WIN32_KHR*/ @@ -3985,6 +3981,56 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceMaintenance4Properties: return "PhysicalDeviceMaintenance4Properties"; case StructureType::eDeviceBufferMemoryRequirements: return "DeviceBufferMemoryRequirements"; case StructureType::eDeviceImageMemoryRequirements: return "DeviceImageMemoryRequirements"; + case StructureType::ePhysicalDeviceVulkan14Features: return "PhysicalDeviceVulkan14Features"; + case StructureType::ePhysicalDeviceVulkan14Properties: return "PhysicalDeviceVulkan14Properties"; + case StructureType::eDeviceQueueGlobalPriorityCreateInfo: return "DeviceQueueGlobalPriorityCreateInfo"; + case StructureType::ePhysicalDeviceGlobalPriorityQueryFeatures: return "PhysicalDeviceGlobalPriorityQueryFeatures"; + case StructureType::eQueueFamilyGlobalPriorityProperties: return "QueueFamilyGlobalPriorityProperties"; + case StructureType::ePhysicalDeviceShaderSubgroupRotateFeatures: return "PhysicalDeviceShaderSubgroupRotateFeatures"; + case StructureType::ePhysicalDeviceShaderFloatControls2Features: return "PhysicalDeviceShaderFloatControls2Features"; + case StructureType::ePhysicalDeviceShaderExpectAssumeFeatures: return "PhysicalDeviceShaderExpectAssumeFeatures"; + case StructureType::ePhysicalDeviceLineRasterizationFeatures: return "PhysicalDeviceLineRasterizationFeatures"; + case StructureType::ePipelineRasterizationLineStateCreateInfo: return "PipelineRasterizationLineStateCreateInfo"; + case StructureType::ePhysicalDeviceLineRasterizationProperties: return "PhysicalDeviceLineRasterizationProperties"; + case StructureType::ePhysicalDeviceVertexAttributeDivisorProperties: return "PhysicalDeviceVertexAttributeDivisorProperties"; + case StructureType::ePipelineVertexInputDivisorStateCreateInfo: return "PipelineVertexInputDivisorStateCreateInfo"; + case StructureType::ePhysicalDeviceVertexAttributeDivisorFeatures: return "PhysicalDeviceVertexAttributeDivisorFeatures"; + case StructureType::ePhysicalDeviceIndexTypeUint8Features: return "PhysicalDeviceIndexTypeUint8Features"; + case StructureType::eMemoryMapInfo: return "MemoryMapInfo"; + case StructureType::eMemoryUnmapInfo: return "MemoryUnmapInfo"; + case StructureType::ePhysicalDeviceMaintenance5Features: return "PhysicalDeviceMaintenance5Features"; + case StructureType::ePhysicalDeviceMaintenance5Properties: return "PhysicalDeviceMaintenance5Properties"; + case StructureType::eRenderingAreaInfo: return "RenderingAreaInfo"; + case StructureType::eDeviceImageSubresourceInfo: return 
"DeviceImageSubresourceInfo"; + case StructureType::eSubresourceLayout2: return "SubresourceLayout2"; + case StructureType::eImageSubresource2: return "ImageSubresource2"; + case StructureType::ePipelineCreateFlags2CreateInfo: return "PipelineCreateFlags2CreateInfo"; + case StructureType::eBufferUsageFlags2CreateInfo: return "BufferUsageFlags2CreateInfo"; + case StructureType::ePhysicalDevicePushDescriptorProperties: return "PhysicalDevicePushDescriptorProperties"; + case StructureType::ePhysicalDeviceDynamicRenderingLocalReadFeatures: return "PhysicalDeviceDynamicRenderingLocalReadFeatures"; + case StructureType::eRenderingAttachmentLocationInfo: return "RenderingAttachmentLocationInfo"; + case StructureType::eRenderingInputAttachmentIndexInfo: return "RenderingInputAttachmentIndexInfo"; + case StructureType::ePhysicalDeviceMaintenance6Features: return "PhysicalDeviceMaintenance6Features"; + case StructureType::ePhysicalDeviceMaintenance6Properties: return "PhysicalDeviceMaintenance6Properties"; + case StructureType::eBindMemoryStatus: return "BindMemoryStatus"; + case StructureType::eBindDescriptorSetsInfo: return "BindDescriptorSetsInfo"; + case StructureType::ePushConstantsInfo: return "PushConstantsInfo"; + case StructureType::ePushDescriptorSetInfo: return "PushDescriptorSetInfo"; + case StructureType::ePushDescriptorSetWithTemplateInfo: return "PushDescriptorSetWithTemplateInfo"; + case StructureType::ePhysicalDevicePipelineProtectedAccessFeatures: return "PhysicalDevicePipelineProtectedAccessFeatures"; + case StructureType::ePipelineRobustnessCreateInfo: return "PipelineRobustnessCreateInfo"; + case StructureType::ePhysicalDevicePipelineRobustnessFeatures: return "PhysicalDevicePipelineRobustnessFeatures"; + case StructureType::ePhysicalDevicePipelineRobustnessProperties: return "PhysicalDevicePipelineRobustnessProperties"; + case StructureType::ePhysicalDeviceHostImageCopyFeatures: return "PhysicalDeviceHostImageCopyFeatures"; + case StructureType::ePhysicalDeviceHostImageCopyProperties: return "PhysicalDeviceHostImageCopyProperties"; + case StructureType::eMemoryToImageCopy: return "MemoryToImageCopy"; + case StructureType::eImageToMemoryCopy: return "ImageToMemoryCopy"; + case StructureType::eCopyImageToMemoryInfo: return "CopyImageToMemoryInfo"; + case StructureType::eCopyMemoryToImageInfo: return "CopyMemoryToImageInfo"; + case StructureType::eHostImageLayoutTransitionInfo: return "HostImageLayoutTransitionInfo"; + case StructureType::eCopyImageToImageInfo: return "CopyImageToImageInfo"; + case StructureType::eSubresourceHostMemcpySize: return "SubresourceHostMemcpySize"; + case StructureType::eHostImageCopyDevicePerformanceQuery: return "HostImageCopyDevicePerformanceQuery"; case StructureType::eSwapchainCreateInfoKHR: return "SwapchainCreateInfoKHR"; case StructureType::ePresentInfoKHR: return "PresentInfoKHR"; case StructureType::eDeviceGroupPresentCapabilitiesKHR: return "DeviceGroupPresentCapabilitiesKHR"; @@ -4100,9 +4146,6 @@ namespace VULKAN_HPP_NAMESPACE #endif /*VK_USE_PLATFORM_VI_NN*/ case StructureType::eImageViewAstcDecodeModeEXT: return "ImageViewAstcDecodeModeEXT"; case StructureType::ePhysicalDeviceAstcDecodeFeaturesEXT: return "PhysicalDeviceAstcDecodeFeaturesEXT"; - case StructureType::ePipelineRobustnessCreateInfoEXT: return "PipelineRobustnessCreateInfoEXT"; - case StructureType::ePhysicalDevicePipelineRobustnessFeaturesEXT: return "PhysicalDevicePipelineRobustnessFeaturesEXT"; - case StructureType::ePhysicalDevicePipelineRobustnessPropertiesEXT: return 
"PhysicalDevicePipelineRobustnessPropertiesEXT"; #if defined( VK_USE_PLATFORM_WIN32_KHR ) case StructureType::eImportMemoryWin32HandleInfoKHR: return "ImportMemoryWin32HandleInfoKHR"; case StructureType::eExportMemoryWin32HandleInfoKHR: return "ExportMemoryWin32HandleInfoKHR"; @@ -4121,7 +4164,6 @@ namespace VULKAN_HPP_NAMESPACE #endif /*VK_USE_PLATFORM_WIN32_KHR*/ case StructureType::eImportSemaphoreFdInfoKHR: return "ImportSemaphoreFdInfoKHR"; case StructureType::eSemaphoreGetFdInfoKHR: return "SemaphoreGetFdInfoKHR"; - case StructureType::ePhysicalDevicePushDescriptorPropertiesKHR: return "PhysicalDevicePushDescriptorPropertiesKHR"; case StructureType::eCommandBufferInheritanceConditionalRenderingInfoEXT: return "CommandBufferInheritanceConditionalRenderingInfoEXT"; case StructureType::ePhysicalDeviceConditionalRenderingFeaturesEXT: return "PhysicalDeviceConditionalRenderingFeaturesEXT"; case StructureType::eConditionalRenderingBeginInfoEXT: return "ConditionalRenderingBeginInfoEXT"; @@ -4271,9 +4313,6 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eVideoDecodeH265ProfileInfoKHR: return "VideoDecodeH265ProfileInfoKHR"; case StructureType::eVideoDecodeH265PictureInfoKHR: return "VideoDecodeH265PictureInfoKHR"; case StructureType::eVideoDecodeH265DpbSlotInfoKHR: return "VideoDecodeH265DpbSlotInfoKHR"; - case StructureType::eDeviceQueueGlobalPriorityCreateInfoKHR: return "DeviceQueueGlobalPriorityCreateInfoKHR"; - case StructureType::ePhysicalDeviceGlobalPriorityQueryFeaturesKHR: return "PhysicalDeviceGlobalPriorityQueryFeaturesKHR"; - case StructureType::eQueueFamilyGlobalPriorityPropertiesKHR: return "QueueFamilyGlobalPriorityPropertiesKHR"; case StructureType::eDeviceMemoryOverallocationCreateInfoAMD: return "DeviceMemoryOverallocationCreateInfoAMD"; case StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesEXT: return "PhysicalDeviceVertexAttributeDivisorPropertiesEXT"; #if defined( VK_USE_PLATFORM_GGP ) @@ -4316,9 +4355,6 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eRenderingFragmentShadingRateAttachmentInfoKHR: return "RenderingFragmentShadingRateAttachmentInfoKHR"; case StructureType::ePhysicalDeviceShaderCoreProperties2AMD: return "PhysicalDeviceShaderCoreProperties2AMD"; case StructureType::ePhysicalDeviceCoherentMemoryFeaturesAMD: return "PhysicalDeviceCoherentMemoryFeaturesAMD"; - case StructureType::ePhysicalDeviceDynamicRenderingLocalReadFeaturesKHR: return "PhysicalDeviceDynamicRenderingLocalReadFeaturesKHR"; - case StructureType::eRenderingAttachmentLocationInfoKHR: return "RenderingAttachmentLocationInfoKHR"; - case StructureType::eRenderingInputAttachmentIndexInfoKHR: return "RenderingInputAttachmentIndexInfoKHR"; case StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT: return "PhysicalDeviceShaderImageAtomicInt64FeaturesEXT"; case StructureType::ePhysicalDeviceShaderQuadControlFeaturesKHR: return "PhysicalDeviceShaderQuadControlFeaturesKHR"; case StructureType::ePhysicalDeviceMemoryBudgetPropertiesEXT: return "PhysicalDeviceMemoryBudgetPropertiesEXT"; @@ -4355,18 +4391,6 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePipelineExecutableInfoKHR: return "PipelineExecutableInfoKHR"; case StructureType::ePipelineExecutableStatisticKHR: return "PipelineExecutableStatisticKHR"; case StructureType::ePipelineExecutableInternalRepresentationKHR: return "PipelineExecutableInternalRepresentationKHR"; - case StructureType::ePhysicalDeviceHostImageCopyFeaturesEXT: return "PhysicalDeviceHostImageCopyFeaturesEXT"; - case 
StructureType::ePhysicalDeviceHostImageCopyPropertiesEXT: return "PhysicalDeviceHostImageCopyPropertiesEXT"; - case StructureType::eMemoryToImageCopyEXT: return "MemoryToImageCopyEXT"; - case StructureType::eImageToMemoryCopyEXT: return "ImageToMemoryCopyEXT"; - case StructureType::eCopyImageToMemoryInfoEXT: return "CopyImageToMemoryInfoEXT"; - case StructureType::eCopyMemoryToImageInfoEXT: return "CopyMemoryToImageInfoEXT"; - case StructureType::eHostImageLayoutTransitionInfoEXT: return "HostImageLayoutTransitionInfoEXT"; - case StructureType::eCopyImageToImageInfoEXT: return "CopyImageToImageInfoEXT"; - case StructureType::eSubresourceHostMemcpySizeEXT: return "SubresourceHostMemcpySizeEXT"; - case StructureType::eHostImageCopyDevicePerformanceQueryEXT: return "HostImageCopyDevicePerformanceQueryEXT"; - case StructureType::eMemoryMapInfoKHR: return "MemoryMapInfoKHR"; - case StructureType::eMemoryUnmapInfoKHR: return "MemoryUnmapInfoKHR"; case StructureType::ePhysicalDeviceMapMemoryPlacedFeaturesEXT: return "PhysicalDeviceMapMemoryPlacedFeaturesEXT"; case StructureType::ePhysicalDeviceMapMemoryPlacedPropertiesEXT: return "PhysicalDeviceMapMemoryPlacedPropertiesEXT"; case StructureType::eMemoryMapPlacedInfoEXT: return "MemoryMapPlacedInfoEXT"; @@ -4567,7 +4591,6 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eSamplerBorderColorComponentMappingCreateInfoEXT: return "SamplerBorderColorComponentMappingCreateInfoEXT"; case StructureType::ePhysicalDevicePageableDeviceLocalMemoryFeaturesEXT: return "PhysicalDevicePageableDeviceLocalMemoryFeaturesEXT"; case StructureType::ePhysicalDeviceShaderCorePropertiesARM: return "PhysicalDeviceShaderCorePropertiesARM"; - case StructureType::ePhysicalDeviceShaderSubgroupRotateFeaturesKHR: return "PhysicalDeviceShaderSubgroupRotateFeaturesKHR"; case StructureType::eDeviceQueueShaderCoreControlCreateInfoARM: return "DeviceQueueShaderCoreControlCreateInfoARM"; case StructureType::ePhysicalDeviceSchedulingControlsFeaturesARM: return "PhysicalDeviceSchedulingControlsFeaturesARM"; case StructureType::ePhysicalDeviceSchedulingControlsPropertiesARM: return "PhysicalDeviceSchedulingControlsPropertiesARM"; @@ -4623,20 +4646,11 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eOpticalFlowExecuteInfoNV: return "OpticalFlowExecuteInfoNV"; case StructureType::eOpticalFlowSessionCreatePrivateDataInfoNV: return "OpticalFlowSessionCreatePrivateDataInfoNV"; case StructureType::ePhysicalDeviceLegacyDitheringFeaturesEXT: return "PhysicalDeviceLegacyDitheringFeaturesEXT"; - case StructureType::ePhysicalDevicePipelineProtectedAccessFeaturesEXT: return "PhysicalDevicePipelineProtectedAccessFeaturesEXT"; #if defined( VK_USE_PLATFORM_ANDROID_KHR ) case StructureType::ePhysicalDeviceExternalFormatResolveFeaturesANDROID: return "PhysicalDeviceExternalFormatResolveFeaturesANDROID"; case StructureType::ePhysicalDeviceExternalFormatResolvePropertiesANDROID: return "PhysicalDeviceExternalFormatResolvePropertiesANDROID"; case StructureType::eAndroidHardwareBufferFormatResolvePropertiesANDROID: return "AndroidHardwareBufferFormatResolvePropertiesANDROID"; #endif /*VK_USE_PLATFORM_ANDROID_KHR*/ - case StructureType::ePhysicalDeviceMaintenance5FeaturesKHR: return "PhysicalDeviceMaintenance5FeaturesKHR"; - case StructureType::ePhysicalDeviceMaintenance5PropertiesKHR: return "PhysicalDeviceMaintenance5PropertiesKHR"; - case StructureType::eRenderingAreaInfoKHR: return "RenderingAreaInfoKHR"; - case StructureType::eDeviceImageSubresourceInfoKHR: return 
"DeviceImageSubresourceInfoKHR"; - case StructureType::eSubresourceLayout2KHR: return "SubresourceLayout2KHR"; - case StructureType::eImageSubresource2KHR: return "ImageSubresource2KHR"; - case StructureType::ePipelineCreateFlags2CreateInfoKHR: return "PipelineCreateFlags2CreateInfoKHR"; - case StructureType::eBufferUsageFlags2CreateInfoKHR: return "BufferUsageFlags2CreateInfoKHR"; case StructureType::ePhysicalDeviceAntiLagFeaturesAMD: return "PhysicalDeviceAntiLagFeaturesAMD"; case StructureType::eAntiLagDataAMD: return "AntiLagDataAMD"; case StructureType::eAntiLagPresentationInfoAMD: return "AntiLagPresentationInfoAMD"; @@ -4717,10 +4731,6 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::eSamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM: return "SamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM"; case StructureType::ePhysicalDeviceCubicClampFeaturesQCOM: return "PhysicalDeviceCubicClampFeaturesQCOM"; case StructureType::ePhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT: return "PhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT"; - case StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesKHR: return "PhysicalDeviceVertexAttributeDivisorPropertiesKHR"; - case StructureType::ePipelineVertexInputDivisorStateCreateInfoKHR: return "PipelineVertexInputDivisorStateCreateInfoKHR"; - case StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesKHR: return "PhysicalDeviceVertexAttributeDivisorFeaturesKHR"; - case StructureType::ePhysicalDeviceShaderFloatControls2FeaturesKHR: return "PhysicalDeviceShaderFloatControls2FeaturesKHR"; #if defined( VK_USE_PLATFORM_SCREEN_QNX ) case StructureType::eScreenBufferPropertiesQNX: return "ScreenBufferPropertiesQNX"; case StructureType::eScreenBufferFormatPropertiesQNX: return "ScreenBufferFormatPropertiesQNX"; @@ -4729,19 +4739,7 @@ namespace VULKAN_HPP_NAMESPACE case StructureType::ePhysicalDeviceExternalMemoryScreenBufferFeaturesQNX: return "PhysicalDeviceExternalMemoryScreenBufferFeaturesQNX"; #endif /*VK_USE_PLATFORM_SCREEN_QNX*/ case StructureType::ePhysicalDeviceLayeredDriverPropertiesMSFT: return "PhysicalDeviceLayeredDriverPropertiesMSFT"; - case StructureType::ePhysicalDeviceIndexTypeUint8FeaturesKHR: return "PhysicalDeviceIndexTypeUint8FeaturesKHR"; - case StructureType::ePhysicalDeviceLineRasterizationFeaturesKHR: return "PhysicalDeviceLineRasterizationFeaturesKHR"; - case StructureType::ePipelineRasterizationLineStateCreateInfoKHR: return "PipelineRasterizationLineStateCreateInfoKHR"; - case StructureType::ePhysicalDeviceLineRasterizationPropertiesKHR: return "PhysicalDeviceLineRasterizationPropertiesKHR"; case StructureType::eCalibratedTimestampInfoKHR: return "CalibratedTimestampInfoKHR"; - case StructureType::ePhysicalDeviceShaderExpectAssumeFeaturesKHR: return "PhysicalDeviceShaderExpectAssumeFeaturesKHR"; - case StructureType::ePhysicalDeviceMaintenance6FeaturesKHR: return "PhysicalDeviceMaintenance6FeaturesKHR"; - case StructureType::ePhysicalDeviceMaintenance6PropertiesKHR: return "PhysicalDeviceMaintenance6PropertiesKHR"; - case StructureType::eBindMemoryStatusKHR: return "BindMemoryStatusKHR"; - case StructureType::eBindDescriptorSetsInfoKHR: return "BindDescriptorSetsInfoKHR"; - case StructureType::ePushConstantsInfoKHR: return "PushConstantsInfoKHR"; - case StructureType::ePushDescriptorSetInfoKHR: return "PushDescriptorSetInfoKHR"; - case StructureType::ePushDescriptorSetWithTemplateInfoKHR: return "PushDescriptorSetWithTemplateInfoKHR"; case StructureType::eSetDescriptorBufferOffsetsInfoEXT: 
return "SetDescriptorBufferOffsetsInfoEXT"; case StructureType::eBindDescriptorBufferEmbeddedSamplersInfoEXT: return "BindDescriptorBufferEmbeddedSamplersInfoEXT"; case StructureType::ePhysicalDeviceDescriptorPoolOverallocationFeaturesNV: return "PhysicalDeviceDescriptorPoolOverallocationFeaturesNV"; @@ -5131,6 +5129,8 @@ namespace VULKAN_HPP_NAMESPACE case Format::eAstc10x10SfloatBlock: return "Astc10x10SfloatBlock"; case Format::eAstc12x10SfloatBlock: return "Astc12x10SfloatBlock"; case Format::eAstc12x12SfloatBlock: return "Astc12x12SfloatBlock"; + case Format::eA1B5G5R5UnormPack16: return "A1B5G5R5UnormPack16"; + case Format::eA8Unorm: return "A8Unorm"; case Format::ePvrtc12BppUnormBlockIMG: return "Pvrtc12BppUnormBlockIMG"; case Format::ePvrtc14BppUnormBlockIMG: return "Pvrtc14BppUnormBlockIMG"; case Format::ePvrtc22BppUnormBlockIMG: return "Pvrtc22BppUnormBlockIMG"; @@ -5140,8 +5140,6 @@ namespace VULKAN_HPP_NAMESPACE case Format::ePvrtc22BppSrgbBlockIMG: return "Pvrtc22BppSrgbBlockIMG"; case Format::ePvrtc24BppSrgbBlockIMG: return "Pvrtc24BppSrgbBlockIMG"; case Format::eR16G16Sfixed5NV: return "R16G16Sfixed5NV"; - case Format::eA1B5G5R5UnormPack16KHR: return "A1B5G5R5UnormPack16KHR"; - case Format::eA8UnormKHR: return "A8UnormKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -5248,12 +5246,12 @@ namespace VULKAN_HPP_NAMESPACE case ImageUsageFlagBits::eDepthStencilAttachment: return "DepthStencilAttachment"; case ImageUsageFlagBits::eTransientAttachment: return "TransientAttachment"; case ImageUsageFlagBits::eInputAttachment: return "InputAttachment"; + case ImageUsageFlagBits::eHostTransfer: return "HostTransfer"; case ImageUsageFlagBits::eVideoDecodeDstKHR: return "VideoDecodeDstKHR"; case ImageUsageFlagBits::eVideoDecodeSrcKHR: return "VideoDecodeSrcKHR"; case ImageUsageFlagBits::eVideoDecodeDpbKHR: return "VideoDecodeDpbKHR"; case ImageUsageFlagBits::eFragmentDensityMapEXT: return "FragmentDensityMapEXT"; case ImageUsageFlagBits::eFragmentShadingRateAttachmentKHR: return "FragmentShadingRateAttachmentKHR"; - case ImageUsageFlagBits::eHostTransferEXT: return "HostTransferEXT"; case ImageUsageFlagBits::eVideoEncodeDstKHR: return "VideoEncodeDstKHR"; case ImageUsageFlagBits::eVideoEncodeSrcKHR: return "VideoEncodeSrcKHR"; case ImageUsageFlagBits::eVideoEncodeDpbKHR: return "VideoEncodeDpbKHR"; @@ -5642,6 +5640,7 @@ namespace VULKAN_HPP_NAMESPACE case ImageLayout::eStencilReadOnlyOptimal: return "StencilReadOnlyOptimal"; case ImageLayout::eReadOnlyOptimal: return "ReadOnlyOptimal"; case ImageLayout::eAttachmentOptimal: return "AttachmentOptimal"; + case ImageLayout::eRenderingLocalRead: return "RenderingLocalRead"; case ImageLayout::ePresentSrcKHR: return "PresentSrcKHR"; case ImageLayout::eVideoDecodeDstKHR: return "VideoDecodeDstKHR"; case ImageLayout::eVideoDecodeSrcKHR: return "VideoDecodeSrcKHR"; @@ -5649,7 +5648,6 @@ namespace VULKAN_HPP_NAMESPACE case ImageLayout::eSharedPresentKHR: return "SharedPresentKHR"; case ImageLayout::eFragmentDensityMapOptimalEXT: return "FragmentDensityMapOptimalEXT"; case ImageLayout::eFragmentShadingRateAttachmentOptimalKHR: return "FragmentShadingRateAttachmentOptimalKHR"; - case ImageLayout::eRenderingLocalReadKHR: return "RenderingLocalReadKHR"; case ImageLayout::eVideoEncodeDstKHR: return "VideoEncodeDstKHR"; case ImageLayout::eVideoEncodeSrcKHR: return "VideoEncodeSrcKHR"; case ImageLayout::eVideoEncodeDpbKHR: return "VideoEncodeDpbKHR"; @@ -5868,6 +5866,7 @@ namespace 
VULKAN_HPP_NAMESPACE case DynamicState::eRasterizerDiscardEnable: return "RasterizerDiscardEnable"; case DynamicState::eDepthBiasEnable: return "DepthBiasEnable"; case DynamicState::ePrimitiveRestartEnable: return "PrimitiveRestartEnable"; + case DynamicState::eLineStipple: return "LineStipple"; case DynamicState::eViewportWScalingNV: return "ViewportWScalingNV"; case DynamicState::eDiscardRectangleEXT: return "DiscardRectangleEXT"; case DynamicState::eDiscardRectangleEnableEXT: return "DiscardRectangleEnableEXT"; @@ -5915,7 +5914,6 @@ namespace VULKAN_HPP_NAMESPACE case DynamicState::eRepresentativeFragmentTestEnableNV: return "RepresentativeFragmentTestEnableNV"; case DynamicState::eCoverageReductionModeNV: return "CoverageReductionModeNV"; case DynamicState::eAttachmentFeedbackLoopEnableEXT: return "AttachmentFeedbackLoopEnableEXT"; - case DynamicState::eLineStippleKHR: return "LineStippleKHR"; case DynamicState::eDepthClampRangeEXT: return "DepthClampRangeEXT"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } @@ -5966,6 +5964,8 @@ namespace VULKAN_HPP_NAMESPACE case PipelineCreateFlagBits::eDispatchBase: return "DispatchBase"; case PipelineCreateFlagBits::eFailOnPipelineCompileRequired: return "FailOnPipelineCompileRequired"; case PipelineCreateFlagBits::eEarlyReturnOnFailure: return "EarlyReturnOnFailure"; + case PipelineCreateFlagBits::eNoProtectedAccess: return "NoProtectedAccess"; + case PipelineCreateFlagBits::eProtectedAccessOnly: return "ProtectedAccessOnly"; case PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR: return "RayTracingNoNullAnyHitShadersKHR"; case PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR: return "RayTracingNoNullClosestHitShadersKHR"; case PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR: return "RayTracingNoNullMissShadersKHR"; @@ -5990,8 +5990,6 @@ namespace VULKAN_HPP_NAMESPACE #if defined( VK_ENABLE_BETA_EXTENSIONS ) case PipelineCreateFlagBits::eRayTracingDisplacementMicromapNV: return "RayTracingDisplacementMicromapNV"; #endif /*VK_ENABLE_BETA_EXTENSIONS*/ - case PipelineCreateFlagBits::eNoProtectedAccessEXT: return "NoProtectedAccessEXT"; - case PipelineCreateFlagBits::eProtectedAccessOnlyEXT: return "ProtectedAccessOnlyEXT"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6233,7 +6231,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool: return "UpdateAfterBindPool"; - case DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR: return "PushDescriptorKHR"; + case DescriptorSetLayoutCreateFlagBits::ePushDescriptor: return "PushDescriptor"; case DescriptorSetLayoutCreateFlagBits::eDescriptorBufferEXT: return "DescriptorBufferEXT"; case DescriptorSetLayoutCreateFlagBits::eEmbeddedImmutableSamplersEXT: return "EmbeddedImmutableSamplersEXT"; case DescriptorSetLayoutCreateFlagBits::eIndirectBindableNV: return "IndirectBindableNV"; @@ -6326,7 +6324,7 @@ namespace VULKAN_HPP_NAMESPACE case AttachmentLoadOp::eLoad: return "Load"; case AttachmentLoadOp::eClear: return "Clear"; case AttachmentLoadOp::eDontCare: return "DontCare"; - case AttachmentLoadOp::eNoneKHR: return "NoneKHR"; + case AttachmentLoadOp::eNone: return "None"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6468,8 +6466,8 @@ namespace VULKAN_HPP_NAMESPACE { case IndexType::eUint16: return "Uint16"; case IndexType::eUint32: return "Uint32"; + 
case IndexType::eUint8: return "Uint8"; case IndexType::eNoneKHR: return "NoneKHR"; - case IndexType::eUint8KHR: return "Uint8KHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6510,9 +6508,9 @@ namespace VULKAN_HPP_NAMESPACE case SubgroupFeatureFlagBits::eShuffleRelative: return "ShuffleRelative"; case SubgroupFeatureFlagBits::eClustered: return "Clustered"; case SubgroupFeatureFlagBits::eQuad: return "Quad"; + case SubgroupFeatureFlagBits::eRotate: return "Rotate"; + case SubgroupFeatureFlagBits::eRotateClustered: return "RotateClustered"; case SubgroupFeatureFlagBits::ePartitionedNV: return "PartitionedNV"; - case SubgroupFeatureFlagBits::eRotateKHR: return "RotateKHR"; - case SubgroupFeatureFlagBits::eRotateClusteredKHR: return "RotateClusteredKHR"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6603,7 +6601,7 @@ namespace VULKAN_HPP_NAMESPACE switch ( value ) { case DescriptorUpdateTemplateType::eDescriptorSet: return "DescriptorSet"; - case DescriptorUpdateTemplateType::ePushDescriptorsKHR: return "PushDescriptorsKHR"; + case DescriptorUpdateTemplateType::ePushDescriptors: return "PushDescriptors"; default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; } } @@ -6998,7 +6996,6 @@ namespace VULKAN_HPP_NAMESPACE case FormatFeatureFlagBits2::eBlitSrc: return "BlitSrc"; case FormatFeatureFlagBits2::eBlitDst: return "BlitDst"; case FormatFeatureFlagBits2::eSampledImageFilterLinear: return "SampledImageFilterLinear"; - case FormatFeatureFlagBits2::eSampledImageFilterCubic: return "SampledImageFilterCubic"; case FormatFeatureFlagBits2::eTransferSrc: return "TransferSrc"; case FormatFeatureFlagBits2::eTransferDst: return "TransferDst"; case FormatFeatureFlagBits2::eSampledImageFilterMinmax: return "SampledImageFilterMinmax"; @@ -7013,12 +7010,13 @@ namespace VULKAN_HPP_NAMESPACE case FormatFeatureFlagBits2::eStorageReadWithoutFormat: return "StorageReadWithoutFormat"; case FormatFeatureFlagBits2::eStorageWriteWithoutFormat: return "StorageWriteWithoutFormat"; case FormatFeatureFlagBits2::eSampledImageDepthComparison: return "SampledImageDepthComparison"; + case FormatFeatureFlagBits2::eSampledImageFilterCubic: return "SampledImageFilterCubic"; + case FormatFeatureFlagBits2::eHostImageTransfer: return "HostImageTransfer"; case FormatFeatureFlagBits2::eVideoDecodeOutputKHR: return "VideoDecodeOutputKHR"; case FormatFeatureFlagBits2::eVideoDecodeDpbKHR: return "VideoDecodeDpbKHR"; case FormatFeatureFlagBits2::eAccelerationStructureVertexBufferKHR: return "AccelerationStructureVertexBufferKHR"; case FormatFeatureFlagBits2::eFragmentDensityMapEXT: return "FragmentDensityMapEXT"; case FormatFeatureFlagBits2::eFragmentShadingRateAttachmentKHR: return "FragmentShadingRateAttachmentKHR"; - case FormatFeatureFlagBits2::eHostImageTransferEXT: return "HostImageTransferEXT"; case FormatFeatureFlagBits2::eVideoEncodeInputKHR: return "VideoEncodeInputKHR"; case FormatFeatureFlagBits2::eVideoEncodeDpbKHR: return "VideoEncodeDpbKHR"; case FormatFeatureFlagBits2::eLinearColorAttachmentNV: return "LinearColorAttachmentNV"; @@ -7035,6 +7033,156 @@ namespace VULKAN_HPP_NAMESPACE } } + //=== VK_VERSION_1_4 === + + VULKAN_HPP_INLINE std::string to_string( QueueGlobalPriority value ) + { + switch ( value ) + { + case QueueGlobalPriority::eLow: return "Low"; + case QueueGlobalPriority::eMedium: return "Medium"; + case QueueGlobalPriority::eHigh: return "High"; 
+ case QueueGlobalPriority::eRealtime: return "Realtime"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( LineRasterizationMode value ) + { + switch ( value ) + { + case LineRasterizationMode::eDefault: return "Default"; + case LineRasterizationMode::eRectangular: return "Rectangular"; + case LineRasterizationMode::eBresenham: return "Bresenham"; + case LineRasterizationMode::eRectangularSmooth: return "RectangularSmooth"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( MemoryUnmapFlagBits value ) + { + switch ( value ) + { + case MemoryUnmapFlagBits::eReserveEXT: return "ReserveEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( PipelineCreateFlagBits2 value ) + { + switch ( value ) + { + case PipelineCreateFlagBits2::eDisableOptimization: return "DisableOptimization"; + case PipelineCreateFlagBits2::eAllowDerivatives: return "AllowDerivatives"; + case PipelineCreateFlagBits2::eDerivative: return "Derivative"; + case PipelineCreateFlagBits2::eViewIndexFromDeviceIndex: return "ViewIndexFromDeviceIndex"; + case PipelineCreateFlagBits2::eDispatchBase: return "DispatchBase"; + case PipelineCreateFlagBits2::eFailOnPipelineCompileRequired: return "FailOnPipelineCompileRequired"; + case PipelineCreateFlagBits2::eEarlyReturnOnFailure: return "EarlyReturnOnFailure"; + case PipelineCreateFlagBits2::eNoProtectedAccess: return "NoProtectedAccess"; + case PipelineCreateFlagBits2::eProtectedAccessOnly: return "ProtectedAccessOnly"; +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + case PipelineCreateFlagBits2::eExecutionGraphAMDX: return "ExecutionGraphAMDX"; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + case PipelineCreateFlagBits2::eEnableLegacyDitheringEXT: return "EnableLegacyDitheringEXT"; + case PipelineCreateFlagBits2::eDeferCompileNV: return "DeferCompileNV"; + case PipelineCreateFlagBits2::eCaptureStatisticsKHR: return "CaptureStatisticsKHR"; + case PipelineCreateFlagBits2::eCaptureInternalRepresentationsKHR: return "CaptureInternalRepresentationsKHR"; + case PipelineCreateFlagBits2::eLinkTimeOptimizationEXT: return "LinkTimeOptimizationEXT"; + case PipelineCreateFlagBits2::eRetainLinkTimeOptimizationInfoEXT: return "RetainLinkTimeOptimizationInfoEXT"; + case PipelineCreateFlagBits2::eLibraryKHR: return "LibraryKHR"; + case PipelineCreateFlagBits2::eRayTracingSkipTrianglesKHR: return "RayTracingSkipTrianglesKHR"; + case PipelineCreateFlagBits2::eRayTracingSkipAabbsKHR: return "RayTracingSkipAabbsKHR"; + case PipelineCreateFlagBits2::eRayTracingNoNullAnyHitShadersKHR: return "RayTracingNoNullAnyHitShadersKHR"; + case PipelineCreateFlagBits2::eRayTracingNoNullClosestHitShadersKHR: return "RayTracingNoNullClosestHitShadersKHR"; + case PipelineCreateFlagBits2::eRayTracingNoNullMissShadersKHR: return "RayTracingNoNullMissShadersKHR"; + case PipelineCreateFlagBits2::eRayTracingNoNullIntersectionShadersKHR: return "RayTracingNoNullIntersectionShadersKHR"; + case PipelineCreateFlagBits2::eRayTracingShaderGroupHandleCaptureReplayKHR: return "RayTracingShaderGroupHandleCaptureReplayKHR"; + case PipelineCreateFlagBits2::eIndirectBindableNV: return "IndirectBindableNV"; + case PipelineCreateFlagBits2::eRayTracingAllowMotionNV: return "RayTracingAllowMotionNV"; + case 
PipelineCreateFlagBits2::eRenderingFragmentShadingRateAttachmentKHR: return "RenderingFragmentShadingRateAttachmentKHR"; + case PipelineCreateFlagBits2::eRenderingFragmentDensityMapAttachmentEXT: return "RenderingFragmentDensityMapAttachmentEXT"; + case PipelineCreateFlagBits2::eRayTracingOpacityMicromapEXT: return "RayTracingOpacityMicromapEXT"; + case PipelineCreateFlagBits2::eColorAttachmentFeedbackLoopEXT: return "ColorAttachmentFeedbackLoopEXT"; + case PipelineCreateFlagBits2::eDepthStencilAttachmentFeedbackLoopEXT: return "DepthStencilAttachmentFeedbackLoopEXT"; + case PipelineCreateFlagBits2::eRayTracingDisplacementMicromapNV: return "RayTracingDisplacementMicromapNV"; + case PipelineCreateFlagBits2::eDescriptorBufferEXT: return "DescriptorBufferEXT"; + case PipelineCreateFlagBits2::eCaptureDataKHR: return "CaptureDataKHR"; + case PipelineCreateFlagBits2::eIndirectBindableEXT: return "IndirectBindableEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( BufferUsageFlagBits2 value ) + { + switch ( value ) + { + case BufferUsageFlagBits2::eTransferSrc: return "TransferSrc"; + case BufferUsageFlagBits2::eTransferDst: return "TransferDst"; + case BufferUsageFlagBits2::eUniformTexelBuffer: return "UniformTexelBuffer"; + case BufferUsageFlagBits2::eStorageTexelBuffer: return "StorageTexelBuffer"; + case BufferUsageFlagBits2::eUniformBuffer: return "UniformBuffer"; + case BufferUsageFlagBits2::eStorageBuffer: return "StorageBuffer"; + case BufferUsageFlagBits2::eIndexBuffer: return "IndexBuffer"; + case BufferUsageFlagBits2::eVertexBuffer: return "VertexBuffer"; + case BufferUsageFlagBits2::eIndirectBuffer: return "IndirectBuffer"; + case BufferUsageFlagBits2::eShaderDeviceAddress: return "ShaderDeviceAddress"; +#if defined( VK_ENABLE_BETA_EXTENSIONS ) + case BufferUsageFlagBits2::eExecutionGraphScratchAMDX: return "ExecutionGraphScratchAMDX"; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + case BufferUsageFlagBits2::eConditionalRenderingEXT: return "ConditionalRenderingEXT"; + case BufferUsageFlagBits2::eShaderBindingTableKHR: return "ShaderBindingTableKHR"; + case BufferUsageFlagBits2::eTransformFeedbackBufferEXT: return "TransformFeedbackBufferEXT"; + case BufferUsageFlagBits2::eTransformFeedbackCounterBufferEXT: return "TransformFeedbackCounterBufferEXT"; + case BufferUsageFlagBits2::eVideoDecodeSrcKHR: return "VideoDecodeSrcKHR"; + case BufferUsageFlagBits2::eVideoDecodeDstKHR: return "VideoDecodeDstKHR"; + case BufferUsageFlagBits2::eVideoEncodeDstKHR: return "VideoEncodeDstKHR"; + case BufferUsageFlagBits2::eVideoEncodeSrcKHR: return "VideoEncodeSrcKHR"; + case BufferUsageFlagBits2::eAccelerationStructureBuildInputReadOnlyKHR: return "AccelerationStructureBuildInputReadOnlyKHR"; + case BufferUsageFlagBits2::eAccelerationStructureStorageKHR: return "AccelerationStructureStorageKHR"; + case BufferUsageFlagBits2::eSamplerDescriptorBufferEXT: return "SamplerDescriptorBufferEXT"; + case BufferUsageFlagBits2::eResourceDescriptorBufferEXT: return "ResourceDescriptorBufferEXT"; + case BufferUsageFlagBits2::ePushDescriptorsDescriptorBufferEXT: return "PushDescriptorsDescriptorBufferEXT"; + case BufferUsageFlagBits2::eMicromapBuildInputReadOnlyEXT: return "MicromapBuildInputReadOnlyEXT"; + case BufferUsageFlagBits2::eMicromapStorageEXT: return "MicromapStorageEXT"; + case BufferUsageFlagBits2::ePreprocessBufferEXT: return "PreprocessBufferEXT"; + default: return "invalid ( " + 
VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( PipelineRobustnessBufferBehavior value ) + { + switch ( value ) + { + case PipelineRobustnessBufferBehavior::eDeviceDefault: return "DeviceDefault"; + case PipelineRobustnessBufferBehavior::eDisabled: return "Disabled"; + case PipelineRobustnessBufferBehavior::eRobustBufferAccess: return "RobustBufferAccess"; + case PipelineRobustnessBufferBehavior::eRobustBufferAccess2: return "RobustBufferAccess2"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( PipelineRobustnessImageBehavior value ) + { + switch ( value ) + { + case PipelineRobustnessImageBehavior::eDeviceDefault: return "DeviceDefault"; + case PipelineRobustnessImageBehavior::eDisabled: return "Disabled"; + case PipelineRobustnessImageBehavior::eRobustImageAccess: return "RobustImageAccess"; + case PipelineRobustnessImageBehavior::eRobustImageAccess2: return "RobustImageAccess2"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + VULKAN_HPP_INLINE std::string to_string( HostImageCopyFlagBits value ) + { + switch ( value ) + { + case HostImageCopyFlagBits::eMemcpy: return "Memcpy"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + //=== VK_KHR_surface === VULKAN_HPP_INLINE std::string to_string( SurfaceTransformFlagBitsKHR value ) @@ -7651,32 +7799,6 @@ namespace VULKAN_HPP_NAMESPACE } #endif /*VK_USE_PLATFORM_VI_NN*/ - //=== VK_EXT_pipeline_robustness === - - VULKAN_HPP_INLINE std::string to_string( PipelineRobustnessBufferBehaviorEXT value ) - { - switch ( value ) - { - case PipelineRobustnessBufferBehaviorEXT::eDeviceDefault: return "DeviceDefault"; - case PipelineRobustnessBufferBehaviorEXT::eDisabled: return "Disabled"; - case PipelineRobustnessBufferBehaviorEXT::eRobustBufferAccess: return "RobustBufferAccess"; - case PipelineRobustnessBufferBehaviorEXT::eRobustBufferAccess2: return "RobustBufferAccess2"; - default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; - } - } - - VULKAN_HPP_INLINE std::string to_string( PipelineRobustnessImageBehaviorEXT value ) - { - switch ( value ) - { - case PipelineRobustnessImageBehaviorEXT::eDeviceDefault: return "DeviceDefault"; - case PipelineRobustnessImageBehaviorEXT::eDisabled: return "Disabled"; - case PipelineRobustnessImageBehaviorEXT::eRobustImageAccess: return "RobustImageAccess"; - case PipelineRobustnessImageBehaviorEXT::eRobustImageAccess2: return "RobustImageAccess2"; - default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; - } - } - //=== VK_EXT_conditional_rendering === VULKAN_HPP_INLINE std::string to_string( ConditionalRenderingFlagBitsEXT value ) @@ -8167,20 +8289,6 @@ namespace VULKAN_HPP_NAMESPACE return "(void)"; } - //=== VK_KHR_global_priority === - - VULKAN_HPP_INLINE std::string to_string( QueueGlobalPriorityKHR value ) - { - switch ( value ) - { - case QueueGlobalPriorityKHR::eLow: return "Low"; - case QueueGlobalPriorityKHR::eMedium: return "Medium"; - case QueueGlobalPriorityKHR::eHigh: return "High"; - case QueueGlobalPriorityKHR::eRealtime: return "Realtime"; - default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; - } - } - //=== VK_AMD_memory_overallocation_behavior === VULKAN_HPP_INLINE std::string 
to_string( MemoryOverallocationBehaviorAMD value ) @@ -8384,28 +8492,6 @@ namespace VULKAN_HPP_NAMESPACE } } - //=== VK_EXT_host_image_copy === - - VULKAN_HPP_INLINE std::string to_string( HostImageCopyFlagBitsEXT value ) - { - switch ( value ) - { - case HostImageCopyFlagBitsEXT::eMemcpy: return "Memcpy"; - default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; - } - } - - //=== VK_KHR_map_memory2 === - - VULKAN_HPP_INLINE std::string to_string( MemoryUnmapFlagBitsKHR value ) - { - switch ( value ) - { - case MemoryUnmapFlagBitsKHR::eReserveEXT: return "ReserveEXT"; - default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; - } - } - //=== VK_EXT_surface_maintenance1 === VULKAN_HPP_INLINE std::string to_string( PresentScalingFlagBitsEXT value ) @@ -9075,90 +9161,6 @@ namespace VULKAN_HPP_NAMESPACE } } - //=== VK_KHR_maintenance5 === - - VULKAN_HPP_INLINE std::string to_string( PipelineCreateFlagBits2KHR value ) - { - switch ( value ) - { - case PipelineCreateFlagBits2KHR::eDisableOptimization: return "DisableOptimization"; - case PipelineCreateFlagBits2KHR::eAllowDerivatives: return "AllowDerivatives"; - case PipelineCreateFlagBits2KHR::eDerivative: return "Derivative"; -#if defined( VK_ENABLE_BETA_EXTENSIONS ) - case PipelineCreateFlagBits2KHR::eExecutionGraphAMDX: return "ExecutionGraphAMDX"; -#endif /*VK_ENABLE_BETA_EXTENSIONS*/ - case PipelineCreateFlagBits2KHR::eEnableLegacyDitheringEXT: return "EnableLegacyDitheringEXT"; - case PipelineCreateFlagBits2KHR::eViewIndexFromDeviceIndex: return "ViewIndexFromDeviceIndex"; - case PipelineCreateFlagBits2KHR::eDispatchBase: return "DispatchBase"; - case PipelineCreateFlagBits2KHR::eDeferCompileNV: return "DeferCompileNV"; - case PipelineCreateFlagBits2KHR::eCaptureStatistics: return "CaptureStatistics"; - case PipelineCreateFlagBits2KHR::eCaptureInternalRepresentations: return "CaptureInternalRepresentations"; - case PipelineCreateFlagBits2KHR::eFailOnPipelineCompileRequired: return "FailOnPipelineCompileRequired"; - case PipelineCreateFlagBits2KHR::eEarlyReturnOnFailure: return "EarlyReturnOnFailure"; - case PipelineCreateFlagBits2KHR::eLinkTimeOptimizationEXT: return "LinkTimeOptimizationEXT"; - case PipelineCreateFlagBits2KHR::eRetainLinkTimeOptimizationInfoEXT: return "RetainLinkTimeOptimizationInfoEXT"; - case PipelineCreateFlagBits2KHR::eLibrary: return "Library"; - case PipelineCreateFlagBits2KHR::eRayTracingSkipTriangles: return "RayTracingSkipTriangles"; - case PipelineCreateFlagBits2KHR::eRayTracingSkipAabbs: return "RayTracingSkipAabbs"; - case PipelineCreateFlagBits2KHR::eRayTracingNoNullAnyHitShaders: return "RayTracingNoNullAnyHitShaders"; - case PipelineCreateFlagBits2KHR::eRayTracingNoNullClosestHitShaders: return "RayTracingNoNullClosestHitShaders"; - case PipelineCreateFlagBits2KHR::eRayTracingNoNullMissShaders: return "RayTracingNoNullMissShaders"; - case PipelineCreateFlagBits2KHR::eRayTracingNoNullIntersectionShaders: return "RayTracingNoNullIntersectionShaders"; - case PipelineCreateFlagBits2KHR::eRayTracingShaderGroupHandleCaptureReplay: return "RayTracingShaderGroupHandleCaptureReplay"; - case PipelineCreateFlagBits2KHR::eIndirectBindableNV: return "IndirectBindableNV"; - case PipelineCreateFlagBits2KHR::eRayTracingAllowMotionNV: return "RayTracingAllowMotionNV"; - case PipelineCreateFlagBits2KHR::eRenderingFragmentShadingRateAttachment: return "RenderingFragmentShadingRateAttachment"; - case 
PipelineCreateFlagBits2KHR::eRenderingFragmentDensityMapAttachmentEXT: return "RenderingFragmentDensityMapAttachmentEXT";
-      case PipelineCreateFlagBits2KHR::eRayTracingOpacityMicromapEXT: return "RayTracingOpacityMicromapEXT";
-      case PipelineCreateFlagBits2KHR::eColorAttachmentFeedbackLoopEXT: return "ColorAttachmentFeedbackLoopEXT";
-      case PipelineCreateFlagBits2KHR::eDepthStencilAttachmentFeedbackLoopEXT: return "DepthStencilAttachmentFeedbackLoopEXT";
-      case PipelineCreateFlagBits2KHR::eNoProtectedAccessEXT: return "NoProtectedAccessEXT";
-      case PipelineCreateFlagBits2KHR::eProtectedAccessOnlyEXT: return "ProtectedAccessOnlyEXT";
-      case PipelineCreateFlagBits2KHR::eRayTracingDisplacementMicromapNV: return "RayTracingDisplacementMicromapNV";
-      case PipelineCreateFlagBits2KHR::eDescriptorBufferEXT: return "DescriptorBufferEXT";
-      case PipelineCreateFlagBits2KHR::eCaptureData: return "CaptureData";
-      case PipelineCreateFlagBits2KHR::eIndirectBindableEXT: return "IndirectBindableEXT";
-      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )";
-    }
-  }
-
-  VULKAN_HPP_INLINE std::string to_string( BufferUsageFlagBits2KHR value )
-  {
-    switch ( value )
-    {
-      case BufferUsageFlagBits2KHR::eTransferSrc: return "TransferSrc";
-      case BufferUsageFlagBits2KHR::eTransferDst: return "TransferDst";
-      case BufferUsageFlagBits2KHR::eUniformTexelBuffer: return "UniformTexelBuffer";
-      case BufferUsageFlagBits2KHR::eStorageTexelBuffer: return "StorageTexelBuffer";
-      case BufferUsageFlagBits2KHR::eUniformBuffer: return "UniformBuffer";
-      case BufferUsageFlagBits2KHR::eStorageBuffer: return "StorageBuffer";
-      case BufferUsageFlagBits2KHR::eIndexBuffer: return "IndexBuffer";
-      case BufferUsageFlagBits2KHR::eVertexBuffer: return "VertexBuffer";
-      case BufferUsageFlagBits2KHR::eIndirectBuffer: return "IndirectBuffer";
-#if defined( VK_ENABLE_BETA_EXTENSIONS )
-      case BufferUsageFlagBits2KHR::eExecutionGraphScratchAMDX: return "ExecutionGraphScratchAMDX";
-#endif /*VK_ENABLE_BETA_EXTENSIONS*/
-      case BufferUsageFlagBits2KHR::eConditionalRenderingEXT: return "ConditionalRenderingEXT";
-      case BufferUsageFlagBits2KHR::eShaderBindingTable: return "ShaderBindingTable";
-      case BufferUsageFlagBits2KHR::eTransformFeedbackBufferEXT: return "TransformFeedbackBufferEXT";
-      case BufferUsageFlagBits2KHR::eTransformFeedbackCounterBufferEXT: return "TransformFeedbackCounterBufferEXT";
-      case BufferUsageFlagBits2KHR::eVideoDecodeSrc: return "VideoDecodeSrc";
-      case BufferUsageFlagBits2KHR::eVideoDecodeDst: return "VideoDecodeDst";
-      case BufferUsageFlagBits2KHR::eVideoEncodeDst: return "VideoEncodeDst";
-      case BufferUsageFlagBits2KHR::eVideoEncodeSrc: return "VideoEncodeSrc";
-      case BufferUsageFlagBits2KHR::eShaderDeviceAddress: return "ShaderDeviceAddress";
-      case BufferUsageFlagBits2KHR::eAccelerationStructureBuildInputReadOnly: return "AccelerationStructureBuildInputReadOnly";
-      case BufferUsageFlagBits2KHR::eAccelerationStructureStorage: return "AccelerationStructureStorage";
-      case BufferUsageFlagBits2KHR::eSamplerDescriptorBufferEXT: return "SamplerDescriptorBufferEXT";
-      case BufferUsageFlagBits2KHR::eResourceDescriptorBufferEXT: return "ResourceDescriptorBufferEXT";
-      case BufferUsageFlagBits2KHR::ePushDescriptorsDescriptorBufferEXT: return "PushDescriptorsDescriptorBufferEXT";
-      case BufferUsageFlagBits2KHR::eMicromapBuildInputReadOnlyEXT: return "MicromapBuildInputReadOnlyEXT";
-      case BufferUsageFlagBits2KHR::eMicromapStorageEXT: return "MicromapStorageEXT";
-      case BufferUsageFlagBits2KHR::ePreprocessBufferEXT: return "PreprocessBufferEXT";
-      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )";
-    }
-  }
-
   //=== VK_AMD_anti_lag ===
 
   VULKAN_HPP_INLINE std::string to_string( AntiLagModeAMD value )
@@ -9415,20 +9417,6 @@ namespace VULKAN_HPP_NAMESPACE
     }
   }
 
-  //=== VK_KHR_line_rasterization ===
-
-  VULKAN_HPP_INLINE std::string to_string( LineRasterizationModeKHR value )
-  {
-    switch ( value )
-    {
-      case LineRasterizationModeKHR::eDefault: return "Default";
-      case LineRasterizationModeKHR::eRectangular: return "Rectangular";
-      case LineRasterizationModeKHR::eBresenham: return "Bresenham";
-      case LineRasterizationModeKHR::eRectangularSmooth: return "RectangularSmooth";
-      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )";
-    }
-  }
-
   //=== VK_KHR_calibrated_timestamps ===
 
   VULKAN_HPP_INLINE std::string to_string( TimeDomainKHR value )
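The header update above tracks the promotion of many KHR/EXT enumerators to core names (Uint8 index type, LineStipple dynamic state, RenderingLocalRead layout, the global-priority, line-rasterization, maintenance5/6 and host-image-copy types), and the old suffixed to_string overloads are removed accordingly. Below is a minimal sketch, not part of the diff, of what this looks like at a call site; it assumes the updated third_party/vulkan headers and that the free-standing to_string() overloads come from vulkan_to_string.hpp, and the old suffixed spellings are assumed (not shown by this diff) to remain as deprecated aliases.

#include <iostream>
#include <vulkan/vulkan.hpp>
#include <vulkan/vulkan_to_string.hpp> // free-standing vk::to_string() overloads

int main()
{
    // Core spellings replace the suffixed ones used before this header update.
    std::cout << vk::to_string( vk::IndexType::eUint8 ) << '\n';                // previously eUint8KHR
    std::cout << vk::to_string( vk::DynamicState::eLineStipple ) << '\n';       // previously eLineStippleKHR
    std::cout << vk::to_string( vk::ImageLayout::eRenderingLocalRead ) << '\n'; // previously eRenderingLocalReadKHR
    std::cout << vk::to_string( vk::QueueGlobalPriority::eHigh ) << '\n';       // previously QueueGlobalPriorityKHR::eHigh
    return 0;
}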
diff --git a/third_party/vulkan/vulkan_video.hpp b/third_party/vulkan/vulkan_video.hpp
index fb82aa3..ee88975 100644
--- a/third_party/vulkan/vulkan_video.hpp
+++ b/third_party/vulkan/vulkan_video.hpp
@@ -7,8 +7,15 @@
 #ifndef VULKAN_VIDEO_HPP
 #define VULKAN_VIDEO_HPP
 
+// clang-format off
+#include
+// clang-format on
+
 #include
 #include
+#if ( 301 < VK_HEADER_VERSION )
+#  include
+#endif
 #include
 #include
 #include
@@ -16,7 +23,6 @@
 #include
 #include
 #include
-#include
 #if !defined( VULKAN_HPP_VIDEO_NAMESPACE )
 #  define VULKAN_HPP_VIDEO_NAMESPACE video
diff --git a/xmake.lua b/xmake.lua
index 93f72d0..857433a 100644
--- a/xmake.lua
+++ b/xmake.lua
@@ -1,21 +1,9 @@
---------------------------------------------------------------------------------
--- --
--- ::: :::::::: --
---   xmake.lua :+: :+: :+: --
--- +:+ +:+ +:+ --
---   By: maldavid +#+ +:+ +#+ --
--- +#+#+#+#+#+ +#+ --
---   Created: 2023/12/07 15:21:38 by kbz_8 #+# #+# --
---   Updated: 2024/01/02 23:40:20 by kbz_8 ### ########.fr --
--- --
---------------------------------------------------------------------------------
-
 -- Global settings
-add_requires("libsdl", {configs = { sdlmain = false }})
+add_requires("libsdl", { configs = { sdlmain = false } })
 add_rules("mode.debug", "mode.release")
 
-set_languages("cxx17", "c99")
+set_languages("cxx20", "c99")
 
 set_objectdir("objs/xmake/$(os)_$(arch)")
 set_targetdir("./")
@@ -44,6 +32,16 @@ option("profiler")
     add_defines("PROFILER")
 option_end()
 
+option("force_wayland")
+    set_default(false)
+    add_defines("FORCE_WAYLAND")
+option_end()
+
+option("disable_all_safeties")
+    set_default(false)
+    add_defines("DISABLE_ALL_SAFETIES")
+option_end()
+
 -- Targets
 
 target("mlx")
@@ -53,18 +51,39 @@ target("mlx")
     add_options("images_optimized")
     add_options("force_integrated_gpu")
     add_options("graphics_memory_dump")
-    add_includedirs("includes", "src", "third_party")
+    add_options("profiler")
+    add_options("force_wayland")
+    add_options("disable_all_safeties")
+
+    add_includedirs("runtime/Includes", "runtime/Sources", "includes", "third_party")
+
+    set_pcxxheader("runtime/Includes/PreCompiled.h")
 
     add_defines("MLX_BUILD", "SDL_MAIN_HANDLED")
 
-    add_files("src/**.cpp")
+    add_files("runtime/Sources/**.cpp")
     add_packages("libsdl")
 
     if is_mode("debug") then
         add_defines("DEBUG")
     end
-target_end() -- optional but I think the code is cleaner with this
+
+    on_clean(function(target)
+        if target:objectfiles() then
+            for _, file in ipairs(target:objectfiles()) do
+                if os.exists(file) then
+                    print("Removing " .. file)
+                    os.rm(file)
+                end
+            end
+        end
+        if target:targetfile() and os.exists(target:targetfile()) then
+            print("Removing " .. target:targetfile())
+            os.rm(target:targetfile())
+        end
+    end)
+target_end()
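The options added above reach the C++ sources only as preprocessor definitions declared with add_defines() (FORCE_WAYLAND, DISABLE_ALL_SAFETIES, PROFILER, plus DEBUG in debug mode). A minimal sketch of how such guards are typically consumed follows; it is not MacroLibX code, and the function name and messages are illustrative only.

#include <cstdio>

// The macro names come from the build scripts; the guarded bodies are placeholders.
static void ReportBuildOptions()
{
#ifdef FORCE_WAYLAND
    std::puts("built with FORCE_WAYLAND");
#endif
#ifdef DISABLE_ALL_SAFETIES
    std::puts("built with DISABLE_ALL_SAFETIES");
#endif
#ifdef PROFILER
    std::puts("built with PROFILER");
#endif
#ifdef DEBUG
    std::puts("debug build");
#endif
}

int main()
{
    ReportBuildOptions();
    return 0;
}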
 
 target("Test")
     set_default(false)