From d54c28dcaabf9018e5cb84e25090aa29c7ec9f8b Mon Sep 17 00:00:00 2001 From: Kbz-8 Date: Sun, 12 Apr 2026 22:34:58 +0200 Subject: [PATCH] adding buffer-image copies --- build.zig.zon | 4 +- src/soft/SoftCommandBuffer.zig | 2 +- src/soft/SoftDescriptorSet.zig | 30 +++- src/soft/SoftImage.zig | 216 ++++++++++++++++++++++++---- src/soft/SoftPhysicalDevice.zig | 2 +- src/soft/device/Blitter.zig | 12 +- src/soft/device/ComputeRoutines.zig | 12 ++ src/vulkan/Image.zig | 90 +++--------- src/vulkan/format.zig | 88 ++++++++++++ src/vulkan/lib.zig | 1 + src/vulkan/lib_vulkan.zig | 4 +- 11 files changed, 351 insertions(+), 110 deletions(-) create mode 100644 src/vulkan/format.zig diff --git a/build.zig.zon b/build.zig.zon index 2f81795..2e880b6 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -59,8 +59,8 @@ .lazy = true, }, .SPIRV_Interpreter = .{ - .url = "git+https://git.kbz8.me/kbz_8/SPIRV-Interpreter#85e0fe4362e3425a0d3142b873f0e8d9cca40fc6", - .hash = "SPIRV_Interpreter-0.0.1-ajmpnwF9BAAAb2kR9WjfMH0h3kOjhGICGfdId_4lomVD", + .url = "git+https://git.kbz8.me/kbz_8/SPIRV-Interpreter#9f586ae9c0414dbcd5545b258733a78b5301fee8", + .hash = "SPIRV_Interpreter-0.0.1-ajmpnxCFBADEKjJFMVvGhAEs1Nr-lrsHXDd5uYnUxYor", }, }, diff --git a/src/soft/SoftCommandBuffer.zig b/src/soft/SoftCommandBuffer.zig index fd5930c..11eb96b 100644 --- a/src/soft/SoftCommandBuffer.zig +++ b/src/soft/SoftCommandBuffer.zig @@ -175,7 +175,7 @@ pub fn clearColorImage(interface: *Interface, image: *base.Image, layout: vk.Ima range: vk.ImageSubresourceRange, pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void { - impl.image.clearRange(impl.clear_color, impl.range); + try impl.image.clearRange(impl.clear_color, impl.range); } }; diff --git a/src/soft/SoftDescriptorSet.zig b/src/soft/SoftDescriptorSet.zig index 402929e..fa7c4e3 100644 --- a/src/soft/SoftDescriptorSet.zig +++ b/src/soft/SoftDescriptorSet.zig @@ -5,8 +5,12 @@ const base = @import("base"); const VkError = base.VkError; const Device = 
base.Device; const Buffer = base.Buffer; +const ImageView = base.ImageView; const SoftBuffer = @import("SoftBuffer.zig"); +const SoftImage = @import("SoftImage.zig"); +const SoftImageView = @import("SoftImageView.zig"); +const SoftSampler = @import("SoftSampler.zig"); const NonDispatchable = base.NonDispatchable; @@ -19,9 +23,19 @@ const DescriptorBuffer = struct { size: vk.DeviceSize, }; +const DescriptorTexture = struct { + sampler: ?*SoftSampler, + view: ?*SoftImageView, +}; + +const DescriptorImage = struct { + object: ?*SoftImage, +}; + const Descriptor = union(enum) { buffer: []DescriptorBuffer, - image: struct {}, + texture: []DescriptorTexture, + image: []DescriptorImage, unsupported: struct {}, }; @@ -49,6 +63,7 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, layout: *base. for (layout.bindings) |binding| { const struct_size: usize = switch (binding.descriptor_type) { .storage_buffer, .storage_buffer_dynamic => @sizeOf(DescriptorBuffer), + .storage_image, .input_attachment => @sizeOf(DescriptorImage), else => 0, }; @@ -69,6 +84,9 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, layout: *base. 
.storage_buffer, .storage_buffer_dynamic => descriptor.* = .{ .buffer = local_allocator.alloc(DescriptorBuffer, binding.array_size) catch return VkError.OutOfHostMemory, }, + .storage_image, .input_attachment => descriptor.* = .{ + .image = local_allocator.alloc(DescriptorImage, binding.array_size) catch return VkError.OutOfHostMemory, + }, else => {}, } } @@ -114,6 +132,16 @@ pub fn write(interface: *Interface, write_data: vk.WriteDescriptorSet) VkError!v } } }, + .storage_image, .input_attachment => { + for (write_data.p_image_info, 0..write_data.descriptor_count) |image_info, i| { + const desc = &self.descriptors[write_data.dst_binding].image[i]; + desc.* = .{ .object = null }; + if (image_info.image_view != .null_handle) { + const image_view = try NonDispatchable(ImageView).fromHandleObject(image_info.image_view); + desc.object = @as(*SoftImage, @alignCast(@fieldParentPtr("interface", image_view.image))); + } + } + }, else => { self.descriptors[write_data.dst_binding] = .{ .unsupported = .{} }; base.unsupported("descriptor type {s} for writting", .{@tagName(write_data.descriptor_type)}); diff --git a/src/soft/SoftImage.zig b/src/soft/SoftImage.zig index c370282..6d07a51 100644 --- a/src/soft/SoftImage.zig +++ b/src/soft/SoftImage.zig @@ -24,6 +24,7 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v interface.vtable = &.{ .destroy = destroy, .getMemoryRequirements = getMemoryRequirements, + .getTotalSizeForAspect = getTotalSizeForAspect, }; self.* = .{ @@ -37,17 +38,16 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void { allocator.destroy(self); } -pub fn getMemoryRequirements(interface: *Interface, requirements: *vk.MemoryRequirements) void { - _ = interface; +pub fn getMemoryRequirements(_: *Interface, requirements: *vk.MemoryRequirements) VkError!void { requirements.alignment = lib.MEMORY_REQUIREMENTS_IMAGE_ALIGNMENT; } -inline fn clear(self: *Self, pixel: vk.ClearValue, format: vk.Format, view_format: 
vk.Format, range: vk.ImageSubresourceRange, area: ?vk.Rect2D) void { +inline fn clear(self: *Self, pixel: vk.ClearValue, format: vk.Format, view_format: vk.Format, range: vk.ImageSubresourceRange, area: ?vk.Rect2D) VkError!void { const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", self.interface.owner)); - soft_device.blitter.clear(pixel, format, self, view_format, range, area); + try soft_device.blitter.clear(pixel, format, self, view_format, range, area); } -pub fn clearRange(self: *Self, color: vk.ClearColorValue, range: vk.ImageSubresourceRange) void { +pub fn clearRange(self: *Self, color: vk.ClearColorValue, range: vk.ImageSubresourceRange) VkError!void { std.debug.assert(range.aspect_mask == vk.ImageAspectFlags{ .color_bit = true }); const clear_format: vk.Format = if (base.vku.vkuFormatIsSINT(@intCast(@intFromEnum(self.interface.format)))) @@ -56,7 +56,7 @@ pub fn clearRange(self: *Self, color: vk.ClearColorValue, range: vk.ImageSubreso .r32g32b32a32_uint else .r32g32b32a32_sfloat; - self.clear(.{ .color = color }, clear_format, self.interface.format, range, null); + try self.clear(.{ .color = color }, clear_format, self.interface.format, range, null); } pub fn copyImage(self: *const Self, self_layout: vk.ImageLayout, dst: *Self, dst_layout: vk.ImageLayout, regions: []const vk.ImageCopy) VkError!void { @@ -70,13 +70,12 @@ pub fn copyImage(self: *const Self, self_layout: vk.ImageLayout, dst: *Self, dst pub fn copyToBuffer(self: *const Self, dst: *SoftBuffer, region: vk.BufferImageCopy) VkError!void { const dst_size = dst.interface.size - region.buffer_offset; + const dst_offset = dst.interface.offset + region.buffer_offset; const dst_memory = if (dst.interface.memory) |memory| memory else return VkError.InvalidDeviceMemoryDrv; - const dst_map: []u8 = @as([*]u8, @ptrCast(try dst_memory.map(region.buffer_offset, dst_size)))[0..dst_size]; + const dst_map: []u8 = @as([*]u8, @ptrCast(try dst_memory.map(dst_offset, dst_size)))[0..dst_size]; 
try self.copy( null, dst_map, - @intCast(region.buffer_row_length), - @intCast(region.buffer_image_height), region.image_subresource, region.image_offset, region.image_extent, @@ -85,35 +84,200 @@ pub fn copyToBuffer(self: *const Self, dst: *SoftBuffer, region: vk.BufferImageC pub fn copyFromBuffer(self: *const Self, src: *const SoftBuffer, region: vk.BufferImageCopy) VkError!void { const src_size = src.interface.size - region.buffer_offset; + const src_offset = src.interface.offset + region.buffer_offset; const src_memory = if (src.interface.memory) |memory| memory else return VkError.InvalidDeviceMemoryDrv; - const src_map: []u8 = @as([*]u8, @ptrCast(try src_memory.map(region.buffer_offset, src_size)))[0..src_size]; + const src_map: []u8 = @as([*]u8, @ptrCast(try src_memory.map(src_offset, src_size)))[0..src_size]; try self.copy( src_map, null, - @intCast(region.buffer_row_length), - @intCast(region.buffer_image_height), region.image_subresource, region.image_offset, region.image_extent, ); } +/// Based on SwiftShader vk::Image::copy pub fn copy( self: *const Self, - src_memory: ?[]const u8, - dst_memory: ?[]u8, - row_len: usize, - image_height: usize, + base_src_memory: ?[]const u8, + base_dst_memory: ?[]u8, image_subresource: vk.ImageSubresourceLayers, - image_copy_offset: vk.Offset3D, - image_copy_extent: vk.Extent3D, + image_offset: vk.Offset3D, + image_extent: vk.Extent3D, ) VkError!void { - _ = self; - _ = src_memory; - _ = dst_memory; - _ = row_len; - _ = image_height; - _ = image_subresource; - _ = image_copy_offset; - _ = image_copy_extent; + std.debug.assert((base_src_memory == null) != (base_dst_memory == null)); + + const is_source: bool = base_src_memory != null; + + if (image_subresource.aspect_mask.subtract(.{ + .color_bit = true, + .depth_bit = true, + .stencil_bit = true, + }).toInt() != 0) { + base.unsupported("aspectMask {f}", .{image_subresource.aspect_mask}); + return VkError.ValidationFailed; + } + + const format = 
self.interface.formatFromAspect(image_subresource.aspect_mask); + + // TODO: handle extent of compressed formats + + if (image_extent.width == 0 or image_extent.height == 0 or image_extent.depth == 0) { + return; + } + + const bytes_per_block = base.format.texelSize(format); + const memory_row_pitch_bytes = image_extent.width * bytes_per_block; + const memory_slice_pitch_bytes = image_extent.height * memory_row_pitch_bytes; + + const image_texel_offset = try self.getTexelMemoryOffset(image_offset, .{ + .aspect_mask = image_subresource.aspect_mask, + .mip_level = image_subresource.mip_level, + .array_layer = image_subresource.base_array_layer, + }); + const image_size = self.getLayerSize(image_subresource.aspect_mask) - self.getTexelMemoryOffsetInSubresource(image_offset, .{ + .aspect_mask = image_subresource.aspect_mask, + .mip_level = image_subresource.mip_level, + .array_layer = image_subresource.base_array_layer, + }); + const image_memory = if (self.interface.memory) |memory| memory else return VkError.InvalidDeviceMemoryDrv; + const image_map: []u8 = @as([*]u8, @ptrCast(try image_memory.map(self.interface.memory_offset + image_texel_offset, image_size)))[0..image_size]; + + var src_memory = if (is_source) base_src_memory orelse return VkError.InvalidDeviceMemoryDrv else image_map; + var dst_memory = if (is_source) image_map else base_dst_memory orelse return VkError.InvalidDeviceMemoryDrv; + + const src_slice_pitch_bytes = if (is_source) memory_slice_pitch_bytes else self.getSliceMemSizeForMipLevel(image_subresource.aspect_mask, image_subresource.mip_level); + const dst_slice_pitch_bytes = if (is_source) self.getSliceMemSizeForMipLevel(image_subresource.aspect_mask, image_subresource.mip_level) else memory_slice_pitch_bytes; + const src_row_pitch_bytes = if (is_source) memory_row_pitch_bytes else self.getRowPitchMemSizeForMipLevel(image_subresource.aspect_mask, image_subresource.mip_level); + const dst_row_pitch_bytes = if (is_source) 
self.getRowPitchMemSizeForMipLevel(image_subresource.aspect_mask, image_subresource.mip_level) else memory_row_pitch_bytes; + + const src_layer_size = if (is_source) memory_slice_pitch_bytes else self.getLayerSize(image_subresource.aspect_mask); + const dst_layer_size = if (is_source) self.getLayerSize(image_subresource.aspect_mask) else memory_slice_pitch_bytes; + + const layer_count = if (image_subresource.layer_count == vk.REMAINING_ARRAY_LAYERS) self.interface.array_layers - image_subresource.base_array_layer else image_subresource.layer_count; + + const copy_size = image_extent.width * bytes_per_block; + + for (0..layer_count) |_| { + var src_layer_memory = src_memory[0..]; + var dst_layer_memory = dst_memory[0..]; + + for (0..image_extent.depth) |_| { + var src_slice_memory = src_layer_memory[0..]; + var dst_slice_memory = dst_layer_memory[0..]; + + for (0..image_extent.height) |_| { + @memcpy(dst_slice_memory[0..copy_size], src_slice_memory[0..copy_size]); + src_slice_memory = src_slice_memory[src_row_pitch_bytes..]; + dst_slice_memory = dst_slice_memory[dst_row_pitch_bytes..]; + } + src_layer_memory = src_layer_memory[src_slice_pitch_bytes..]; + dst_layer_memory = dst_layer_memory[dst_slice_pitch_bytes..]; + } + src_memory = src_memory[src_layer_size..]; + dst_memory = dst_memory[dst_layer_size..]; + } +} + +fn getTexelMemoryOffsetInSubresource(self: *const Self, offset: vk.Offset3D, subresource: vk.ImageSubresource) usize { + return @as(usize, @intCast(offset.z)) * self.getSliceMemSizeForMipLevel(subresource.aspect_mask, subresource.mip_level) + + @as(usize, @intCast(offset.y)) * self.getRowPitchMemSizeForMipLevel(subresource.aspect_mask, subresource.mip_level) + + @as(usize, @intCast(offset.x)) * base.format.texelSize(base.format.fromAspect(self.interface.format, subresource.aspect_mask)); +} + +fn getTexelMemoryOffset(self: *const Self, offset: vk.Offset3D, subresource: vk.ImageSubresource) VkError!usize { + return 
self.getTexelMemoryOffsetInSubresource(offset, subresource) + try self.getSubresourceOffset(subresource.aspect_mask, subresource.mip_level, subresource.array_layer); +} + +fn getSubresourceOffset(self: *const Self, aspect_mask: vk.ImageAspectFlags, mip_level: u32, layer: u32) VkError!usize { + var offset = try self.getAspectOffset(aspect_mask); + for (0..mip_level) |mip| { + offset += self.getMultiSampledLevelSize(aspect_mask, @intCast(mip)); + } + + const is_3D = (self.interface.image_type == .@"3d") and self.interface.flags.@"2d_array_compatible_bit"; + const layer_offset = if (is_3D) + self.getSliceMemSizeForMipLevel(aspect_mask, mip_level) + else + self.getLayerSize(aspect_mask); + return offset + layer * layer_offset; +} + +fn getAspectOffset(self: *const Self, aspect_mask: vk.ImageAspectFlags) VkError!usize { + return switch (self.interface.format) { + .d16_unorm_s8_uint, + .d24_unorm_s8_uint, + .d32_sfloat_s8_uint, + => if (aspect_mask.stencil_bit) + try self.interface.getTotalSizeForAspect(.{ .depth_bit = true }) + else + 0, + else => 0, + }; +} + +fn getTotalSizeForAspect(interface: *const Interface, aspect_mask: vk.ImageAspectFlags) VkError!usize { + const self: *const Self = @alignCast(@fieldParentPtr("interface", interface)); + + if (aspect_mask.subtract(.{ + .color_bit = true, + .depth_bit = true, + .stencil_bit = true, + }).toInt() != 0) { + base.unsupported("aspectMask {f}", .{aspect_mask}); + return VkError.ValidationFailed; + } + + var size: usize = 0; + + if (aspect_mask.color_bit) + size += self.getLayerSize(.{ .color_bit = true }); + if (aspect_mask.depth_bit) + size += self.getLayerSize(.{ .depth_bit = true }); + if (aspect_mask.stencil_bit) + size += self.getLayerSize(.{ .stencil_bit = true }); + + return size * self.interface.array_layers; +} + +fn getLayerSize(self: *const Self, aspect_mask: vk.ImageAspectFlags) usize { + var size: usize = 0; + for (0..self.interface.mip_levels) |mip_level| { + size += 
self.getMultiSampledLevelSize(aspect_mask, @intCast(mip_level)); + } + return size; +} + +inline fn getMultiSampledLevelSize(self: *const Self, aspect_mask: vk.ImageAspectFlags, mip_level: u32) usize { + return self.getMipLevelSize(aspect_mask, mip_level) * self.interface.samples.toInt(); +} + +inline fn getMipLevelSize(self: *const Self, aspect_mask: vk.ImageAspectFlags, mip_level: u32) usize { + return self.getSliceMemSizeForMipLevel(aspect_mask, mip_level) * self.getMipLevelExtent(mip_level).depth; +} + +fn getMipLevelExtent(self: *const Self, mip_level: u32) vk.Extent3D { + var extent: vk.Extent3D = .{ + .width = self.interface.extent.width >> @intCast(mip_level), + .height = self.interface.extent.height >> @intCast(mip_level), + .depth = self.interface.extent.depth >> @intCast(mip_level), + }; + + if (extent.width == 0) extent.width = 1; + if (extent.height == 0) extent.height = 1; + if (extent.depth == 0) extent.depth = 1; + + return extent; +} + +fn getSliceMemSizeForMipLevel(self: *const Self, aspect_mask: vk.ImageAspectFlags, mip_level: u32) usize { + const mip_extent = self.getMipLevelExtent(mip_level); + const format = self.interface.formatFromAspect(aspect_mask); + return base.format.sliceMemSize(format, mip_extent.width, mip_extent.height); +} + +fn getRowPitchMemSizeForMipLevel(self: *const Self, aspect_mask: vk.ImageAspectFlags, mip_level: u32) usize { + const mip_extent = self.getMipLevelExtent(mip_level); + const format = self.interface.formatFromAspect(aspect_mask); + return base.format.pitchMemSize(format, mip_extent.width); } diff --git a/src/soft/SoftPhysicalDevice.zig b/src/soft/SoftPhysicalDevice.zig index 36ac9fb..61bb3f7 100644 --- a/src/soft/SoftPhysicalDevice.zig +++ b/src/soft/SoftPhysicalDevice.zig @@ -515,7 +515,7 @@ pub fn getFormatProperties(interface: *Interface, format: vk.Format) VkError!vk. 
else => {}, } - if (base.Image.formatSupportsColorAttachemendBlend(format)) { + if (base.format.supportsColorAttachemendBlend(format)) { properties.optimal_tiling_features.color_attachment_blend_bit = true; } diff --git a/src/soft/device/Blitter.zig b/src/soft/device/Blitter.zig index 44d237e..eede8d7 100644 --- a/src/soft/device/Blitter.zig +++ b/src/soft/device/Blitter.zig @@ -4,6 +4,8 @@ const std = @import("std"); const vk = @import("vulkan"); const base = @import("base"); +const VkError = base.VkError; + pub const SoftImage = @import("../SoftImage.zig"); pub const SoftImageView = @import("../SoftImageView.zig"); @@ -15,8 +17,8 @@ pub const init: Self = .{ .blit_mutex = .{}, }; -pub fn clear(self: *Self, pixel: vk.ClearValue, format: vk.Format, dest: *SoftImage, view_format: vk.Format, range: vk.ImageSubresourceRange, area: ?vk.Rect2D) void { - const dst_format = base.Image.formatFromAspect(view_format, range.aspect_mask); +pub fn clear(self: *Self, pixel: vk.ClearValue, format: vk.Format, dest: *SoftImage, view_format: vk.Format, range: vk.ImageSubresourceRange, area: ?vk.Rect2D) VkError!void { + const dst_format = base.format.fromAspect(view_format, range.aspect_mask); if (dst_format == .undefined) { return; } @@ -40,13 +42,13 @@ pub fn clear(self: *Self, pixel: vk.ClearValue, format: vk.Format, dest: *SoftIm } } - if (self.fastClear(clamped_pixel, format, dest, dst_format, range, area)) { + if (try self.fastClear(clamped_pixel, format, dest, dst_format, range, area)) { return; } base.logger.fixme("implement slow clear", .{}); } -fn fastClear(self: *Self, clear_value: vk.ClearValue, clear_format: vk.Format, dest: *SoftImage, view_format: vk.Format, range: vk.ImageSubresourceRange, render_area: ?vk.Rect2D) bool { +fn fastClear(self: *Self, clear_value: vk.ClearValue, clear_format: vk.Format, dest: *SoftImage, view_format: vk.Format, range: vk.ImageSubresourceRange, render_area: ?vk.Rect2D) VkError!bool { _ = self; _ = render_area; _ = range; @@ -90,7 +92,7 @@ 
fn fastClear(self: *Self, clear_value: vk.ClearValue, clear_format: vk.Format, d } if (dest.interface.memory) |memory| { - const image_size = dest.interface.getTotalSize(); + const image_size = try dest.interface.getTotalSize(); const memory_map = memory.map(dest.interface.memory_offset, image_size) catch return false; defer memory.unmap(); diff --git a/src/soft/device/ComputeRoutines.zig b/src/soft/device/ComputeRoutines.zig index 778aed1..9043c1f 100644 --- a/src/soft/device/ComputeRoutines.zig +++ b/src/soft/device/ComputeRoutines.zig @@ -200,6 +200,18 @@ fn writeDescriptorSets(self: *Self, rt: *spv.Runtime) !void { ); } }, + .image => |image_data_array| for (image_data_array, 0..) |image_data, descriptor_index| { + if (image_data.object) |image| { + const memory = if (image.interface.memory) |memory| memory else continue :bindings; + const map: []u8 = @as([*]u8, @ptrCast(try memory.map(image.interface.memory_offset, try image.interface.getTotalSize())))[0..try image.interface.getTotalSize()]; + try rt.writeDescriptorSet( + map, + @as(u32, @intCast(set_index)), + @as(u32, @intCast(binding_index)), + @as(u32, @intCast(descriptor_index)), + ); + } + }, else => {}, } } diff --git a/src/vulkan/Image.zig b/src/vulkan/Image.zig index a7f63d5..f3d37f8 100644 --- a/src/vulkan/Image.zig +++ b/src/vulkan/Image.zig @@ -18,6 +18,7 @@ extent: vk.Extent3D, mip_levels: u32, array_layers: u32, samples: vk.SampleCountFlags, +flags: vk.ImageCreateFlags, tiling: vk.ImageTiling, usage: vk.ImageUsageFlags, memory: ?*DeviceMemory, @@ -28,7 +29,8 @@ vtable: *const VTable, pub const VTable = struct { destroy: *const fn (*Self, std.mem.Allocator) void, - getMemoryRequirements: *const fn (*Self, *vk.MemoryRequirements) void, + getMemoryRequirements: *const fn (*Self, *vk.MemoryRequirements) VkError!void, + getTotalSizeForAspect: *const fn (*const Self, vk.ImageAspectFlags) VkError!usize, }; pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.ImageCreateInfo) 
VkError!Self { @@ -41,6 +43,7 @@ pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.Image .mip_levels = info.mip_levels, .array_layers = info.array_layers, .samples = info.samples, + .flags = info.flags, .tiling = info.tiling, .usage = info.usage, .memory = null, @@ -55,7 +58,7 @@ pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void { } pub inline fn bindMemory(self: *Self, memory: *DeviceMemory, offset: vk.DeviceSize) VkError!void { - const image_size = self.getTotalSize(); + const image_size = try self.getTotalSize(); if (offset >= image_size or !self.allowed_memory_types.isSet(memory.memory_type_index)) { return VkError.ValidationFailed; } @@ -63,11 +66,10 @@ pub inline fn bindMemory(self: *Self, memory: *DeviceMemory, offset: vk.DeviceSi self.memory_offset = offset; } -pub inline fn getMemoryRequirements(self: *Self, requirements: *vk.MemoryRequirements) void { - const image_size = self.getTotalSize(); - requirements.size = image_size; +pub inline fn getMemoryRequirements(self: *Self, requirements: *vk.MemoryRequirements) VkError!void { + requirements.size = try self.getTotalSize(); requirements.memory_type_bits = self.allowed_memory_types.mask; - self.vtable.getMemoryRequirements(self, requirements); + try self.vtable.getMemoryRequirements(self, requirements); } pub inline fn getClearFormat(self: *Self) vk.Format { @@ -79,78 +81,22 @@ pub inline fn getClearFormat(self: *Self) vk.Format { .r32g32b32a32_sfloat; } -pub inline fn getPixelSize(self: *const Self) usize { - return lib.vku.vkuFormatTexelBlockSize(@intCast(@intFromEnum(self.format))); +pub inline fn getTexelSize(self: *const Self) usize { + return lib.format.texelSize(self.format); } -pub inline fn getTotalSize(self: *const Self) usize { - const pixel_size = self.getPixelSize(); - return self.extent.width * self.extent.height * self.extent.depth * pixel_size; +pub inline fn getTotalSizeForAspect(self: *const Self, aspect: vk.ImageAspectFlags) VkError!usize { + 
return self.vtable.getTotalSizeForAspect(self, aspect); } -pub inline fn getFormatPixelSize(format: vk.Format) usize { - return lib.vku.vkuFormatTexelBlockSize(@intCast(@intFromEnum(format))); +pub inline fn getTotalSize(self: *const Self) VkError!usize { + return self.vtable.getTotalSizeForAspect(self, lib.format.toAspect(self.format)); } -pub inline fn getFormatTotalSize(self: *const Self, format: vk.Format) usize { - const pixel_size = self.getFormatPixelSize(format); - return self.extent.width * self.extent.height * self.extent.depth * pixel_size; +pub inline fn formatFromAspect(self: *const Self, aspect_mask: vk.ImageAspectFlags) vk.Format { + return lib.format.fromAspect(self.format, aspect_mask); } -pub fn formatSupportsColorAttachemendBlend(format: vk.Format) bool { - return switch (format) { - // Vulkan 1.1 mandatory - .r5g6b5_unorm_pack16, - .a1r5g5b5_unorm_pack16, - .r8_unorm, - .r8g8_unorm, - .r8g8b8a8_unorm, - .r8g8b8a8_srgb, - .b8g8r8a8_unorm, - .b8g8r8a8_srgb, - .a8b8g8r8_unorm_pack32, - .a8b8g8r8_srgb_pack32, - .a2b10g10r10_unorm_pack32, - .r16_sfloat, - .r16g16_sfloat, - .r16g16b16a16_sfloat, - // optional - .r4g4b4a4_unorm_pack16, - .b4g4r4a4_unorm_pack16, - .b5g6r5_unorm_pack16, - .r5g5b5a1_unorm_pack16, - .b5g5r5a1_unorm_pack16, - .a2r10g10b10_unorm_pack32, - .r16_unorm, - .r16g16_unorm, - .r16g16b16a16_unorm, - .r32_sfloat, - .r32g32_sfloat, - .r32g32b32a32_sfloat, - .b10g11r11_ufloat_pack32, - .a4r4g4b4_unorm_pack16, - .a4b4g4r4_unorm_pack16, - => true, - else => false, - }; -} - -pub fn formatFromAspect(base_format: vk.Format, aspect: vk.ImageAspectFlags) vk.Format { - if (aspect.color_bit or (aspect.color_bit and aspect.stencil_bit)) { - return base_format; - } else if (aspect.depth_bit) { - if (base_format == .d16_unorm or base_format == .d16_unorm_s8_uint) { - return .d16_unorm; - } else if (base_format == .d24_unorm_s8_uint) { - return .x8_d24_unorm_pack32; - } else if (base_format == .d32_sfloat or base_format == .d32_sfloat_s8_uint) { - 
return .d32_sfloat; - } - } else if (aspect.stencil_bit) { - if (base_format == .s8_uint or base_format == .d16_unorm_s8_uint or base_format == .d24_unorm_s8_uint or base_format == .d32_sfloat_s8_uint) { - return .s8_uint; - } - } - lib.unsupported("format {d}", .{@intFromEnum(base_format)}); - return base_format; +pub inline fn formatToAspect(self: *const Self, aspect_mask: vk.ImageAspectFlags) vk.ImageAspectFlags { + return lib.format.toAspect(self.format); +} } diff --git a/src/vulkan/format.zig b/src/vulkan/format.zig new file mode 100644 index 0000000..d30f401 --- /dev/null +++ b/src/vulkan/format.zig @@ -0,0 +1,88 @@ +const std = @import("std"); +const vk = @import("vulkan"); +const lib = @import("lib.zig"); + +pub fn fromAspect(format: vk.Format, aspect: vk.ImageAspectFlags) vk.Format { + if (aspect.color_bit or (aspect.depth_bit and aspect.stencil_bit)) { + return format; + } else if (aspect.depth_bit) { + if (format == .d16_unorm or format == .d16_unorm_s8_uint) { + return .d16_unorm; + } else if (format == .d24_unorm_s8_uint) { + return .x8_d24_unorm_pack32; + } else if (format == .d32_sfloat or format == .d32_sfloat_s8_uint) { + return .d32_sfloat; + } + } else if (aspect.stencil_bit) { + if (format == .s8_uint or format == .d16_unorm_s8_uint or format == .d24_unorm_s8_uint or format == .d32_sfloat_s8_uint) { + return .s8_uint; + } + } + lib.unsupported("format {s}", .{@tagName(format)}); + return format; +} + +pub fn toAspect(format: vk.Format) vk.ImageAspectFlags { + var aspect: vk.ImageAspectFlags = .{}; + if (lib.vku.vkuFormatHasDepth(@intCast(@intFromEnum(format)))) + aspect.depth_bit = true; + if (lib.vku.vkuFormatHasStencil(@intCast(@intFromEnum(format)))) + aspect.stencil_bit = true; + + if (aspect.toInt() == 0) + aspect.color_bit = true; + + return aspect; +} + +pub inline fn texelSize(format: vk.Format) usize { + return lib.vku.vkuFormatTexelBlockSize(@intCast(@intFromEnum(format))); +} + +pub inline fn
supportsColorAttachemendBlend(format: vk.Format) bool { + return switch (format) { + // Vulkan 1.1 mandatory + .r5g6b5_unorm_pack16, + .a1r5g5b5_unorm_pack16, + .r8_unorm, + .r8g8_unorm, + .r8g8b8a8_unorm, + .r8g8b8a8_srgb, + .b8g8r8a8_unorm, + .b8g8r8a8_srgb, + .a8b8g8r8_unorm_pack32, + .a8b8g8r8_srgb_pack32, + .a2b10g10r10_unorm_pack32, + .r16_sfloat, + .r16g16_sfloat, + .r16g16b16a16_sfloat, + // optional + .r4g4b4a4_unorm_pack16, + .b4g4r4a4_unorm_pack16, + .b5g6r5_unorm_pack16, + .r5g5b5a1_unorm_pack16, + .b5g5r5a1_unorm_pack16, + .a2r10g10b10_unorm_pack32, + .r16_unorm, + .r16g16_unorm, + .r16g16b16a16_unorm, + .r32_sfloat, + .r32g32_sfloat, + .r32g32b32a32_sfloat, + .b10g11r11_ufloat_pack32, + .a4r4g4b4_unorm_pack16, + .a4b4g4r4_unorm_pack16, + => true, + else => false, + }; +} + +pub inline fn pitchMemSize(format: vk.Format, width: usize) usize { + // To be updated for compressed formats handling + return texelSize(format) * width; +} + +pub inline fn sliceMemSize(format: vk.Format, width: usize, height: usize) usize { + // To be updated for compressed formats handling + return pitchMemSize(format, width) * height; +} diff --git a/src/vulkan/lib.zig b/src/vulkan/lib.zig index aaddaac..b6c8cd4 100644 --- a/src/vulkan/lib.zig +++ b/src/vulkan/lib.zig @@ -10,6 +10,7 @@ pub const vku = @cImport({ pub const errors = @import("error_set.zig"); pub const lib_vulkan = @import("lib_vulkan.zig"); pub const logger = @import("logger/logger.zig"); +pub const format = @import("format.zig"); pub const Dispatchable = @import("Dispatchable.zig").Dispatchable; pub const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable; diff --git a/src/vulkan/lib_vulkan.zig b/src/vulkan/lib_vulkan.zig index 3b99ded..204dd09 100644 --- a/src/vulkan/lib_vulkan.zig +++ b/src/vulkan/lib_vulkan.zig @@ -624,7 +624,7 @@ pub export fn strollAllocateCommandBuffers(p_device: vk.Device, info: *const vk. 
} pub export fn strollAllocateDescriptorSets(p_device: vk.Device, info: *const vk.DescriptorSetAllocateInfo, p_sets: [*]vk.DescriptorSet) callconv(vk.vulkan_call_conv) vk.Result { - entryPointBeginLogTrace(.vkAllocateCommandBuffers); + entryPointBeginLogTrace(.vkAllocateDescriptorSets); defer entryPointEndLogTrace(); Dispatchable(Device).checkHandleValidity(p_device) catch |err| return toVkResult(err); @@ -1385,7 +1385,7 @@ pub export fn strollGetImageMemoryRequirements(p_device: vk.Device, p_image: vk. Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err); const image = NonDispatchable(Image).fromHandleObject(p_image) catch |err| return errorLogger(err); - image.getMemoryRequirements(requirements); + image.getMemoryRequirements(requirements) catch |err| return errorLogger(err); } pub export fn strollGetImageSparseMemoryRequirements(p_device: vk.Device, p_image: vk.Image, requirements: *vk.SparseImageMemoryRequirements) callconv(vk.vulkan_call_conv) void {