reworking command buffers, adding soft compute routines
Some checks failed
Build / build (push) Successful in 2m4s
Test / build_and_test (push) Failing after 3h24m26s

This commit is contained in:
2026-02-24 04:49:59 +01:00
parent d97533082d
commit e5cbbbcc91
20 changed files with 630 additions and 374 deletions

View File

@@ -132,6 +132,9 @@ fn customSoft(b: *std.Build, lib: *std.Build.Step.Compile) !void {
lib.addSystemIncludePath(cpuinfo.path("include"));
lib.linkLibrary(cpuinfo.artifact("cpuinfo"));
const interface = b.lazyDependency("interface", .{}) orelse return error.UnresolvedDependency;
lib.root_module.addImport("interface", interface.module("interface"));
const spv = b.dependency("SPIRV_Interpreter", .{
.@"no-example" = true,
.@"no-test" = true,

View File

@@ -53,9 +53,14 @@
.hash = "N-V-__8AABQ7TgCnPlp8MP4YA8znrjd6E-ZjpF1rvrS8J_2I",
.lazy = true,
},
.interface = .{
.url = "git+https://github.com/nilslice/zig-interface#8c0fe8fa9fd0702eee43f50cb75dce1cc5a7e1f4",
.hash = "interface-0.0.2-GFlWJ1mcAQARS-V4xJ7qDt5_cutxOHSEz6H9yiK-Sw0A",
.lazy = true,
},
.SPIRV_Interpreter = .{
.url = "git+https://git.kbz8.me/kbz_8/SPIRV-Interpreter#2ea707ea57ea3e36d51d30c7bf363bfe2eca778c",
.hash = "SPIRV_Interpreter-0.0.1-ajmpn5lyAwBPB_XSXWCQwuUmXgledjEyt3hYXZfbCvSV",
.url = "git+https://git.kbz8.me/kbz_8/SPIRV-Interpreter#2409ec726946a314f795b6edb5ae3ddd3eb7426c",
.hash = "SPIRV_Interpreter-0.0.1-ajmpnyuJAwD5jM0piGGnEq07unzNZyEQ_GmBp_PVMg2X",
},
},

View File

@@ -46,3 +46,35 @@ pub fn getMemoryRequirements(interface: *Interface, requirements: *vk.MemoryRequ
requirements.alignment = @max(requirements.alignment, lib.MIN_UNIFORM_BUFFER_ALIGNMENT);
}
}
/// Copies `regions` from this buffer's bound memory into `dst`'s bound memory.
/// Returns InvalidDeviceMemoryDrv when either buffer has no memory bound.
pub fn copyBuffer(self: *const Self, dst: *Self, regions: []const vk.BufferCopy) VkError!void {
    // The memory binding is per-buffer, not per-region: resolve it once.
    const src_memory = self.interface.memory orelse return VkError.InvalidDeviceMemoryDrv;
    const dst_memory = dst.interface.memory orelse return VkError.InvalidDeviceMemoryDrv;
    for (regions) |region| {
        const src_map: []u8 = @as([*]u8, @ptrCast(try src_memory.map(region.src_offset, region.size)))[0..region.size];
        // Balance the src map if mapping dst fails below.
        errdefer src_memory.unmap();
        const dst_map: []u8 = @as([*]u8, @ptrCast(try dst_memory.map(region.dst_offset, region.size)))[0..region.size];
        // NOTE(review): assumes src and dst regions never alias — TODO confirm,
        // @memcpy is UB on overlap.
        @memcpy(dst_map, src_map);
        src_memory.unmap();
        dst_memory.unmap();
    }
}
/// Fills the buffer's memory with `data`, one u32 word at a time
/// (vkCmdFillBuffer semantics; any trailing bytes < 4 are left untouched,
/// as the Vulkan spec requires size to be a multiple of 4 anyway).
pub fn fillBuffer(self: *Self, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) VkError!void {
    const memory = self.interface.memory orelse return VkError.InvalidDeviceMemoryDrv;
    // VK_WHOLE_SIZE means "from offset to the end of the bound memory";
    // resolve the sentinel BEFORE mapping so map() never receives it.
    const bytes: vk.DeviceSize = if (size == vk.WHOLE_SIZE) memory.size - offset else size;
    const word_count: usize = @intCast(bytes / @sizeOf(u32));
    const memory_map: []u32 = @as([*]u32, @ptrCast(@alignCast(try memory.map(offset, bytes))))[0..word_count];
    @memset(memory_map, data);
    memory.unmap();
}

View File

@@ -1,15 +1,32 @@
const std = @import("std");
const vk = @import("vulkan");
const base = @import("base");
const lib = @import("lib.zig");
const InterfaceFactory = @import("interface").Interface;
const VkError = base.VkError;
const Device = base.Device;
const SoftBuffer = @import("SoftBuffer.zig");
const SoftImage = @import("SoftImage.zig");
const SoftPipeline = @import("SoftPipeline.zig");
const SoftDescriptorSet = @import("SoftDescriptorSet.zig");
const ExecutionDevice = @import("device/Device.zig");
const Self = @This();
pub const Interface = base.CommandBuffer;
const Command = InterfaceFactory(.{
.execute = fn (*ExecutionDevice) VkError!void,
}, null);
interface: Interface,
command_allocator: std.heap.ArenaAllocator,
commands: std.ArrayList(Command),
pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const vk.CommandBufferAllocateInfo) VkError!*Self {
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
errdefer allocator.destroy(self);
@@ -28,6 +45,7 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
.copyBuffer = copyBuffer,
.copyImage = copyImage,
.copyImageToBuffer = copyImageToBuffer,
.dispatch = dispatch,
.end = end,
.fillBuffer = fillBuffer,
.reset = reset,
@@ -38,7 +56,10 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
self.* = .{
.interface = interface,
.command_allocator = undefined,
.commands = .empty,
};
self.command_allocator = .init(self.interface.host_allocator.allocator());
return self;
}
@@ -47,10 +68,16 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
allocator.destroy(self);
}
pub fn begin(interface: *Interface, info: *const vk.CommandBufferBeginInfo) VkError!void {
// No-op
_ = interface;
_ = info;
/// Replays every recorded command, in recording order, against the
/// execution device.
/// NOTE(review): a failing `submit()` aborts the replay silently and the
/// function reports success — confirm whether that error should propagate.
pub fn execute(self: *Self, device: *ExecutionDevice) VkError!void {
    self.interface.submit() catch return;
    for (self.commands.items) |command| {
        try command.vtable.execute(command.ptr, device);
    }
}
/// vkBeginCommandBuffer: beginning a recording implicitly resets previously
/// recorded state, so stale commands and their arena-backed storage are dropped.
pub fn begin(interface: *Interface, _: *const vk.CommandBufferBeginInfo) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    // Forget the list first: its items and capacity live in the arena that
    // is released just below.
    self.commands = .empty;
    // reset(.free_all) releases all allocations but leaves the arena usable
    // for the new recording; deinit() would leave it in an invalid state and
    // a later allocation through it would be use-after-free.
    _ = self.command_allocator.reset(.free_all);
}
pub fn end(interface: *Interface) VkError!void {
@@ -58,76 +85,236 @@ pub fn end(interface: *Interface) VkError!void {
_ = interface;
}
pub fn reset(interface: *Interface, flags: vk.CommandBufferResetFlags) VkError!void {
// No-op
_ = interface;
_ = flags;
/// vkResetCommandBuffer: drops all recorded commands and shrinks the arena
/// back to a 16 KiB retained block so a hot command buffer re-records cheaply.
pub fn reset(interface: *Interface, _: vk.CommandBufferResetFlags) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const allocator = self.command_allocator.allocator();
    // The list's storage lives in the arena; clear it before the arena reset
    // so `commands` holds no dangling items or capacity afterwards.
    self.commands.clearAndFree(allocator);
    if (!self.command_allocator.reset(.{ .retain_with_limit = 16_384 }))
        return VkError.OutOfHostMemory;
}
// Commands ====================================================================================================
/// Records vkCmdBindDescriptorSets; at execution time the non-null sets are
/// written into the pipeline state slot selected by `bind_point`.
pub fn bindDescriptorSets(interface: *Interface, bind_point: vk.PipelineBindPoint, first_set: u32, sets: [base.VULKAN_MAX_DESCRIPTOR_SETS]?*base.DescriptorSet, dynamic_offsets: []const u32) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const allocator = self.command_allocator.allocator();
    const CommandImpl = struct {
        const Impl = @This();
        bind_point: vk.PipelineBindPoint,
        first_set: u32,
        sets: [base.VULKAN_MAX_DESCRIPTOR_SETS]?*base.DescriptorSet,
        dynamic_offsets: []const u32,
        pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
            // NOTE(review): with first_set > 0 and a full `sets` array the
            // target index can exceed VULKAN_MAX_DESCRIPTOR_SETS — TODO confirm
            // callers never pass that combination.
            for (impl.first_set.., impl.sets[0..]) |i, set| {
                if (set == null)
                    break;
                device.pipeline_states[@intCast(@intFromEnum(impl.bind_point))].sets[i] = @alignCast(@fieldParentPtr("interface", set.?));
            }
        }
    };
    const cmd = allocator.create(CommandImpl) catch return VkError.OutOfHostMemory;
    errdefer allocator.destroy(cmd);
    cmd.* = .{
        .bind_point = bind_point,
        .first_set = first_set,
        .sets = sets,
        // Record-time slice may not outlive this call; copy into the arena
        // like the other recorded commands do (freed on reset or destroy).
        .dynamic_offsets = allocator.dupe(u32, dynamic_offsets) catch return VkError.OutOfHostMemory,
    };
    self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
}
/// Records vkCmdBindPipeline for the graphics or compute slot of the
/// execution device's pipeline state. Other bind points are rejected.
pub fn bindPipeline(interface: *Interface, bind_point: vk.PipelineBindPoint, pipeline: *base.Pipeline) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const allocator = self.command_allocator.allocator();
    if (bind_point != .graphics and bind_point != .compute) {
        std.log.warn("Software driver does not support bind point {s}", .{@tagName(bind_point)});
        return VkError.ValidationFailed;
    }
    const CommandImpl = struct {
        const Impl = @This();
        bind_point: vk.PipelineBindPoint,
        pipeline: *SoftPipeline,
        pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
            device.pipeline_states[@intCast(@intFromEnum(impl.bind_point))].pipeline = impl.pipeline;
        }
    };
    const cmd = allocator.create(CommandImpl) catch return VkError.OutOfHostMemory;
    errdefer allocator.destroy(cmd);
    cmd.* = .{
        .bind_point = bind_point,
        .pipeline = @alignCast(@fieldParentPtr("interface", pipeline)),
    };
    self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
}
/// Records vkCmdClearColorImage; the clear value and range are copied by
/// value, execution delegates to SoftImage.clearRange.
pub fn clearColorImage(interface: *Interface, image: *base.Image, layout: vk.ImageLayout, color: *const vk.ClearColorValue, range: vk.ImageSubresourceRange) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const allocator = self.command_allocator.allocator();
    const CommandImpl = struct {
        const Impl = @This();
        image: *SoftImage,
        // Recorded for API completeness; the clear itself does not read it.
        layout: vk.ImageLayout,
        clear_color: vk.ClearColorValue,
        range: vk.ImageSubresourceRange,
        pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
            impl.image.clearRange(impl.clear_color, impl.range);
        }
    };
    const cmd = allocator.create(CommandImpl) catch return VkError.OutOfHostMemory;
    errdefer allocator.destroy(cmd);
    cmd.* = .{
        .image = @alignCast(@fieldParentPtr("interface", image)),
        .layout = layout,
        .clear_color = color.*,
        .range = range,
    };
    self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
}
/// Records vkCmdCopyBuffer; `regions` is duplicated into the command arena
/// because the caller's slice may not outlive the recording call.
pub fn copyBuffer(interface: *Interface, src: *base.Buffer, dst: *base.Buffer, regions: []const vk.BufferCopy) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const allocator = self.command_allocator.allocator();
    const CommandImpl = struct {
        const Impl = @This();
        src: *const SoftBuffer,
        dst: *SoftBuffer,
        regions: []const vk.BufferCopy,
        pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
            try impl.src.copyBuffer(impl.dst, impl.regions);
        }
    };
    const cmd = allocator.create(CommandImpl) catch return VkError.OutOfHostMemory;
    errdefer allocator.destroy(cmd);
    cmd.* = .{
        .src = @alignCast(@fieldParentPtr("interface", src)),
        .dst = @alignCast(@fieldParentPtr("interface", dst)),
        .regions = allocator.dupe(vk.BufferCopy, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
    };
    self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
}
/// Records vkCmdCopyImage; `regions` is duplicated into the command arena
/// because the caller's slice may not outlive the recording call.
pub fn copyImage(interface: *Interface, src: *base.Image, src_layout: vk.ImageLayout, dst: *base.Image, dst_layout: vk.ImageLayout, regions: []const vk.ImageCopy) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const allocator = self.command_allocator.allocator();
    const CommandImpl = struct {
        const Impl = @This();
        src: *const SoftImage,
        src_layout: vk.ImageLayout,
        dst: *SoftImage,
        dst_layout: vk.ImageLayout,
        regions: []const vk.ImageCopy,
        pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
            try impl.src.copyImage(impl.src_layout, impl.dst, impl.dst_layout, impl.regions);
        }
    };
    const cmd = allocator.create(CommandImpl) catch return VkError.OutOfHostMemory;
    errdefer allocator.destroy(cmd);
    cmd.* = .{
        .src = @alignCast(@fieldParentPtr("interface", src)),
        .src_layout = src_layout,
        .dst = @alignCast(@fieldParentPtr("interface", dst)),
        .dst_layout = dst_layout,
        .regions = allocator.dupe(vk.ImageCopy, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
    };
    self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
}
/// Records vkCmdCopyImageToBuffer; `regions` is duplicated into the command
/// arena because the caller's slice may not outlive the recording call.
pub fn copyImageToBuffer(interface: *Interface, src: *base.Image, src_layout: vk.ImageLayout, dst: *base.Buffer, regions: []const vk.BufferImageCopy) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const allocator = self.command_allocator.allocator();
    const CommandImpl = struct {
        const Impl = @This();
        src: *const SoftImage,
        src_layout: vk.ImageLayout,
        dst: *SoftBuffer,
        regions: []const vk.BufferImageCopy,
        pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
            try impl.src.copyImageToBuffer(impl.src_layout, impl.dst, impl.regions);
        }
    };
    const cmd = allocator.create(CommandImpl) catch return VkError.OutOfHostMemory;
    errdefer allocator.destroy(cmd);
    cmd.* = .{
        .src = @alignCast(@fieldParentPtr("interface", src)),
        .src_layout = src_layout,
        .dst = @alignCast(@fieldParentPtr("interface", dst)),
        .regions = allocator.dupe(vk.BufferImageCopy, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
    };
    self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
}
/// Records vkCmdDispatch; the group counts are replayed against the
/// execution device's compute routines at submit time.
pub fn dispatch(interface: *Interface, group_count_x: u32, group_count_y: u32, group_count_z: u32) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const arena = self.command_allocator.allocator();
    const DispatchCommand = struct {
        const Impl = @This();
        group_count_x: u32,
        group_count_y: u32,
        group_count_z: u32,
        pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
            try device.compute_routines.dispatch(impl.group_count_x, impl.group_count_y, impl.group_count_z);
        }
    };
    const recorded = arena.create(DispatchCommand) catch return VkError.OutOfHostMemory;
    errdefer arena.destroy(recorded);
    recorded.* = .{
        .group_count_x = group_count_x,
        .group_count_y = group_count_y,
        .group_count_z = group_count_z,
    };
    self.commands.append(arena, Command.from(recorded)) catch return VkError.OutOfHostMemory;
}
/// Records vkCmdFillBuffer; the fill parameters are captured by value and
/// replayed against the SoftBuffer at submit time.
pub fn fillBuffer(interface: *Interface, buffer: *base.Buffer, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) VkError!void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    const arena = self.command_allocator.allocator();
    const FillCommand = struct {
        const Impl = @This();
        buffer: *SoftBuffer,
        offset: vk.DeviceSize,
        size: vk.DeviceSize,
        data: u32,
        pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
            try impl.buffer.fillBuffer(impl.offset, impl.size, impl.data);
        }
    };
    const recorded = arena.create(FillCommand) catch return VkError.OutOfHostMemory;
    errdefer arena.destroy(recorded);
    recorded.* = .{
        .data = data,
        .size = size,
        .offset = offset,
        .buffer = @alignCast(@fieldParentPtr("interface", buffer)),
    };
    self.commands.append(arena, Command.from(recorded)) catch return VkError.OutOfHostMemory;
}
pub fn resetEvent(interface: *Interface, event: *base.Event, stage: vk.PipelineStageFlags) VkError!void {

View File

@@ -77,6 +77,9 @@ pub fn write(interface: *Interface, write_data: vk.WriteDescriptorSet) VkError!v
if (buffer_info.buffer != .null_handle) {
const buffer = try NonDispatchable(Buffer).fromHandleObject(buffer_info.buffer);
desc.buffer.object = @as(*SoftBuffer, @alignCast(@fieldParentPtr("interface", buffer)));
if (desc.buffer.size == vk.WHOLE_SIZE) {
desc.buffer.size = if (buffer.memory) |memory| memory.size - desc.buffer.offset else return VkError.InvalidDeviceMemoryDrv;
}
}
}
},

View File

@@ -7,6 +7,7 @@ const lib = @import("lib.zig");
const VkError = base.VkError;
const Device = base.Device;
const SoftBuffer = @import("SoftBuffer.zig");
const SoftDevice = @import("SoftDevice.zig");
const Self = @This();
@@ -57,3 +58,49 @@ pub fn clearRange(self: *Self, color: vk.ClearColorValue, range: vk.ImageSubreso
.r32g32b32a32_sfloat;
self.clear(.{ .color = color }, clear_format, self.interface.format, range, null);
}
/// Image-to-image copy is not implemented yet; logs a warning only.
/// NOTE(review): returning success for an unimplemented operation silently
/// drops the copy — confirm callers tolerate this until the FIXME is resolved.
pub fn copyImage(self: *const Self, self_layout: vk.ImageLayout, dst: *Self, dst_layout: vk.ImageLayout, regions: []const vk.ImageCopy) VkError!void {
    _ = self;
    _ = self_layout;
    _ = dst;
    _ = dst_layout;
    _ = regions;
    std.log.scoped(.commandExecutor).warn("FIXME: implement image to image copy", .{});
}
/// Copies image texels into a buffer, row by row, per vkCmdCopyImageToBuffer.
/// NOTE(review): mip_level and array layers of image_subresource are ignored —
/// TODO confirm only single-mip, single-layer images reach this path.
pub fn copyImageToBuffer(self: *const Self, self_layout: vk.ImageLayout, dst: *SoftBuffer, regions: []const vk.BufferImageCopy) VkError!void {
    _ = self_layout; // The linear software representation is layout-agnostic.
    // Memory bindings and image geometry are region-invariant: resolve once.
    const src_memory = self.interface.memory orelse return VkError.InvalidDeviceMemoryDrv;
    const dst_memory = dst.interface.memory orelse return VkError.InvalidDeviceMemoryDrv;
    const pixel_size: u32 = @intCast(self.interface.getPixelSize());
    const image_row_pitch: u32 = self.interface.extent.width * pixel_size;
    const image_size: u32 = @intCast(self.interface.getTotalSize());
    for (regions) |region| {
        // A zero buffer_row_length means "tightly packed" (row length = copy width).
        const buffer_row_length: u32 = if (region.buffer_row_length != 0) region.buffer_row_length else region.image_extent.width;
        const buffer_row_pitch: u32 = buffer_row_length * pixel_size;
        const buffer_size: u32 = buffer_row_pitch * region.image_extent.height * region.image_extent.depth;
        const src_map: []u8 = @as([*]u8, @ptrCast(try src_memory.map(0, image_size)))[0..image_size];
        // Balance the src map if mapping dst fails below.
        errdefer src_memory.unmap();
        const dst_map: []u8 = @as([*]u8, @ptrCast(try dst_memory.map(region.buffer_offset, buffer_size)))[0..buffer_size];
        const row_size = region.image_extent.width * pixel_size;
        // Iterate the REGION extent, not the full image extent: dst_map is
        // sized for the region and would be overrun otherwise.
        for (0..region.image_extent.depth) |z| {
            for (0..region.image_extent.height) |y| {
                const z_as_u32: u32 = @intCast(z);
                const y_as_u32: u32 = @intCast(y);
                const src_offset = ((@as(u32, @intCast(region.image_offset.z)) + z_as_u32) * self.interface.extent.height + @as(u32, @intCast(region.image_offset.y)) + y_as_u32) * image_row_pitch + @as(u32, @intCast(region.image_offset.x)) * pixel_size;
                const dst_offset = z_as_u32 * buffer_row_pitch * region.image_extent.height + y_as_u32 * buffer_row_pitch;
                const src_slice = src_map[src_offset..(src_offset + row_size)];
                const dst_slice = dst_map[dst_offset..(dst_offset + row_size)];
                @memcpy(dst_slice, src_slice);
            }
        }
        src_memory.unmap();
        dst_memory.unmap();
    }
}

View File

@@ -15,9 +15,23 @@ const SoftShaderModule = @import("SoftShaderModule.zig");
const Self = @This();
pub const Interface = base.Pipeline;
interface: Interface,
const Shader = struct {
module: *SoftShaderModule,
runtimes: []spv.Runtime,
entry: []const u8,
};
const Stages = enum {
vertex,
tessellation_control,
tessellation_evaluation,
geometry,
fragment,
compute,
};
interface: Interface,
stages: std.EnumMap(Stages, Shader),
pub fn createCompute(device: *base.Device, allocator: std.mem.Allocator, cache: ?*base.PipelineCache, info: *const vk.ComputePipelineCreateInfo) VkError!*Self {
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -33,19 +47,31 @@ pub fn createCompute(device: *base.Device, allocator: std.mem.Allocator, cache:
const module = try NonDispatchable(ShaderModule).fromHandleObject(info.stage.module);
const soft_module: *SoftShaderModule = @alignCast(@fieldParentPtr("interface", module));
const runtimes = allocator.alloc(spv.Runtime, soft_device.workers.getIdCount()) catch return VkError.OutOfHostMemory;
errdefer allocator.free(runtimes);
const device_allocator = soft_device.device_allocator.allocator();
self.* = .{
.interface = interface,
.stages = std.EnumMap(Stages, Shader).init(.{
.compute = .{
.module = blk: {
soft_module.ref();
break :blk soft_module;
},
.runtimes = blk: {
const runtimes = device_allocator.alloc(spv.Runtime, soft_device.workers.getIdCount()) catch return VkError.OutOfHostMemory;
errdefer device_allocator.free(runtimes);
for (runtimes) |*runtime| {
runtime.* = spv.Runtime.init(allocator, &soft_module.module) catch |err| {
runtime.* = spv.Runtime.init(device_allocator, &soft_module.module) catch |err| {
std.log.scoped(.SpvRuntimeInit).err("SPIR-V Runtime failed to initialize, {s}", .{@errorName(err)});
return VkError.Unknown;
};
}
self.* = .{
.interface = interface,
.runtimes = runtimes,
break :blk runtimes;
},
.entry = allocator.dupe(u8, std.mem.span(info.stage.p_name)) catch return VkError.OutOfHostMemory,
},
}),
};
return self;
}
@@ -74,16 +100,24 @@ pub fn createGraphics(device: *base.Device, allocator: std.mem.Allocator, cache:
self.* = .{
.interface = interface,
.runtimes = runtimes,
.stages = std.enums.EnumMap(Stages, Shader).init(.{}),
};
return self;
}
pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
for (self.runtimes) |*runtime| {
runtime.deinit(allocator);
const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
const device_allocator = soft_device.device_allocator.allocator();
var it = self.stages.iterator();
while (it.next()) |stage| {
stage.value.module.unref(allocator);
for (stage.value.runtimes) |*runtime| {
runtime.deinit(device_allocator);
}
device_allocator.free(stage.value.runtimes);
allocator.free(stage.value.entry);
}
allocator.free(self.runtimes);
allocator.destroy(self);
}

View File

@@ -4,11 +4,12 @@ const base = @import("base");
const RefCounter = base.RefCounter;
const Device = @import("device/Device.zig");
const ExecutionDevice = @import("device/Device.zig");
const Dispatchable = base.Dispatchable;
const CommandBuffer = base.CommandBuffer;
const SoftDevice = @import("SoftDevice.zig");
const SoftCommandBuffer = @import("SoftCommandBuffer.zig");
const VkError = base.VkError;
@@ -97,14 +98,13 @@ fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence, ru
command_buffers.deinit(soft_device.device_allocator.allocator());
}
var device = Device.init(soft_device);
defer device.deinit();
var execution_device: ExecutionDevice = .init;
execution_device.setup(soft_device);
defer execution_device.deinit();
for (info.command_buffers.items) |command_buffer| {
command_buffer.submit() catch continue;
for (command_buffer.commands.items) |command| {
device.execute(&command) catch |err| base.errors.errorLoggerContext(err, "the software command dispatcher");
}
const soft_command_buffer: *SoftCommandBuffer = @alignCast(@fieldParentPtr("interface", command_buffer));
soft_command_buffer.execute(&execution_device) catch |err| base.errors.errorLoggerContext(err, "the software execution device");
}
if (p_fence) |fence| {

View File

@@ -13,6 +13,10 @@ pub const Interface = base.ShaderModule;
interface: Interface,
module: spv.Module,
/// Pipelines keep a reference to the SPIR-V module, so the shader module
/// may not be destroyed immediately on a call to `vkDestroyShaderModule`.
ref_count: std.atomic.Value(usize),
pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const vk.ShaderModuleCreateInfo) VkError!*Self {
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
errdefer allocator.destroy(self);
@@ -33,12 +37,27 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
spv.Module.ModuleError.OutOfMemory => return VkError.OutOfHostMemory,
else => return VkError.ValidationFailed,
},
.ref_count = std.atomic.Value(usize).init(1),
};
return self;
}
/// vkDestroyShaderModule entry point: releases the driver's own reference.
/// The module is only freed once every pipeline that ref()'d it has unref()'d.
pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    self.unref(allocator);
}
/// Frees the SPIR-V module and this object. Must only be reached through
/// unref() when the reference count hits zero.
pub inline fn drop(self: *Self, allocator: std.mem.Allocator) void {
    self.module.deinit(allocator);
    allocator.destroy(self);
}
/// Takes an additional strong reference. Monotonic ordering suffices for
/// increments; only the final decrement needs ordering (see unref()).
pub inline fn ref(self: *Self) void {
    _ = self.ref_count.fetchAdd(1, .monotonic);
}
/// Releases one strong reference, freeing the module on the last one.
/// `.acq_rel`: the release half publishes this thread's prior writes, and the
/// acquire half on the final decrement orders every other thread's unref()
/// before drop() — a plain `.release` fetchSub lacks that acquire and can
/// free while another thread's writes are still in flight.
pub inline fn unref(self: *Self, allocator: std.mem.Allocator) void {
    if (self.ref_count.fetchSub(1, .acq_rel) == 1) {
        self.drop(allocator);
    }
}

View File

@@ -12,16 +12,210 @@ const VkError = base.VkError;
const Self = @This();
/// Per-worker payload for one dispatch: identifies which stride of workgroups
/// this worker processes and carries the dispatch dimensions.
const RunData = struct {
    self: *Self,
    // Index of this worker in the batch; also its first workgroup index and
    // the index of its dedicated SPIR-V runtime.
    batch_id: usize,
    // Total workgroups in the dispatch (x * y * z).
    group_count: usize,
    group_count_x: usize,
    group_count_y: usize,
    group_count_z: usize,
    // Subgroups executed per workgroup (ceil of invocations / subgroup width).
    subgroups_per_workgroup: usize,
    pipeline: *SoftPipeline,
};
device: *SoftDevice,
state: *PipelineState,
batch_size: usize,
/// Builds compute routines bound to `device`, operating on `state` (the
/// compute slot of the pipeline state). `batch_size` stays 0 until dispatch().
pub fn init(device: *SoftDevice, state: *PipelineState) Self {
    return Self{
        .batch_size = 0,
        .state = state,
        .device = device,
    };
}
/// No resources are owned yet; kept so the execution device can call
/// destroy() unconditionally during teardown.
pub fn destroy(self: *Self) void {
    _ = self;
}
/// Executes a compute dispatch by fanning workgroups out over the worker
/// pool: each worker owns one SPIR-V runtime and strides through the
/// workgroup range (see run()).
pub fn dispatch(self: *Self, group_count_x: u32, group_count_y: u32, group_count_z: u32) VkError!void {
    // Widen BEFORE multiplying: the product of three u32 group counts can
    // overflow u32 (which traps in safe builds).
    const group_count = @as(usize, group_count_x) * @as(usize, group_count_y) * @as(usize, group_count_z);
    const pipeline = self.state.pipeline orelse return VkError.InvalidPipelineDrv;
    const shader = pipeline.stages.getPtr(.compute) orelse return VkError.InvalidPipelineDrv;
    const spv_module = &shader.module.module;
    // One runtime per worker thread.
    self.batch_size = shader.runtimes.len;
    // Fixed software subgroup width.
    const invocations_per_subgroup: usize = 4;
    // Widened for the same overflow reason as group_count.
    const invocations_per_workgroup = @as(usize, spv_module.local_size_x) * @as(usize, spv_module.local_size_y) * @as(usize, spv_module.local_size_z);
    // Ceiling division: a trailing partial subgroup still needs a slot.
    const subgroups_per_workgroup = @divTrunc(invocations_per_workgroup + invocations_per_subgroup - 1, invocations_per_subgroup);
    var wg: std.Thread.WaitGroup = .{};
    for (0..@min(self.batch_size, group_count)) |batch_id| {
        self.device.workers.spawnWg(&wg, runWrapper, .{
            RunData{
                .self = self,
                .batch_id = batch_id,
                .group_count = group_count,
                .group_count_x = @as(usize, group_count_x),
                .group_count_y = @as(usize, group_count_y),
                .group_count_z = @as(usize, group_count_z),
                .subgroups_per_workgroup = subgroups_per_workgroup,
                .pipeline = pipeline,
            },
        });
    }
    self.device.workers.waitAndWork(&wg);
}
/// Thread-pool adapter: run() can fail but spawnWg needs a void callback, so
/// errors are logged (with a stack trace when available) and dropped.
fn runWrapper(data: RunData) void {
    @call(.always_inline, run, .{data}) catch |err| {
        std.log.scoped(.@"SPIR-V runtime").err("SPIR-V runtime caught a '{s}'", .{@errorName(err)});
        if (@errorReturnTrace()) |trace| {
            std.debug.dumpStackTrace(trace.*);
        }
    };
}
/// Worker body: executes every workgroup whose linear index is congruent to
/// batch_id modulo batch_size, on this worker's dedicated SPIR-V runtime.
inline fn run(data: RunData) !void {
    const allocator = data.self.device.device_allocator.allocator();
    const shader = data.pipeline.stages.getPtrAssertContains(.compute);
    // One runtime per worker: batch_id doubles as the runtime index.
    const rt = &shader.runtimes[data.batch_id];
    const entry = try rt.getEntryPointByName(shader.entry);
    var group_index: usize = data.batch_id;
    while (group_index < data.group_count) : (group_index += data.self.batch_size) {
        // Decompose the linear workgroup index into (x, y, z) coordinates.
        var modulo: usize = group_index;
        const group_z = @divTrunc(modulo, data.group_count_x * data.group_count_y);
        modulo -= group_z * data.group_count_x * data.group_count_y;
        const group_y = @divTrunc(modulo, data.group_count_x);
        modulo -= group_y * data.group_count_x;
        const group_x = modulo;
        try setupWorkgroupBuiltins(
            data.self,
            rt,
            .{
                @as(u32, @intCast(data.group_count_x)),
                @as(u32, @intCast(data.group_count_y)),
                @as(u32, @intCast(data.group_count_z)),
            },
            .{
                @as(u32, @intCast(group_x)),
                @as(u32, @intCast(group_y)),
                @as(u32, @intCast(group_z)),
            },
        );
        for (0..data.subgroups_per_workgroup) |i| {
            try setupSubgroupBuiltins(
                data.self,
                rt,
                .{
                    @as(u32, @intCast(group_x)),
                    @as(u32, @intCast(group_y)),
                    @as(u32, @intCast(group_z)),
                },
                i,
            );
            // Upload descriptor memory, run the entry point, then read the
            // results back so buffer writes become visible.
            try data.self.syncDescriptorSets(allocator, rt, true);
            rt.callEntryPoint(allocator, entry) catch |err| switch (err) {
                // NOTE(review): OutOfBounds is swallowed — presumably benign
                // overrun from trailing partial subgroups; TODO confirm the
                // interpreter's semantics for this error.
                spv.Runtime.RuntimeError.OutOfBounds => {},
                else => return err,
            };
            try data.self.syncDescriptorSets(allocator, rt, false);
        }
    }
}
/// Mirrors bound buffer descriptors between driver memory and the SPIR-V
/// runtime: `write == true` uploads before execution, `false` reads results
/// back after. Non-buffer descriptors are skipped.
fn syncDescriptorSets(self: *Self, allocator: std.mem.Allocator, rt: *spv.Runtime, write: bool) !void {
    sets: for (self.state.sets[0..], 0..) |set, set_index| {
        if (set == null)
            continue :sets;
        bindings: for (set.?.descriptors[0..], 0..) |binding, binding_index| {
            switch (binding) {
                .buffer => |buffer_data| if (buffer_data.object) |buffer| {
                    const memory = buffer.interface.memory orelse continue :bindings;
                    const map: []u8 = @as([*]u8, @ptrCast(try memory.map(buffer_data.offset, buffer_data.size)))[0..buffer_data.size];
                    // Balance the map() on every exit path, including the
                    // error returns below.
                    // NOTE(review): assumes the runtime copies the bytes and
                    // does not retain `map` past the call — TODO confirm.
                    defer memory.unmap();
                    if (write) {
                        try rt.writeDescriptorSet(
                            allocator,
                            map,
                            @as(u32, @intCast(set_index)),
                            @as(u32, @intCast(binding_index)),
                        );
                    } else {
                        try rt.readDescriptorSet(
                            map,
                            @as(u32, @intCast(set_index)),
                            @as(u32, @intCast(binding_index)),
                        );
                    }
                },
                else => {},
            }
        }
    }
}
/// Publishes the per-workgroup SPIR-V builtins (WorkgroupSize, NumWorkgroups,
/// WorkgroupId) into the runtime before the workgroup executes.
fn setupWorkgroupBuiltins(
    self: *Self,
    rt: *spv.Runtime,
    group_count: [3]u32,
    group_id: [3]u32,
) spv.Runtime.RuntimeError!void {
    const spv_module = &self.state.pipeline.?.stages.getPtrAssertContains(.compute).module.module;
    const workgroup_size = [3]u32{
        spv_module.local_size_x,
        spv_module.local_size_y,
        spv_module.local_size_z,
    };
    // Best effort: write failures are deliberately ignored — presumably a
    // module that never references a builtin has no slot for it (TODO confirm).
    rt.writeBuiltIn(std.mem.asBytes(&workgroup_size), .WorkgroupSize) catch {};
    rt.writeBuiltIn(std.mem.asBytes(&group_count), .NumWorkgroups) catch {};
    rt.writeBuiltIn(std.mem.asBytes(&group_id), .WorkgroupId) catch {};
}
/// Computes and publishes GlobalInvocationId for the invocation at
/// `local_invocation_index` within workgroup `group_id`.
/// NOTE(review): run() passes the SUBGROUP index as `local_invocation_index`;
/// with 4 invocations per subgroup this only addresses every 4th invocation —
/// TODO confirm the interpreter expands the remaining lanes itself.
fn setupSubgroupBuiltins(
    self: *Self,
    rt: *spv.Runtime,
    group_id: [3]u32,
    local_invocation_index: usize,
) spv.Runtime.RuntimeError!void {
    const spv_module = &self.state.pipeline.?.stages.getPtrAssertContains(.compute).module.module;
    const workgroup_size = [3]u32{
        spv_module.local_size_x,
        spv_module.local_size_y,
        spv_module.local_size_z,
    };
    // First invocation of this workgroup in global coordinates.
    const local_base = [3]u32{
        workgroup_size[0] * group_id[0],
        workgroup_size[1] * group_id[1],
        workgroup_size[2] * group_id[2],
    };
    // Decompose the linear local index into (x, y, z) within the workgroup.
    var local_invocation = [3]u32{ 0, 0, 0 };
    var idx: u32 = @intCast(local_invocation_index);
    local_invocation[2] = @divTrunc(idx, workgroup_size[0] * workgroup_size[1]);
    idx -= local_invocation[2] * workgroup_size[0] * workgroup_size[1];
    local_invocation[1] = @divTrunc(idx, workgroup_size[0]);
    idx -= local_invocation[1] * workgroup_size[0];
    local_invocation[0] = idx;
    const global_invocation_index = [3]u32{
        local_base[0] + local_invocation[0],
        local_base[1] + local_invocation[1],
        local_base[2] + local_invocation[2],
    };
    // Best effort: ignored when the module has no slot for the builtin.
    rt.writeBuiltIn(std.mem.asBytes(&global_invocation_index), .GlobalInvocationId) catch {};
}

View File

@@ -2,8 +2,6 @@ const std = @import("std");
const vk = @import("vulkan");
const base = @import("base");
const copy_routines = @import("copy_routines.zig");
const SoftDescriptorSet = @import("../SoftDescriptorSet.zig");
const SoftDevice = @import("../SoftDevice.zig");
const SoftPipeline = @import("../SoftPipeline.zig");
@@ -11,50 +9,31 @@ const SoftPipeline = @import("../SoftPipeline.zig");
const ComputeRoutines = @import("ComputeRoutines.zig");
const PipelineState = @import("PipelineState.zig");
const cmd = base.commands;
const VkError = base.VkError;
const Self = @This();
compute_routine: ComputeRoutines,
compute_routines: ComputeRoutines,
/// .graphics = 0
/// .compute = 1
pipeline_states: [2]PipelineState,
/// Declarative initial value; fields are filled in by setup() before use.
pub const init: Self = .{
    .compute_routines = undefined,
    .pipeline_states = undefined,
};
/// Second-phase initialization (after `init`): clears both pipeline-state
/// slots and binds the compute routines to the compute slot. Split from init
/// so `compute_routines` can hold a stable pointer into `pipeline_states`.
pub fn setup(self: *Self, device: *SoftDevice) void {
    for (self.pipeline_states[0..]) |*state| {
        state.* = .{
            .pipeline = null,
            .sets = [_]?*SoftDescriptorSet{null} ** base.VULKAN_MAX_DESCRIPTOR_SETS,
        };
    }
    self.compute_routines = .init(device, &self.pipeline_states[@intFromEnum(vk.PipelineBindPoint.compute)]);
}
/// Tears down the execution device; currently only the compute routines
/// hold destroyable state.
pub fn deinit(self: *Self) void {
    self.compute_routines.destroy();
}

View File

@@ -1,86 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
const base = @import("base");
const cmd = base.commands;
const VkError = base.VkError;
const SoftImage = @import("../SoftImage.zig");
pub fn clearColorImage(data: *const cmd.CommandClearColorImage) VkError!void {
const soft_image: *SoftImage = @alignCast(@fieldParentPtr("interface", data.image));
soft_image.clearRange(data.clear_color, data.range);
}
/// Copies every recorded region from `data.src` to `data.dst` through the
/// buffers' backing host-visible memory. Both buffers must have memory bound.
pub fn copyBuffer(data: *const cmd.CommandCopyBuffer) VkError!void {
    for (data.regions) |region| {
        const src_memory = data.src.memory orelse return VkError.ValidationFailed;
        const dst_memory = data.dst.memory orelse return VkError.ValidationFailed;
        const src_map: []u8 = @as([*]u8, @ptrCast(try src_memory.map(region.src_offset, region.size)))[0..region.size];
        // Unmap on every exit path: the original leaked the source mapping
        // whenever mapping the destination failed.
        defer src_memory.unmap();
        const dst_map: []u8 = @as([*]u8, @ptrCast(try dst_memory.map(region.dst_offset, region.size)))[0..region.size];
        defer dst_memory.unmap();
        @memcpy(dst_map, src_map);
    }
}
/// Image-to-image copy is not implemented yet; warn and report success so
/// callers are not blocked on the missing feature.
pub fn copyImage(data: *const cmd.CommandCopyImage) VkError!void {
    std.log.scoped(.commandExecutor).warn("FIXME: implement image to image copy", .{});
    _ = data;
}
/// Copies image texels into a buffer, region by region, following
/// VkBufferImageCopy semantics: `image_extent` — not the full image size —
/// defines how many texels are transferred per region.
pub fn copyImageToBuffer(data: *const cmd.CommandCopyImageToBuffer) VkError!void {
    for (data.regions) |region| {
        const src_memory = data.src.memory orelse return VkError.ValidationFailed;
        const dst_memory = data.dst.memory orelse return VkError.ValidationFailed;
        const pixel_size: u32 = @intCast(data.src.getPixelSize());
        const image_row_pitch: u32 = data.src.extent.width * pixel_size;
        const image_size: u32 = @intCast(data.src.getTotalSize());
        // buffer_row_length == 0 means "tightly packed", i.e. the region width.
        const buffer_row_length: u32 = if (region.buffer_row_length != 0) region.buffer_row_length else region.image_extent.width;
        const buffer_row_pitch: u32 = buffer_row_length * pixel_size;
        const buffer_size: u32 = buffer_row_pitch * region.image_extent.height * region.image_extent.depth;
        const src_map: []u8 = @as([*]u8, @ptrCast(try src_memory.map(0, image_size)))[0..image_size];
        // Unmap on every exit path; the original leaked the source mapping
        // when mapping the destination failed.
        defer src_memory.unmap();
        const dst_map: []u8 = @as([*]u8, @ptrCast(try dst_memory.map(region.buffer_offset, buffer_size)))[0..buffer_size];
        defer dst_memory.unmap();
        const row_size = region.image_extent.width * pixel_size;
        // Iterate the REGION extent: the original walked the whole source
        // image, overrunning `dst_map` whenever the region was smaller than
        // the image.
        for (0..region.image_extent.depth) |z| {
            for (0..region.image_extent.height) |y| {
                const z_as_u32: u32 = @intCast(z);
                const y_as_u32: u32 = @intCast(y);
                const src_offset = ((@as(u32, @intCast(region.image_offset.z)) + z_as_u32) * data.src.extent.height + @as(u32, @intCast(region.image_offset.y)) + y_as_u32) * image_row_pitch + @as(u32, @intCast(region.image_offset.x)) * pixel_size;
                // NOTE(review): buffer addressing uses image_extent.height as
                // the buffer image height; VkBufferImageCopy.buffer_image_height
                // is ignored — confirm whether it is honored elsewhere.
                const dst_offset = (z_as_u32 * buffer_row_length * region.image_extent.height + y_as_u32 * buffer_row_length) * pixel_size;
                @memcpy(dst_map[dst_offset..][0..row_size], src_map[src_offset..][0..row_size]);
            }
        }
    }
}
/// Fills `data.buffer` with a repeated 32-bit word, as vkCmdFillBuffer.
/// VK_WHOLE_SIZE is resolved against the bound memory before mapping, and
/// sizes are tracked in bytes while the fill proceeds in 4-byte words.
pub fn fillBuffer(data: *const cmd.CommandFillBuffer) VkError!void {
    const memory = data.buffer.memory orelse return VkError.ValidationFailed;
    // Resolve VK_WHOLE_SIZE first: the original passed it straight to map().
    const byte_count = if (data.size == vk.WHOLE_SIZE) memory.size - data.offset else data.size;
    const word_count: usize = @intCast(byte_count / @sizeOf(u32));
    // Slice `word_count` u32 entries; the original sliced `data.size` entries
    // (a byte count), overrunning the mapping by a factor of four.
    const memory_map: []u32 = @as([*]u32, @ptrCast(@alignCast(try memory.map(data.offset, byte_count))))[0..word_count];
    defer memory.unmap();
    @memset(memory_map, data.data);
}

View File

@@ -2,8 +2,6 @@ const std = @import("std");
const vk = @import("vulkan");
const lib = @import("lib.zig");
const cmd = @import("commands.zig");
const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
const VkError = @import("error_set.zig").VkError;
const VulkanAllocator = @import("VulkanAllocator.zig");
@@ -17,8 +15,6 @@ const Image = @import("Image.zig");
const Pipeline = @import("Pipeline.zig");
const DescriptorSet = @import("DescriptorSet.zig");
const COMMAND_BUFFER_BASE_CAPACITY = 256;
const State = enum {
Initial,
Recording,
@@ -35,7 +31,6 @@ pool: *CommandPool,
state: State,
begin_info: ?vk.CommandBufferBeginInfo,
host_allocator: VulkanAllocator,
commands: std.ArrayList(cmd.Command),
state_mutex: std.Thread.Mutex,
vtable: *const VTable,
@@ -49,6 +44,7 @@ pub const DispatchTable = struct {
copyBuffer: *const fn (*Self, *Buffer, *Buffer, []const vk.BufferCopy) VkError!void,
copyImage: *const fn (*Self, *Image, vk.ImageLayout, *Image, vk.ImageLayout, []const vk.ImageCopy) VkError!void,
copyImageToBuffer: *const fn (*Self, *Image, vk.ImageLayout, *Buffer, []const vk.BufferImageCopy) VkError!void,
dispatch: *const fn (*Self, u32, u32, u32) VkError!void,
end: *const fn (*Self) VkError!void,
fillBuffer: *const fn (*Self, *Buffer, vk.DeviceSize, vk.DeviceSize, u32) VkError!void,
reset: *const fn (*Self, vk.CommandBufferResetFlags) VkError!void,
@@ -68,7 +64,6 @@ pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.Comma
.state = .Initial,
.begin_info = null,
.host_allocator = VulkanAllocator.from(allocator).cloneWithScope(.object),
.commands = std.ArrayList(cmd.Command).initCapacity(allocator, COMMAND_BUFFER_BASE_CAPACITY) catch return VkError.OutOfHostMemory,
.state_mutex = .{},
.vtable = undefined,
.dispatch_table = undefined,
@@ -85,8 +80,6 @@ inline fn transitionState(self: *Self, target: State, from_allowed: []const Stat
}
pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
self.cleanCommandList();
self.commands.deinit(allocator);
self.vtable.destroy(self, allocator);
}
@@ -109,7 +102,6 @@ pub inline fn reset(self: *Self, flags: vk.CommandBufferResetFlags) VkError!void
if (!self.pool.flags.reset_command_buffer_bit) {
return VkError.ValidationFailed;
}
defer self.cleanCommandList();
self.transitionState(.Initial, &.{ .Initial, .Recording, .Executable, .Invalid }) catch return VkError.ValidationFailed;
try self.dispatch_table.reset(self, flags);
@@ -124,101 +116,44 @@ pub inline fn submit(self: *Self) VkError!void {
self.transitionState(.Pending, &.{ .Pending, .Executable }) catch return VkError.ValidationFailed;
}
/// Frees the per-command heap allocations (the duplicated region slices)
/// and empties the command list so a later clean cannot free them again.
fn cleanCommandList(self: *Self) void {
    const allocator = self.host_allocator.allocator();
    for (self.commands.items) |command| {
        switch (command) {
            .CopyBuffer => |data| allocator.free(data.regions),
            .CopyImage => |data| allocator.free(data.regions),
            .CopyImageToBuffer => |data| allocator.free(data.regions),
            else => {},
        }
    }
    // The original left the freed commands in the list; a `reset` followed by
    // `destroy` would then free the same slices twice.
    self.commands.clearRetainingCapacity();
}
// Commands ====================================================================================================
/// Records a BindDescriptorSets command: resolves the raw Vulkan handles into
/// driver DescriptorSet objects, forwards the bind to the backend dispatch
/// table, then appends the command for later replay.
pub inline fn bindDescriptorSets(self: *Self, bind_point: vk.PipelineBindPoint, first_set: u32, sets: []const vk.DescriptorSet, dynamic_offsets: []const u32) VkError!void {
const allocator = self.host_allocator.allocator();
// Fixed-size, null-padded array: replay stops at the first null entry.
var inner_sets = [_]?*DescriptorSet{null} ** lib.VULKAN_MAX_DESCRIPTOR_SETS;
for (sets, inner_sets[0..sets.len]) |set, *inner_set| {
inner_set.* = try NonDispatchable(DescriptorSet).fromHandleObject(set);
}
try self.dispatch_table.bindDescriptorSets(self, bind_point, first_set, inner_sets, dynamic_offsets);
// NOTE(review): `dynamic_offsets` is stored without duplication, unlike the
// region slices in the copy commands — the caller-owned slice may not
// outlive the recording call; confirm the lifetime or dupe it here.
self.commands.append(allocator, .{ .BindDescriptorSets = .{
.bind_point = bind_point,
.first_set = first_set,
.sets = inner_sets,
.dynamic_offsets = dynamic_offsets,
} }) catch return VkError.OutOfHostMemory;
}
/// Forwards a pipeline bind to the backend, then records it for replay.
pub inline fn bindPipeline(self: *Self, bind_point: vk.PipelineBindPoint, pipeline: *Pipeline) VkError!void {
    const allocator = self.host_allocator.allocator();
    try self.dispatch_table.bindPipeline(self, bind_point, pipeline);
    const recorded: cmd.Command = .{ .BindPipeline = .{
        .bind_point = bind_point,
        .pipeline = pipeline,
    } };
    self.commands.append(allocator, recorded) catch return VkError.OutOfHostMemory;
}
/// Records one ClearColorImage command per subresource range, forwarding
/// each range to the backend as it is recorded.
pub inline fn clearColorImage(self: *Self, image: *Image, layout: vk.ImageLayout, color: *const vk.ClearColorValue, ranges: []const vk.ImageSubresourceRange) VkError!void {
    const allocator = self.host_allocator.allocator();
    for (ranges) |range| {
        try self.dispatch_table.clearColorImage(self, image, layout, color, range);
        const recorded: cmd.Command = .{ .ClearColorImage = .{
            .image = image,
            .layout = layout,
            .clear_color = color.*,
            .range = range,
        } };
        self.commands.append(allocator, recorded) catch return VkError.OutOfHostMemory;
    }
}
/// Records a CopyBuffer command. The region slice is duplicated so it
/// outlives the caller's storage; `cleanCommandList` frees it later.
pub inline fn copyBuffer(self: *Self, src: *Buffer, dst: *Buffer, regions: []const vk.BufferCopy) VkError!void {
    const allocator = self.host_allocator.allocator();
    try self.dispatch_table.copyBuffer(self, src, dst, regions);
    const owned_regions = allocator.dupe(vk.BufferCopy, regions) catch return VkError.OutOfHostMemory;
    // Free the duplicate if append fails — the original leaked it here.
    errdefer allocator.free(owned_regions);
    self.commands.append(allocator, .{ .CopyBuffer = .{
        .src = src,
        .dst = dst,
        .regions = owned_regions,
    } }) catch return VkError.OutOfHostMemory;
}
/// Records a CopyImage command. The region slice is duplicated so it
/// outlives the caller's storage; `cleanCommandList` frees it later.
pub inline fn copyImage(self: *Self, src: *Image, src_layout: vk.ImageLayout, dst: *Image, dst_layout: vk.ImageLayout, regions: []const vk.ImageCopy) VkError!void {
    const allocator = self.host_allocator.allocator();
    try self.dispatch_table.copyImage(self, src, src_layout, dst, dst_layout, regions);
    const owned_regions = allocator.dupe(vk.ImageCopy, regions) catch return VkError.OutOfHostMemory;
    // Free the duplicate if append fails — the original leaked it here.
    errdefer allocator.free(owned_regions);
    self.commands.append(allocator, .{ .CopyImage = .{
        .src = src,
        .src_layout = src_layout,
        .dst = dst,
        .dst_layout = dst_layout,
        .regions = owned_regions,
    } }) catch return VkError.OutOfHostMemory;
}
/// Records a CopyImageToBuffer command. The region slice is duplicated so it
/// outlives the caller's storage; `cleanCommandList` frees it later.
pub inline fn copyImageToBuffer(self: *Self, src: *Image, src_layout: vk.ImageLayout, dst: *Buffer, regions: []const vk.BufferImageCopy) VkError!void {
    const allocator = self.host_allocator.allocator();
    try self.dispatch_table.copyImageToBuffer(self, src, src_layout, dst, regions);
    const owned_regions = allocator.dupe(vk.BufferImageCopy, regions) catch return VkError.OutOfHostMemory;
    // Free the duplicate if append fails — the original leaked it here.
    errdefer allocator.free(owned_regions);
    self.commands.append(allocator, .{ .CopyImageToBuffer = .{
        .src = src,
        .src_layout = src_layout,
        .dst = dst,
        .regions = owned_regions,
    } }) catch return VkError.OutOfHostMemory;
}
/// Forwards a compute dispatch (workgroup counts per dimension) straight to
/// the backend dispatch table; nothing is appended to the command list here.
pub inline fn dispatch(self: *Self, group_count_x: u32, group_count_y: u32, group_count_z: u32) VkError!void {
try self.dispatch_table.dispatch(self, group_count_x, group_count_y, group_count_z);
}
/// Records a FillBuffer command. VK_WHOLE_SIZE is resolved at record time so
/// replay code always sees a concrete byte count.
pub inline fn fillBuffer(self: *Self, buffer: *Buffer, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) VkError!void {
    const allocator = self.host_allocator.allocator();
    try self.dispatch_table.fillBuffer(self, buffer, offset, size, data);
    self.commands.append(allocator, .{ .FillBuffer = .{
        .buffer = buffer,
        .offset = offset,
        // VK_WHOLE_SIZE means "from offset to the end of the buffer": the
        // original stored the full buffer size, overshooting by `offset`.
        .size = if (size == vk.WHOLE_SIZE) buffer.size - offset else size,
        .data = data,
    } }) catch return VkError.OutOfHostMemory;
}
pub inline fn resetEvent(self: *Self, event: *Event, stage: vk.PipelineStageFlags) VkError!void {

View File

@@ -47,11 +47,11 @@ pub fn Dispatchable(comptime T: type) type {
pub inline fn fromHandle(vk_handle: anytype) VkError!*Self {
const handle = @intFromEnum(vk_handle);
if (handle == 0) {
return VkError.ValidationFailed;
return VkError.InvalidHandleDrv;
}
const dispatchable: *Self = @ptrFromInt(handle);
if (dispatchable.object_type != T.ObjectType) {
return VkError.ValidationFailed;
return VkError.InvalidHandleDrv;
}
return dispatchable;
}

View File

@@ -79,11 +79,11 @@ pub inline fn getClearFormat(self: *Self) vk.Format {
.r32g32b32a32_sfloat;
}
pub inline fn getPixelSize(self: *Self) usize {
pub inline fn getPixelSize(self: *const Self) usize {
return lib.vku.vkuFormatTexelBlockSize(@intCast(@intFromEnum(self.format)));
}
pub inline fn getTotalSize(self: *Self) usize {
pub inline fn getTotalSize(self: *const Self) usize {
const pixel_size = self.getPixelSize();
return self.extent.width * self.extent.height * self.extent.depth * pixel_size;
}
@@ -92,7 +92,7 @@ pub inline fn getFormatPixelSize(format: vk.Format) usize {
return lib.vku.vkuFormatTexelBlockSize(@intCast(@intFromEnum(format)));
}
pub inline fn getFormatTotalSize(self: *Self, format: vk.Format) usize {
pub inline fn getFormatTotalSize(self: *const Self, format: vk.Format) usize {
const pixel_size = self.getFormatPixelSize(format);
return self.extent.width * self.extent.height * self.extent.depth * pixel_size;
}

View File

@@ -42,11 +42,11 @@ pub fn NonDispatchable(comptime T: type) type {
pub inline fn fromHandle(vk_handle: anytype) VkError!*Self {
const handle = @intFromEnum(vk_handle);
if (handle == 0) {
return VkError.ValidationFailed;
return VkError.InvalidHandleDrv;
}
const non_dispatchable: *Self = @ptrFromInt(handle);
if (non_dispatchable.object_type != T.ObjectType) {
return VkError.ValidationFailed;
return VkError.InvalidHandleDrv;
}
return non_dispatchable;
}

View File

@@ -1,94 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
const lib = @import("lib.zig");
const Buffer = @import("Buffer.zig");
const Image = @import("Image.zig");
const Pipeline = @import("Pipeline.zig");
const DescriptorSet = @import("DescriptorSet.zig");
/// Recorded payload of vkCmdBindDescriptorSets.
pub const CommandBindDescriptorSets = struct {
bind_point: vk.PipelineBindPoint,
/// Index of the first set being bound.
first_set: u32,
/// Null-padded fixed-size array; replay stops at the first null entry.
sets: [lib.VULKAN_MAX_DESCRIPTOR_SETS]?*DescriptorSet,
/// NOTE(review): borrowed slice, not duplicated at record time — verify lifetime.
dynamic_offsets: []const u32,
};
/// Recorded payload of vkCmdBindPipeline.
pub const CommandBindPipeline = struct {
bind_point: vk.PipelineBindPoint,
pipeline: *Pipeline,
};
/// Recorded payload of vkCmdBindVertexBuffers: parallel buffer/offset slices
/// starting at binding index `first_binding`.
pub const CommandBindVertexBuffer = struct {
buffers: []*const Buffer,
offsets: []vk.DeviceSize,
first_binding: u32,
};
/// Recorded payload of vkCmdClearColorImage for a single subresource range
/// (one command is recorded per range).
pub const CommandClearColorImage = struct {
image: *Image,
layout: vk.ImageLayout,
clear_color: vk.ClearColorValue,
range: vk.ImageSubresourceRange,
};
/// Recorded payload of vkCmdCopyBuffer. `regions` is a heap duplicate owned
/// by the command buffer (freed in cleanCommandList).
pub const CommandCopyBuffer = struct {
src: *Buffer,
dst: *Buffer,
regions: []const vk.BufferCopy,
};
/// Recorded payload of vkCmdCopyImage. `regions` is a heap duplicate owned
/// by the command buffer (freed in cleanCommandList).
pub const CommandCopyImage = struct {
src: *Image,
src_layout: vk.ImageLayout,
dst: *Image,
dst_layout: vk.ImageLayout,
regions: []const vk.ImageCopy,
};
/// Recorded payload of vkCmdCopyImageToBuffer. `regions` is a heap duplicate
/// owned by the command buffer (freed in cleanCommandList).
pub const CommandCopyImageToBuffer = struct {
src: *Image,
src_layout: vk.ImageLayout,
dst: *Buffer,
regions: []const vk.BufferImageCopy,
};
/// Recorded payload of vkCmdDraw.
pub const CommandDraw = struct {
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
};
/// Recorded payload of vkCmdDrawIndexed.
pub const CommandDrawIndexed = struct {
index_count: u32,
instance_count: u32,
first_index: u32,
/// Signed offset added to each index before vertex fetch.
vertex_offset: i32,
first_instance: u32,
};
/// Recorded payload of vkCmdDrawIndexedIndirect: `count` draw structures read
/// from `buffer` at `offset`, `stride` bytes apart.
pub const CommandDrawIndexedIndirect = struct {
buffer: *Buffer,
offset: vk.DeviceSize,
count: u32,
stride: u32,
};
/// Recorded payload of vkCmdDrawIndirect: `count` draw structures read from
/// `buffer` at `offset`, `stride` bytes apart.
pub const CommandDrawIndirect = struct {
buffer: *Buffer,
offset: vk.DeviceSize,
count: u32,
stride: u32,
};
/// Recorded payload of vkCmdFillBuffer. `size` is in bytes; the recorder is
/// expected to resolve VK_WHOLE_SIZE before storing it here.
pub const CommandFillBuffer = struct {
buffer: *Buffer,
offset: vk.DeviceSize,
size: vk.DeviceSize,
/// 32-bit word repeated across the filled range.
data: u32,
};
/// Tagged union of every command a soft command buffer can record; the
/// executor switches on the active tag at replay time.
pub const Command = union(enum) {
BindDescriptorSets: CommandBindDescriptorSets,
BindPipeline: CommandBindPipeline,
BindVertexBuffer: CommandBindVertexBuffer,
ClearColorImage: CommandClearColorImage,
CopyBuffer: CommandCopyBuffer,
CopyImage: CommandCopyImage,
CopyImageToBuffer: CommandCopyImageToBuffer,
Draw: CommandDraw,
DrawIndexed: CommandDrawIndexed,
DrawIndexedIndirect: CommandDrawIndexedIndirect,
DrawIndirect: CommandDrawIndirect,
FillBuffer: CommandFillBuffer,
};

View File

@@ -50,6 +50,10 @@ pub const VkError = error{
IncompatibleShaderBinaryExt,
PipelineBinaryMissingKhr,
NotEnoughSpaceKhr,
// ====== Internal errors
InvalidHandleDrv,
InvalidPipelineDrv,
InvalidDeviceMemoryDrv,
};
pub inline fn errorLogger(err: VkError) void {
@@ -80,7 +84,6 @@ pub inline fn toVkResult(err: VkError) vk.Result {
VkError.TooManyObjects => .error_too_many_objects,
VkError.FormatNotSupported => .error_format_not_supported,
VkError.FragmentedPool => .error_fragmented_pool,
VkError.Unknown => .error_unknown,
VkError.ValidationFailed => .error_validation_failed,
VkError.OutOfPoolMemory => .error_out_of_pool_memory,
VkError.InvalidExternalHandle => .error_invalid_external_handle,
@@ -111,5 +114,7 @@ pub inline fn toVkResult(err: VkError) vk.Result {
VkError.IncompatibleShaderBinaryExt => .incompatible_shader_binary_ext,
VkError.PipelineBinaryMissingKhr => .pipeline_binary_missing_khr,
VkError.NotEnoughSpaceKhr => .error_not_enough_space_khr,
VkError.InvalidHandleDrv => .error_validation_failed,
else => .error_unknown,
};
}

View File

@@ -7,7 +7,6 @@ pub const vku = @cImport({
@cInclude("vulkan/utility/vk_format_utils.h");
});
pub const commands = @import("commands.zig");
pub const errors = @import("error_set.zig");
pub const lib_vulkan = @import("lib_vulkan.zig");
pub const logger = @import("logger/logger.zig");

View File

@@ -1429,7 +1429,7 @@ pub export fn strollGetPipelineCacheData(p_device: vk.Device, p_cache: vk.Pipeli
_ = size;
_ = data;
return .error_unknown;
return .success;
}
pub export fn strollGetQueryPoolResults(
@@ -1699,7 +1699,7 @@ pub export fn strollCmdBindPipeline(p_cmd: vk.CommandBuffer, bind_point: vk.Pipe
defer entryPointEndLogTrace();
const cmd = Dispatchable(CommandBuffer).fromHandleObject(p_cmd) catch |err| return errorLogger(err);
const pipeline = Dispatchable(Pipeline).fromHandleObject(p_pipeline) catch |err| return errorLogger(err);
const pipeline = NonDispatchable(Pipeline).fromHandleObject(p_pipeline) catch |err| return errorLogger(err);
cmd.bindPipeline(bind_point, pipeline) catch |err| return errorLogger(err);
}
@@ -1860,13 +1860,7 @@ pub export fn strollCmdDispatch(p_cmd: vk.CommandBuffer, group_count_x: u32, gro
defer entryPointEndLogTrace();
const cmd = Dispatchable(CommandBuffer).fromHandleObject(p_cmd) catch |err| return errorLogger(err);
notImplementedWarning();
_ = cmd;
_ = group_count_x;
_ = group_count_y;
_ = group_count_z;
cmd.dispatch(group_count_x, group_count_y, group_count_z) catch |err| return errorLogger(err);
}
pub export fn strollCmdDispatchIndirect(p_cmd: vk.CommandBuffer, p_buffer: vk.Buffer, offset: vk.DeviceSize) callconv(vk.vulkan_call_conv) void {