From 96f69de54f1cf34382bf030f6bac140230b20b03 Mon Sep 17 00:00:00 2001
From: Kbz-8
Date: Fri, 5 Dec 2025 00:36:14 +0100
Subject: [PATCH] fixing lots of minor issues

---
 build.zig                      | 12 ++++++++
 src/soft/SoftFence.zig         |  7 ++---
 src/soft/SoftQueue.zig         | 52 +++++++++++++++++++++-------------
 src/vulkan/Dispatchable.zig    |  4 ++-
 src/vulkan/NonDispatchable.zig |  4 ++-
 src/vulkan/RefCounter.zig      | 23 +++++++++++++++
 src/vulkan/VulkanAllocator.zig |  5 ++--
 src/vulkan/lib.zig             |  1 +
 8 files changed, 78 insertions(+), 30 deletions(-)
 create mode 100644 src/vulkan/RefCounter.zig

diff --git a/build.zig b/build.zig
index 3d83284..da421da 100644
--- a/build.zig
+++ b/build.zig
@@ -153,6 +153,18 @@ pub fn build(b: *std.Build) !void {
 
         const run_cts_step = b.step(b.fmt("test-conformance-{s}", .{impl.name}), b.fmt("Run Vulkan conformance tests for {s}", .{impl.name}));
         run_cts_step.dependOn(&run_cts.step);
+
+        const run_gdb_cts = b.addSystemCommand(&[_][]const u8{
+            "gdb",
+            "--args",
+            try cts_exe_path.getPath3(b, null).toString(b.allocator),
+            b.fmt("--deqp-caselist-file={s}", .{try cts.path("mustpass/1.0.0/vk-default.txt").getPath3(b, null).toString(b.allocator)}),
+            b.fmt("--deqp-vk-library-path={s}", .{b.getInstallPath(.lib, lib.out_lib_filename)}),
+        });
+        run_gdb_cts.step.dependOn(&lib_install.step);
+
+        const run_cts_gdb_step = b.step(b.fmt("test-conformance-{s}-gdb", .{impl.name}), b.fmt("Run Vulkan conformance tests for {s} with GDB", .{impl.name}));
+        run_cts_gdb_step.dependOn(&run_gdb_cts.step);
     }
 
     const autodoc_test = b.addObject(.{
diff --git a/src/soft/SoftFence.zig b/src/soft/SoftFence.zig
index 75f9214..f1930c5 100644
--- a/src/soft/SoftFence.zig
+++ b/src/soft/SoftFence.zig
@@ -12,8 +12,6 @@ interface: Interface,
 mutex: std.Thread.Mutex,
 condition: std.Thread.Condition,
 is_signaled: std.atomic.Value(bool),
-/// Used by impl queues to know when the fence should be signaled
-concurrent_submits_count: std.atomic.Value(usize),
 
 pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.FenceCreateInfo) VkError!*Self {
     const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -34,7 +32,6 @@ pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.Fen
         .mutex = std.Thread.Mutex{},
         .condition = std.Thread.Condition{},
         .is_signaled = std.atomic.Value(bool).init(info.flags.signaled_bit),
-        .concurrent_submits_count = std.atomic.Value(usize).init(0),
     };
     return self;
 }
@@ -46,7 +43,7 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
 
 pub fn getStatus(interface: *Interface) VkError!void {
     const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    if (!self.is_signaled.load(.monotonic)) {
+    if (!self.is_signaled.load(.acquire)) {
         return VkError.NotReady;
     }
 }
@@ -64,7 +61,7 @@ pub fn signal(interface: *Interface) VkError!void {
 
 pub fn wait(interface: *Interface, timeout: u64) VkError!void {
     const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    if (self.is_signaled.load(.monotonic)) return;
+    if (self.is_signaled.load(.acquire)) return;
     if (timeout == 0) return VkError.Timeout;
 
     self.mutex.lock();
diff --git a/src/soft/SoftQueue.zig b/src/soft/SoftQueue.zig
index 0240cda..404917f 100644
--- a/src/soft/SoftQueue.zig
+++ b/src/soft/SoftQueue.zig
@@ -2,23 +2,21 @@ const std = @import("std");
 const vk = @import("vulkan");
 const base = @import("base");
 
+const RefCounter = base.RefCounter;
+
 const Executor = @import("Executor.zig");
 const Dispatchable = base.Dispatchable;
 const CommandBuffer = base.CommandBuffer;
 
 const SoftDevice = @import("SoftDevice.zig");
-const SoftDeviceMemory = @import("SoftDeviceMemory.zig");
-const SoftFence = @import("SoftFence.zig");
-
 const VkError = base.VkError;
 
 const Self = @This();
 
 pub const Interface = base.Queue;
 
 interface: Interface,
-wait_group: std.Thread.WaitGroup,
-worker_mutex: std.Thread.Mutex,
+lock: std.Thread.RwLock,
 
 pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, family_index: u32, flags: vk.DeviceQueueCreateFlags) VkError!*Interface {
     const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -34,8 +32,7 @@ pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, fa
 
     self.* = .{
         .interface = interface,
-        .wait_group = .{},
-        .worker_mutex = .{},
+        .lock = .{},
     };
     return &self.interface;
 }
@@ -54,30 +51,46 @@ pub fn bindSparse(interface: *Interface, info: []const vk.BindSparseInfo, fence:
 }
 
 pub fn submit(interface: *Interface, infos: []Interface.SubmitInfo, p_fence: ?*base.Fence) VkError!void {
-    var self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    var soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
+    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
+    const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
 
-    if (p_fence) |fence| {
-        const soft_fence: *SoftFence = @alignCast(@fieldParentPtr("interface", fence));
-        soft_fence.concurrent_submits_count = std.atomic.Value(usize).init(infos.len);
-    }
+    const allocator = soft_device.device_allocator.allocator();
+
+    // Hold the shared lock while spawning so `waitIdle` cannot take the exclusive lock before the runners start
+    self.lock.lockShared();
+    defer self.lock.unlockShared();
 
     for (infos) |info| {
         // Cloning info to keep them alive until commands dispatch end
         const cloned_info: Interface.SubmitInfo = .{
-            .command_buffers = info.command_buffers.clone(soft_device.device_allocator.allocator()) catch return VkError.OutOfDeviceMemory,
+            .command_buffers = info.command_buffers.clone(allocator) catch return VkError.OutOfDeviceMemory,
         };
-        soft_device.workers.spawnWg(&self.wait_group, Self.taskRunner, .{ self, cloned_info, p_fence });
+        const runners_counter = allocator.create(RefCounter) catch return VkError.OutOfDeviceMemory;
+        runners_counter.* = .init;
+        soft_device.workers.spawn(Self.taskRunner, .{ self, cloned_info, p_fence, runners_counter }) catch return VkError.Unknown;
     }
 }
 
 pub fn waitIdle(interface: *Interface) VkError!void {
     const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    if (!self.wait_group.isDone())
-        self.wait_group.wait();
+    self.lock.lock();
+    defer self.lock.unlock();
 }
 
-fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence) void {
+fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence, runners_counter: *RefCounter) void {
+    self.lock.lockShared();
+    defer self.lock.unlockShared();
+
+    runners_counter.ref();
+    defer {
+        runners_counter.unref();
+        if (!runners_counter.hasRefs()) {
+            const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", self.interface.owner));
+            const allocator = soft_device.device_allocator.allocator();
+            allocator.destroy(runners_counter);
+        }
+    }
+
     var soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", self.interface.owner));
     defer {
         var command_buffers = info.command_buffers;
@@ -95,8 +108,7 @@ fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence) vo
     }
 
     if (p_fence) |fence| {
-        const soft_fence: *SoftFence = @alignCast(@fieldParentPtr("interface", fence));
-        if (soft_fence.concurrent_submits_count.fetchSub(1, .release) == 1) {
+        if (runners_counter.getRefsCount() == 1) {
             fence.signal() catch {};
         }
     }
diff --git a/src/vulkan/Dispatchable.zig b/src/vulkan/Dispatchable.zig
index 4e7a38c..c134ff7 100644
--- a/src/vulkan/Dispatchable.zig
+++ b/src/vulkan/Dispatchable.zig
@@ -21,17 +21,19 @@ pub fn Dispatchable(comptime T: type) type {
                 .object_type = T.ObjectType,
                 .object = object,
             };
-            std.log.debug("Created dispatchable handle at 0x{X}", .{@intFromPtr(self)});
+            std.log.debug("Created dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
            return self;
         }
 
         pub inline fn intrusiveDestroy(self: *Self, allocator: std.mem.Allocator) void {
             self.object.destroy(allocator);
             allocator.destroy(self);
+            std.log.debug("Destroyed dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
             allocator.destroy(self);
+            std.log.debug("Destroyed dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn toHandle(self: *Self) usize {
diff --git a/src/vulkan/NonDispatchable.zig b/src/vulkan/NonDispatchable.zig
index a75f16b..5423054 100644
--- a/src/vulkan/NonDispatchable.zig
+++ b/src/vulkan/NonDispatchable.zig
@@ -16,17 +16,19 @@ pub fn NonDispatchable(comptime T: type) type {
                 .object_type = T.ObjectType,
                 .object = object,
             };
-            std.log.debug("Created non dispatchable handle at 0x{X}", .{@intFromPtr(self)});
+            std.log.debug("Created non-dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
            return self;
         }
 
         pub inline fn intrusiveDestroy(self: *Self, allocator: std.mem.Allocator) void {
             self.object.destroy(allocator);
             allocator.destroy(self);
+            std.log.debug("Destroyed non-dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
             allocator.destroy(self);
+            std.log.debug("Destroyed non-dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn toHandle(self: *Self) usize {
diff --git a/src/vulkan/RefCounter.zig b/src/vulkan/RefCounter.zig
new file mode 100644
index 0000000..c41b54f
--- /dev/null
+++ b/src/vulkan/RefCounter.zig
@@ -0,0 +1,23 @@
+const std = @import("std");
+
+const Self = @This();
+
+count: std.atomic.Value(usize),
+
+pub const init: Self = .{ .count = std.atomic.Value(usize).init(0) };
+
+pub inline fn ref(self: *Self) void {
+    _ = self.count.fetchAdd(1, .monotonic);
+}
+
+pub inline fn unref(self: *Self) void {
+    _ = self.count.fetchSub(1, .monotonic);
+}
+
+pub inline fn hasRefs(self: *Self) bool {
+    return self.getRefsCount() != 0;
+}
+
+pub inline fn getRefsCount(self: *Self) usize {
+    return self.count.load(.acquire);
+}
diff --git a/src/vulkan/VulkanAllocator.zig b/src/vulkan/VulkanAllocator.zig
index 8bcac6f..aadf7e1 100644
--- a/src/vulkan/VulkanAllocator.zig
+++ b/src/vulkan/VulkanAllocator.zig
@@ -9,8 +9,6 @@ const Alignment = std.mem.Alignment;
 
 const Self = @This();
 
-var fallback_allocator: std.heap.ThreadSafeAllocator = .{ .child_allocator = std.heap.c_allocator };
-
 callbacks: ?vk.AllocationCallbacks,
 scope: vk.SystemAllocationScope,
 
@@ -39,7 +37,7 @@ pub fn from(a: Allocator) *Self {
     return self;
 }
 
-pub fn clone(self: *Self) Self {
+pub inline fn clone(self: *Self) Self {
     return self.cloneWithScope(self.scope);
 }
 
@@ -91,5 +89,6 @@ fn free(context: *anyopaque, ptr: []u8, alignment: Alignment, ret_addr: usize) v
 }
 
 inline fn getFallbackAllocator() std.mem.Allocator {
+    var fallback_allocator: std.heap.ThreadSafeAllocator = .{ .child_allocator = std.heap.c_allocator };
     return fallback_allocator.allocator();
 }
diff --git a/src/vulkan/lib.zig b/src/vulkan/lib.zig
index 5217ede..2358cd9 100644
--- a/src/vulkan/lib.zig
+++ b/src/vulkan/lib.zig
@@ -15,6 +15,7 @@ pub const Dispatchable = @import("Dispatchable.zig").Dispatchable;
 pub const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
 pub const VkError = errors.VkError;
 pub const VulkanAllocator = @import("VulkanAllocator.zig");
+pub const RefCounter = @import("RefCounter.zig");
 
 pub const CommandBuffer = @import("CommandBuffer.zig");
 pub const Device = @import("Device.zig");
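
Usage sketch (illustrative only, not part of the diff above): the pattern this patch relies on is that each runner takes a reference on a heap-allocated RefCounter, releases it when its work is done, and whichever release leaves the counter with no references frees it. The snippet below is a minimal, self-contained Zig approximation of that idea; `Counter` mirrors the new src/vulkan/RefCounter.zig, `worker` loosely stands in for SoftQueue.taskRunner, and the direct std.Thread.spawn call replaces the thread pool and fence handling of the real code, so treat every name here as an assumption rather than repository API.

const std = @import("std");

// Mirrors src/vulkan/RefCounter.zig: an atomic counter with ref/unref.
const Counter = struct {
    count: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),

    fn ref(self: *Counter) void {
        _ = self.count.fetchAdd(1, .monotonic);
    }

    fn unref(self: *Counter) void {
        _ = self.count.fetchSub(1, .monotonic);
    }

    fn hasRefs(self: *Counter) bool {
        return self.count.load(.acquire) != 0;
    }
};

// Stand-in for SoftQueue.taskRunner: hold a reference for the duration of
// the work and free the counter once the last reference is released.
fn worker(allocator: std.mem.Allocator, counter: *Counter) void {
    counter.ref();
    defer {
        counter.unref();
        if (!counter.hasRefs()) allocator.destroy(counter);
    }
    // ... command buffer execution would happen here ...
}

pub fn main() !void {
    const allocator = std.heap.page_allocator;

    // One counter per submission, as in SoftQueue.submit.
    const counter = try allocator.create(Counter);
    counter.* = .{};

    var thread = try std.Thread.spawn(.{}, worker, .{ allocator, counter });
    thread.join();
}

As in the patch, each counter is created for one cloned submit info and handed to a single runner, which is what keeps the ref/unref pairing and the final destroy simple.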