fixing lots of minor issues

2025-12-05 00:36:14 +01:00
parent 4b8aae9eb9
commit 96f69de54f
8 changed files with 78 additions and 30 deletions

View File

@@ -153,6 +153,18 @@ pub fn build(b: *std.Build) !void {
         const run_cts_step = b.step(b.fmt("test-conformance-{s}", .{impl.name}), b.fmt("Run Vulkan conformance tests for {s}", .{impl.name}));
         run_cts_step.dependOn(&run_cts.step);
+        const run_gdb_cts = b.addSystemCommand(&[_][]const u8{
+            "gdb",
+            "--args",
+            try cts_exe_path.getPath3(b, null).toString(b.allocator),
+            b.fmt("--deqp-caselist-file={s}", .{try cts.path("mustpass/1.0.0/vk-default.txt").getPath3(b, null).toString(b.allocator)}),
+            b.fmt("--deqp-vk-library-path={s}", .{b.getInstallPath(.lib, lib.out_lib_filename)}),
+        });
+        run_gdb_cts.step.dependOn(&lib_install.step);
+        const run_cts_gdb_step = b.step(b.fmt("test-conformance-{s}-gdb", .{impl.name}), b.fmt("Run Vulkan conformance tests for {s} with GDB", .{impl.name}));
+        run_cts_gdb_step.dependOn(&run_gdb_cts.step);
     }
     const autodoc_test = b.addObject(.{
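A note on the new step: it mirrors the plain CTS runner but wraps the test binary in `gdb --args`, so a failing conformance run can be relaunched under the debugger with `zig build test-conformance-<impl>-gdb` (substituting the implementation name used above).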

View File

@@ -12,8 +12,6 @@ interface: Interface,
 mutex: std.Thread.Mutex,
 condition: std.Thread.Condition,
 is_signaled: std.atomic.Value(bool),
-/// Used by impl queues to know when the fence should be signaled
-concurrent_submits_count: std.atomic.Value(usize),
 
 pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.FenceCreateInfo) VkError!*Self {
     const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -34,7 +32,6 @@ pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.Fen
         .mutex = std.Thread.Mutex{},
         .condition = std.Thread.Condition{},
         .is_signaled = std.atomic.Value(bool).init(info.flags.signaled_bit),
-        .concurrent_submits_count = std.atomic.Value(usize).init(0),
     };
     return self;
 }
@@ -46,7 +43,7 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
 pub fn getStatus(interface: *Interface) VkError!void {
     const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    if (!self.is_signaled.load(.monotonic)) {
+    if (!self.is_signaled.load(.acquire)) {
         return VkError.NotReady;
     }
 }
@@ -64,7 +61,7 @@ pub fn signal(interface: *Interface) VkError!void {
 pub fn wait(interface: *Interface, timeout: u64) VkError!void {
     const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    if (self.is_signaled.load(.monotonic)) return;
+    if (self.is_signaled.load(.acquire)) return;
     if (timeout == 0) return VkError.Timeout;
     self.mutex.lock();
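The `.monotonic` → `.acquire` change is about visibility, not just the flag itself: a waiter that observes `is_signaled == true` must also observe every write the signaling thread made before it. A minimal sketch of the intended pairing, assuming the signal path stores the flag with `.release` (the `flag`/`payload` names here are illustrative, not the driver's API):

```zig
const std = @import("std");

var flag = std.atomic.Value(bool).init(false);
var payload: u32 = 0;

fn signaler() void {
    payload = 42; // work finished before signaling
    flag.store(true, .release); // publishes `payload` along with the flag
}

fn waiter() void {
    // .acquire pairs with the .release store: once `true` is observed,
    // the write to `payload` is guaranteed to be visible as well
    while (!flag.load(.acquire)) std.Thread.yield() catch {};
    std.debug.assert(payload == 42);
}

pub fn main() !void {
    const t = try std.Thread.spawn(.{}, signaler, .{});
    waiter();
    t.join();
}
```

With `.monotonic` on the load, the flag itself is still read atomically, but nothing orders the surrounding memory, so the assert above could fail.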

View File

@@ -2,23 +2,21 @@ const std = @import("std");
 const vk = @import("vulkan");
 const base = @import("base");
-const Executor = @import("Executor.zig");
+const RefCounter = base.RefCounter;
 const Dispatchable = base.Dispatchable;
 const CommandBuffer = base.CommandBuffer;
 const SoftDevice = @import("SoftDevice.zig");
 const SoftDeviceMemory = @import("SoftDeviceMemory.zig");
 const SoftFence = @import("SoftFence.zig");
 const VkError = base.VkError;
 
 const Self = @This();
 
 pub const Interface = base.Queue;
 
 interface: Interface,
-wait_group: std.Thread.WaitGroup,
-worker_mutex: std.Thread.Mutex,
+lock: std.Thread.RwLock,
 
 pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, family_index: u32, flags: vk.DeviceQueueCreateFlags) VkError!*Interface {
     const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -34,8 +32,7 @@ pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, fa
     self.* = .{
         .interface = interface,
-        .wait_group = .{},
-        .worker_mutex = .{},
+        .lock = .{},
     };
     return &self.interface;
 }
@@ -54,30 +51,46 @@ pub fn bindSparse(interface: *Interface, info: []const vk.BindSparseInfo, fence:
 }
 
 pub fn submit(interface: *Interface, infos: []Interface.SubmitInfo, p_fence: ?*base.Fence) VkError!void {
-    var self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    var soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
+    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
+    const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
-    if (p_fence) |fence| {
-        const soft_fence: *SoftFence = @alignCast(@fieldParentPtr("interface", fence));
-        soft_fence.concurrent_submits_count = std.atomic.Value(usize).init(infos.len);
-    }
+    const allocator = soft_device.device_allocator.allocator();
+    // Take the shared lock here so `waitIdle` cannot acquire it exclusively
+    // before the runners have started
+    self.lock.lockShared();
+    defer self.lock.unlockShared();
+    // A single counter shared by every runner of this submission, so the
+    // fence is only signaled once the last runner finishes
+    const runners_counter = allocator.create(RefCounter) catch return VkError.OutOfDeviceMemory;
+    runners_counter.* = .init;
     for (infos) |info| {
         // Clone the info to keep it alive until command dispatch ends
         const cloned_info: Interface.SubmitInfo = .{
-            .command_buffers = info.command_buffers.clone(soft_device.device_allocator.allocator()) catch return VkError.OutOfDeviceMemory,
+            .command_buffers = info.command_buffers.clone(allocator) catch return VkError.OutOfDeviceMemory,
         };
-        soft_device.workers.spawnWg(&self.wait_group, Self.taskRunner, .{ self, cloned_info, p_fence });
+        // Take the reference before spawning so a fast runner cannot drop the
+        // counter to zero while later runners are still being spawned
+        runners_counter.ref();
+        soft_device.workers.spawn(Self.taskRunner, .{ self, cloned_info, p_fence, runners_counter }) catch return VkError.Unknown;
     }
 }
 
 pub fn waitIdle(interface: *Interface) VkError!void {
     const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
-    if (!self.wait_group.isDone())
-        self.wait_group.wait();
+    // Runners hold the lock shared for their whole lifetime, so acquiring it
+    // exclusively blocks until every in-flight runner is done
+    self.lock.lock();
+    defer self.lock.unlock();
 }
 
-fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence) void {
+fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence, runners_counter: *RefCounter) void {
+    self.lock.lockShared();
+    defer self.lock.unlockShared();
+    defer {
+        // `unref` reports the last drop atomically, so exactly one runner
+        // signals the fence and frees the shared counter
+        if (runners_counter.unref()) {
+            if (p_fence) |fence| fence.signal() catch {};
+            const device: *SoftDevice = @alignCast(@fieldParentPtr("interface", self.interface.owner));
+            device.device_allocator.allocator().destroy(runners_counter);
+        }
+    }
     var soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", self.interface.owner));
     defer {
         var command_buffers = info.command_buffers;
@@ -95,8 +108,7 @@ fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence) vo
     }
-    if (p_fence) |fence| {
-        const soft_fence: *SoftFence = @alignCast(@fieldParentPtr("interface", fence));
-        if (soft_fence.concurrent_submits_count.fetchSub(1, .release) == 1) {
-            fence.signal() catch {};
-        }
-    }
+    // The fence is now signaled by the last runner, in the defer above
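The `waitIdle` rewrite deserves spelling out: instead of a `WaitGroup`, every runner now holds the queue's `RwLock` in shared mode for its entire lifetime, so a single exclusive acquisition doubles as "wait until idle". A stripped-down sketch of the pattern under that assumption (the `Queue`/`runner` names are illustrative, not the driver's API):

```zig
const std = @import("std");

const Queue = struct {
    lock: std.Thread.RwLock = .{},

    // Each submission runner holds the lock shared while it works
    fn runner(self: *Queue) void {
        self.lock.lockShared();
        defer self.lock.unlockShared();
        // ... execute the submitted command buffers ...
    }

    // An exclusive acquisition cannot succeed while any shared holder
    // (i.e. any in-flight runner) exists, so this blocks until idle
    fn waitIdle(self: *Queue) void {
        self.lock.lock();
        defer self.lock.unlock();
    }
};

pub fn main() !void {
    var queue: Queue = .{};
    const worker = try std.Thread.spawn(.{}, Queue.runner, .{&queue});
    queue.waitIdle(); // in this toy it may win the race; see note below
    worker.join();
}
```

As in the real `submit`, whoever hands work off must already hold the shared lock, otherwise `waitIdle` can take the exclusive lock before a freshly spawned runner acquires its shared one.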

View File

@@ -21,17 +21,19 @@ pub fn Dispatchable(comptime T: type) type {
                 .object_type = T.ObjectType,
                 .object = object,
             };
-            std.log.debug("Created dispatchable handle at 0x{X}", .{@intFromPtr(self)});
+            std.log.debug("Created dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
             return self;
         }
 
         pub inline fn intrusiveDestroy(self: *Self, allocator: std.mem.Allocator) void {
             self.object.destroy(allocator);
             allocator.destroy(self);
+            std.log.debug("Destroyed dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
             allocator.destroy(self);
+            std.log.debug("Destroyed dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn toHandle(self: *Self) usize {

View File

@@ -16,17 +16,19 @@ pub fn NonDispatchable(comptime T: type) type {
                 .object_type = T.ObjectType,
                 .object = object,
             };
-            std.log.debug("Created non dispatchable handle at 0x{X}", .{@intFromPtr(self)});
+            std.log.debug("Created non dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
             return self;
         }
 
         pub inline fn intrusiveDestroy(self: *Self, allocator: std.mem.Allocator) void {
             self.object.destroy(allocator);
             allocator.destroy(self);
+            std.log.debug("Destroyed non dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
             allocator.destroy(self);
+            std.log.debug("Destroyed non dispatchable handle of type '{s}' at 0x{X}", .{ @typeName(T), @intFromPtr(self) });
         }
 
         pub inline fn toHandle(self: *Self) usize {

src/vulkan/RefCounter.zig (new file)
View File

@@ -0,0 +1,23 @@
+const std = @import("std");
+
+const Self = @This();
+
+count: std.atomic.Value(usize),
+
+pub const init: Self = .{ .count = std.atomic.Value(usize).init(0) };
+
+pub inline fn ref(self: *Self) void {
+    _ = self.count.fetchAdd(1, .monotonic);
+}
+
+/// Drops a reference; returns true if it was the last one, so that exactly
+/// one caller takes the teardown path
+pub inline fn unref(self: *Self) bool {
+    // .release makes this holder's prior writes visible to whoever
+    // observes the final drop
+    return self.count.fetchSub(1, .release) == 1;
+}
+
+pub inline fn hasRefs(self: *Self) bool {
+    return self.getRefsCount() != 0;
+}
+
+pub inline fn getRefsCount(self: *Self) usize {
+    return self.count.load(.acquire);
+}
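For context, here is the lifecycle the queue code above relies on, as a standalone sketch: the spawning side takes a reference before each spawn, and exactly one worker (the one dropping the last reference) performs the cleanup. Everything below apart from `RefCounter` itself is hypothetical scaffolding:

```zig
const std = @import("std");
const RefCounter = @import("RefCounter.zig"); // assumes co-located file

fn worker(counter: *RefCounter, done: *std.atomic.Value(bool)) void {
    // `unref` returns true for exactly one caller: the one dropping the
    // last reference, which is then responsible for the cleanup
    if (counter.unref()) done.store(true, .release);
}

pub fn main() !void {
    var counter: RefCounter = .init;
    var done = std.atomic.Value(bool).init(false);
    var threads: [4]std.Thread = undefined;
    for (&threads) |*t| {
        counter.ref(); // reference taken before the worker starts
        t.* = try std.Thread.spawn(.{}, worker, .{ &counter, &done });
    }
    for (threads) |t| t.join();
    std.debug.assert(done.load(.acquire));
}
```

Taking the reference on the spawning side rather than inside the worker is what prevents a fast worker from dropping the count to zero while later workers are still being spawned.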

View File

@@ -9,8 +9,6 @@ const Alignment = std.mem.Alignment;
 const Self = @This();
 
-var fallback_allocator: std.heap.ThreadSafeAllocator = .{ .child_allocator = std.heap.c_allocator };
-
 callbacks: ?vk.AllocationCallbacks,
 scope: vk.SystemAllocationScope,
@@ -39,7 +37,7 @@ pub fn from(a: Allocator) *Self {
     return self;
 }
 
-pub fn clone(self: *Self) Self {
+pub inline fn clone(self: *Self) Self {
     return self.cloneWithScope(self.scope);
 }
@@ -91,5 +89,6 @@ fn free(context: *anyopaque, ptr: []u8, alignment: Alignment, ret_addr: usize) v
 }
 
 inline fn getFallbackAllocator() std.mem.Allocator {
-    return fallback_allocator.allocator();
+    // Wrap the fallback in a function-scoped container so it keeps static
+    // lifetime; a plain local `var` would dangle once this function returns
+    const S = struct {
+        var fallback_allocator: std.heap.ThreadSafeAllocator = .{ .child_allocator = std.heap.c_allocator };
+    };
+    return S.fallback_allocator.allocator();
 }
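Worth noting for reviewers: Zig has no `static` keyword for locals, so the usual idiom for scoping a global to one function is a `var` inside a nested `struct`, as in the rewritten `getFallbackAllocator` above. A bare local `var` lives on the stack, and the returned `std.mem.Allocator` stores a pointer to it, which would dangle as soon as the function returned.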

View File

@@ -15,6 +15,7 @@ pub const Dispatchable = @import("Dispatchable.zig").Dispatchable;
 pub const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
 pub const VkError = errors.VkError;
 pub const VulkanAllocator = @import("VulkanAllocator.zig");
+pub const RefCounter = @import("RefCounter.zig");
 pub const CommandBuffer = @import("CommandBuffer.zig");
 pub const Device = @import("Device.zig");