rework of the queue

This commit is contained in:
2025-11-21 23:57:35 +01:00
parent 967451a458
commit 5df8677051
11 changed files with 220 additions and 53 deletions

19
src/soft/Executor.zig git.filemode.normal_file
View File

@@ -0,0 +1,19 @@
//! Software command executor: receives commands recorded into a command buffer
//! and (eventually) carries them out on the CPU. Currently a stub — commands
//! are accepted and ignored.
const std = @import("std");
const vk = @import("vulkan");
const cmd = @import("base").commands;

const Self = @This();

/// Creates an executor. No resources are acquired yet.
pub fn init() Self {
    return .{};
}

/// Releases executor resources. Currently nothing to free.
pub fn deinit(self: *Self) void {
    _ = self;
}

/// Executes a single recorded command.
/// NOTE(review): not implemented yet — the command is accepted and ignored.
pub fn dispatch(self: *Self, command: *const cmd.Command) void {
    _ = self;
    _ = command;
}

View File

@@ -2,6 +2,8 @@ const std = @import("std");
const vk = @import("vulkan");
const base = @import("base");
const lib = @import("lib.zig");
const VkError = base.VkError;
const Device = base.Device;
@@ -21,6 +23,8 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
.getMemoryRequirements = getMemoryRequirements,
};
interface.allowed_memory_types = lib.MEMORY_TYPE_GENERIC_BIT;
self.* = .{
.interface = interface,
};
@@ -33,6 +37,14 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
}
/// Fills in the alignment requirement for this buffer based on its usage flags.
/// NOTE(review): only `alignment` is written here — confirm that `size` and
/// `memory_type_bits` are produced by the caller.
pub fn getMemoryRequirements(interface: *Interface, requirements: *vk.MemoryRequirements) void {
    requirements.alignment = lib.MEMORY_REQUIREMENTS_ALIGNMENT;
    // Texel buffer offset alignment applies to both uniform and storage texel
    // buffers (the original checked uniform_texel_buffer_bit twice).
    if (interface.usage.uniform_texel_buffer_bit or interface.usage.storage_texel_buffer_bit) {
        requirements.alignment = @max(requirements.alignment, lib.MIN_TEXEL_BUFFER_ALIGNMENT);
    }
    if (interface.usage.storage_buffer_bit) {
        requirements.alignment = @max(requirements.alignment, lib.MIN_STORAGE_BUFFER_ALIGNMENT);
    }
    if (interface.usage.uniform_buffer_bit) {
        requirements.alignment = @max(requirements.alignment, lib.MIN_UNIFORM_BUFFER_ALIGNMENT);
    }
}

View File

@@ -23,6 +23,7 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
interface.dispatch_table = &.{
.begin = begin,
.end = end,
.fillBuffer = fillBuffer,
.reset = reset,
};
@@ -38,15 +39,29 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
}
/// vkBeginCommandBuffer backend hook — intentionally a no-op.
/// NOTE(review): state transitions and begin_info bookkeeping are presumably
/// handled by the base CommandBuffer before dispatching here — confirm.
pub fn begin(interface: *Interface, info: *const vk.CommandBufferBeginInfo) VkError!void {
    // No-op
    _ = interface;
    _ = info;
}
/// vkEndCommandBuffer backend hook — intentionally a no-op for the software
/// backend; recording state is tracked by the base CommandBuffer.
pub fn end(interface: *Interface) VkError!void {
    // No-op
    _ = interface;
}
/// vkResetCommandBuffer backend hook — intentionally a no-op; the software
/// backend keeps no per-command-buffer resources to release on reset.
pub fn reset(interface: *Interface, flags: vk.CommandBufferResetFlags) VkError!void {
    // No-op
    _ = interface;
    _ = flags;
}
// Commands ====================================================================================================
/// vkCmdFillBuffer backend hook — intentionally a no-op at record time: the
/// base CommandBuffer appends the command to its list, and execution happens
/// later via the Executor during queue submission.
pub fn fillBuffer(interface: *Interface, buffer: *base.Buffer, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) VkError!void {
    // No-op
    _ = interface;
    _ = buffer;
    _ = offset;
    _ = size;
    _ = data;
}

View File

@@ -12,6 +12,8 @@ interface: Interface,
mutex: std.Thread.Mutex,
condition: std.Thread.Condition,
is_signaled: bool,
/// Used by impl queues to know when the fence should be signaled
concurrent_submits_count: std.atomic.Value(usize),
pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.FenceCreateInfo) VkError!*Self {
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -32,6 +34,7 @@ pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.Fen
.mutex = std.Thread.Mutex{},
.condition = std.Thread.Condition{},
.is_signaled = info.flags.signaled_bit,
.concurrent_submits_count = std.atomic.Value(usize).init(0),
};
return self;
}

View File

@@ -2,7 +2,12 @@ const std = @import("std");
const vk = @import("vulkan");
const base = @import("base");
const Executor = @import("Executor.zig");
const Dispatchable = base.Dispatchable;
const CommandBuffer = base.CommandBuffer;
const SoftDevice = @import("SoftDevice.zig");
const SoftDeviceMemory = @import("SoftDeviceMemory.zig");
const SoftFence = @import("SoftFence.zig");
@@ -13,7 +18,6 @@ pub const Interface = base.Queue;
interface: Interface,
wait_group: std.Thread.WaitGroup,
mutex: std.Thread.Mutex,
worker_mutex: std.Thread.Mutex,
pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, family_index: u32, flags: vk.DeviceQueueCreateFlags) VkError!*Interface {
@@ -31,7 +35,6 @@ pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, fa
self.* = .{
.interface = interface,
.wait_group = .{},
.mutex = .{},
.worker_mutex = .{},
};
return &self.interface;
@@ -50,36 +53,50 @@ pub fn bindSparse(interface: *Interface, info: []const vk.BindSparseInfo, fence:
return VkError.FeatureNotPresent;
}
/// Queues every submit batch for asynchronous execution on the device's worker
/// pool. When a fence is provided it is signaled by whichever worker finishes
/// the last batch (see `taskRunner`), or immediately when there is nothing to run.
pub fn submit(interface: *Interface, infos: []Interface.SubmitInfo, p_fence: ?*base.Fence) VkError!void {
    var self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    self.mutex.lock();
    defer self.mutex.unlock();
    var soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
    // Vulkan requires the fence to be signaled even for an empty submission,
    // and with zero batches no worker would ever decrement the counter.
    if (infos.len == 0) {
        if (p_fence) |fence| fence.signal() catch {};
        return;
    }
    if (p_fence) |fence| {
        const soft_fence: *SoftFence = @alignCast(@fieldParentPtr("interface", fence));
        // Each worker decrements this; the one reaching zero signals the fence.
        soft_fence.concurrent_submits_count = std.atomic.Value(usize).init(infos.len);
    }
    for (infos) |info| {
        // Clone the command buffer list so it outlives the caller-owned `infos`
        // (the caller frees them right after this call returns).
        const cloned_info: Interface.SubmitInfo = .{
            .command_buffers = info.command_buffers.clone(soft_device.device_allocator.allocator()) catch return VkError.OutOfDeviceMemory,
        };
        soft_device.workers.spawnWg(&self.wait_group, Self.taskRunner, .{ self, cloned_info, p_fence });
    }
}
/// Blocks until every task spawned by submit() on this queue has completed.
/// Holding the queue mutex keeps new submissions out while we drain.
pub fn waitIdle(interface: *Interface) VkError!void {
    const queue: *Self = @alignCast(@fieldParentPtr("interface", interface));
    queue.mutex.lock();
    defer queue.mutex.unlock();
    queue.wait_group.wait();
}
/// Worker-pool entry point: executes one cloned submit batch, then frees it.
/// The last runner of a fenced submission signals the fence.
fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence) void {
    var soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", self.interface.owner));
    // The command buffer list was cloned in submit(); this worker owns it.
    defer {
        var command_buffers = info.command_buffers;
        command_buffers.deinit(soft_device.device_allocator.allocator());
    }
    var executor = Executor.init();
    defer executor.deinit();
    for (info.command_buffers.items) |command_buffer| {
        // NOTE(review): a command buffer that fails its state transition is
        // silently skipped — confirm this matches the intended validation policy.
        command_buffer.submit() catch continue;
        for (command_buffer.commands.items) |command| {
            executor.dispatch(&command);
        }
    }
    if (p_fence) |fence| {
        const soft_fence: *SoftFence = @alignCast(@fieldParentPtr("interface", fence));
        // .acq_rel instead of .release: the release half publishes this batch's
        // writes, and the acquire half on the final decrement makes every other
        // batch's writes visible to this thread before the fence is signaled.
        if (soft_fence.concurrent_submits_count.fetchSub(1, .acq_rel) == 1) {
            fence.signal() catch {};
        }
    }
}

View File

@@ -2,6 +2,8 @@ const std = @import("std");
const vk = @import("vulkan");
pub const base = @import("base");
pub const Executor = @import("Executor.zig");
pub const SoftInstance = @import("SoftInstance.zig");
pub const SoftDevice = @import("SoftDevice.zig");
pub const SoftPhysicalDevice = @import("SoftPhysicalDevice.zig");
@@ -22,6 +24,19 @@ pub const VULKAN_VERSION = vk.makeApiVersion(0, 1, 0, 0);
pub const DRIVER_VERSION = vk.makeApiVersion(0, 0, 0, 1);
pub const DEVICE_ID = 0x600DCAFE;
/// Generic system memory.
pub const MEMORY_TYPE_GENERIC_BIT = 0x1;
/// 16 bytes for 128-bit vector types.
pub const MEMORY_REQUIREMENTS_ALIGNMENT = 16;
/// Vulkan 1.2 requires buffer offset alignment to be at most 256.
pub const MIN_TEXEL_BUFFER_ALIGNMENT = 256;
/// Vulkan 1.2 requires buffer offset alignment to be at most 256.
pub const MIN_UNIFORM_BUFFER_ALIGNMENT = 256;
/// Vulkan 1.2 requires buffer offset alignment to be at most 256.
pub const MIN_STORAGE_BUFFER_ALIGNMENT = 256;
pub const std_options = base.std_options;
comptime {

View File

@@ -1,10 +1,18 @@
const std = @import("std");
const vk = @import("vulkan");
const VkError = @import("error_set.zig").VkError;
const CommandPool = @import("CommandPool.zig");
const Device = @import("Device.zig");
const cmd = @import("commands.zig");
const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
const VkError = @import("error_set.zig").VkError;
const VulkanAllocator = @import("VulkanAllocator.zig");
const Device = @import("Device.zig");
const Buffer = @import("Buffer.zig");
const CommandPool = @import("CommandPool.zig");
const COMMAND_BUFFER_BASE_CAPACITY = 256;
const State = enum {
Initial,
@@ -21,6 +29,9 @@ owner: *Device,
pool: *CommandPool,
state: State,
begin_info: ?vk.CommandBufferBeginInfo,
host_allocator: VulkanAllocator,
commands: std.ArrayList(cmd.Command),
state_mutex: std.Thread.Mutex,
vtable: *const VTable,
dispatch_table: *const DispatchTable,
@@ -28,6 +39,7 @@ dispatch_table: *const DispatchTable,
/// Per-implementation hooks, invoked by the base command buffer after its own
/// recording/validation work. Every entry must be set by the implementation.
pub const DispatchTable = struct {
    begin: *const fn (*Self, *const vk.CommandBufferBeginInfo) VkError!void,
    end: *const fn (*Self) VkError!void,
    fillBuffer: *const fn (*Self, *Buffer, vk.DeviceSize, vk.DeviceSize, u32) VkError!void,
    reset: *const fn (*Self, vk.CommandBufferResetFlags) VkError!void,
};
@@ -36,12 +48,14 @@ pub const VTable = struct {
};
/// Builds a command buffer in the Initial state, bound to its pool.
/// `vtable` and `dispatch_table` are left undefined and must be filled in by
/// the implementation before use.
pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.CommandBufferAllocateInfo) VkError!Self {
    // The stale `_ = allocator;` discard was removed: `allocator` is now used
    // below, and Zig rejects discarding a parameter that is also used.
    return .{
        .owner = device,
        .pool = try NonDispatchable(CommandPool).fromHandleObject(info.command_pool),
        .state = .Initial,
        .begin_info = null,
        .host_allocator = VulkanAllocator.from(allocator).clone(),
        .commands = std.ArrayList(cmd.Command).initCapacity(allocator, COMMAND_BUFFER_BASE_CAPACITY) catch return VkError.OutOfHostMemory,
        .state_mutex = .{},
        .vtable = undefined,
        .dispatch_table = undefined,
    };
}
@@ -51,10 +65,13 @@ inline fn transitionState(self: *Self, target: State, from_allowed: []const Stat
    // NOTE(review): the current state is read BEFORE state_mutex is taken, so
    // two concurrent transitions can both pass this membership check and race
    // on the write below (TOCTOU) — consider taking the lock before the check.
    if (!std.EnumSet(State).initMany(from_allowed).contains(self.state)) {
        return error.NotAllowed;
    }
    self.state_mutex.lock();
    defer self.state_mutex.unlock();
    self.state = target;
}
/// Frees the recorded command storage, then lets the implementation release
/// the command buffer itself. Order matters: `vtable.destroy` presumably frees
/// the memory backing `self` — confirm.
/// NOTE(review): fillBuffer appends commands with host_allocator's allocator,
/// while this deinit uses the `allocator` parameter — verify they are the same.
pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
    self.commands.deinit(allocator);
    self.vtable.destroy(self, allocator);
}
@@ -89,3 +106,16 @@ pub inline fn submit(self: *Self) VkError!void {
}
self.transitionState(.Pending, &.{ .Pending, .Executable }) catch return VkError.ValidationFailed;
}
// Commands ====================================================================================================
/// Records a vkCmdFillBuffer command and forwards it to the implementation.
/// NOTE(review): the append uses host_allocator's allocator while destroy()
/// frees `commands` with its `allocator` parameter — confirm both are the same.
pub inline fn fillBuffer(self: *Self, buffer: *Buffer, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) VkError!void {
    const allocator = self.host_allocator.allocator();
    self.commands.append(allocator, .{ .FillBuffer = .{
        .buffer = buffer,
        .offset = offset,
        .size = size,
        .data = data,
    } }) catch return VkError.OutOfHostMemory;
    // If the implementation rejects the command, drop it from the recorded
    // stream so `commands` only ever holds accepted commands.
    errdefer _ = self.commands.pop();
    try self.dispatch_table.fillBuffer(self, buffer, offset, size, data);
}

View File

@@ -85,7 +85,9 @@ pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) VkError!void {
}
/// Creates a buffer through the implementation's dispatch table.
/// The leftover pre-rework `return self.dispatch_table...` line was removed:
/// it made the new body unreachable.
pub inline fn createBuffer(self: *Self, allocator: std.mem.Allocator, info: *const vk.BufferCreateInfo) VkError!*Buffer {
    const buffer = try self.dispatch_table.createBuffer(self, allocator, info);
    // Implementations must advertise at least one memory type the buffer can
    // bind to (see SoftBuffer.create setting allowed_memory_types).
    std.debug.assert(buffer.allowed_memory_types != 0);
    return buffer;
}
pub inline fn createFence(self: *Self, allocator: std.mem.Allocator, info: *const vk.FenceCreateInfo) VkError!*Fence {

View File

@@ -1,10 +1,13 @@
const std = @import("std");
const vk = @import("vulkan");
const Dispatchable = @import("Dispatchable.zig").Dispatchable;
const VkError = @import("error_set.zig").VkError;
const VulkanAllocator = @import("VulkanAllocator.zig");
const CommandBuffer = @import("CommandBuffer.zig");
const Device = @import("Device.zig");
const Dispatchable = @import("Dispatchable.zig").Dispatchable;
const Fence = @import("Fence.zig");
const Self = @This();
@@ -14,23 +17,58 @@ owner: *Device,
family_index: u32,
index: u32,
flags: vk.DeviceQueueCreateFlags,
host_allocator: VulkanAllocator,
dispatch_table: *const DispatchTable,
/// Per-implementation function table; every entry must be set before the queue
/// is used. (The duplicate pre-rework `submit` signature was removed — a struct
/// cannot declare the same field twice.)
pub const DispatchTable = struct {
    bindSparse: *const fn (*Self, []const vk.BindSparseInfo, ?*Fence) VkError!void,
    submit: *const fn (*Self, []SubmitInfo, ?*Fence) VkError!void,
    waitIdle: *const fn (*Self) VkError!void,
};
/// Internal, allocator-owned mirror of vk.SubmitInfo.
pub const SubmitInfo = struct {
    command_buffers: std.ArrayList(*CommandBuffer),
    // TODO: complete

    /// Converts raw vkQueueSubmit infos into owned SubmitInfo entries.
    /// Batches with no command buffers are dropped. On error, everything
    /// allocated so far — including the inner lists — is freed.
    fn initBlob(allocator: std.mem.Allocator, infos: []const vk.SubmitInfo) VkError!std.ArrayList(SubmitInfo) {
        var self = std.ArrayList(SubmitInfo).initCapacity(allocator, infos.len) catch return VkError.OutOfHostMemory;
        // deinitBlob also frees the command_buffers of entries already appended;
        // a plain `self.deinit(allocator)` here would leak them.
        errdefer deinitBlob(allocator, &self);
        for (infos) |info| {
            if (info.command_buffer_count == 0 or info.p_command_buffers == null) continue;
            var command_buffers = std.ArrayList(*CommandBuffer).initCapacity(allocator, info.command_buffer_count) catch return VkError.OutOfHostMemory;
            // Covers error exits of this iteration only (bad handle / OOM below);
            // once appended, ownership passes to `self` and the outer errdefer.
            errdefer command_buffers.deinit(allocator);
            for (info.p_command_buffers.?[0..info.command_buffer_count]) |vk_command_buffer| {
                command_buffers.append(allocator, try Dispatchable(CommandBuffer).fromHandleObject(vk_command_buffer)) catch return VkError.OutOfHostMemory;
            }
            self.append(allocator, .{ .command_buffers = command_buffers }) catch return VkError.OutOfHostMemory;
        }
        return self;
    }

    /// Frees a list produced by initBlob, including each entry's inner list.
    fn deinitBlob(allocator: std.mem.Allocator, self: *std.ArrayList(SubmitInfo)) void {
        // Capture by pointer so deinit can mutate the entry in place; this
        // replaces the previous @constCast workaround.
        for (self.items) |*submit_info| {
            submit_info.command_buffers.deinit(allocator);
        }
        self.deinit(allocator);
    }
};
/// Initializes a queue owned by `device`. The dispatch table is left undefined
/// and must be filled in by the implementation before the queue is used.
pub fn init(allocator: std.mem.Allocator, device: *Device, index: u32, family_index: u32, flags: vk.DeviceQueueCreateFlags) VkError!Self {
    std.log.scoped(.vkCreateDevice).info("Creating device queue with family index {d} and index {d}", .{ family_index, index });
    // The stale `_ = allocator;` discard was removed: `allocator` is now used
    // below, and Zig rejects discarding a parameter that is also used.
    return .{
        .owner = device,
        .family_index = family_index,
        .index = index,
        .flags = flags,
        .host_allocator = VulkanAllocator.from(allocator).clone(),
        .dispatch_table = undefined,
    };
}
@@ -39,16 +77,13 @@ pub inline fn bindSparse(self: *Self, info: []const vk.BindSparseInfo, fence: ?*
try self.dispatch_table.bindSparse(self, info, fence);
}
/// Translates raw Vulkan submit infos into internal SubmitInfo objects and
/// forwards them to the implementation. The internal copies only live through
/// the dispatch call — implementations clone whatever they keep (see SoftQueue).
/// The leftover pre-rework implementation (manual per-command-buffer submit
/// loop) was removed: it duplicated the function definition.
pub inline fn submit(self: *Self, infos: []const vk.SubmitInfo, p_fence: ?*Fence) VkError!void {
    const allocator = self.host_allocator.cloneWithScope(.command).allocator();
    var submit_infos = try SubmitInfo.initBlob(allocator, infos);
    defer SubmitInfo.deinitBlob(allocator, &submit_infos);
    try self.dispatch_table.submit(self, submit_infos.items, p_fence);
}
pub inline fn waitIdle(self: *Self) VkError!void {

View File

@@ -4,17 +4,32 @@ const vk = @import("vulkan");
const Buffer = @import("Buffer.zig");
/// Tag for every recordable command. Must stay in sync with `Command` below.
/// (Duplicate `BindPipeline`/`DrawIndirect` members left over from the
/// pre-rework ordering were removed — an enum cannot repeat a member.)
pub const CommandType = enum {
    BindPipeline,
    BindVertexBuffer,
    CopyBuffer,
    Draw,
    DrawIndexed,
    DrawIndexedIndirect,
    DrawIndirect,
    FillBuffer,
};
/// Payload for a recorded copy-buffer command (mirrors vkCmdCopyBuffer).
/// NOTE(review): `regions` is a slice of pointers to const BufferCopy; if the
/// regions are meant to be copied at record time, `[]const vk.BufferCopy`
/// may be what was intended — confirm ownership/lifetime of the regions.
pub const CommandCopyBuffer = struct {
    src: *Buffer,
    dst: *Buffer,
    regions: []*const vk.BufferCopy,
};
/// Payload for a recorded fill-buffer command (mirrors vkCmdFillBuffer
/// parameters: byte offset, byte size, 32-bit fill pattern).
pub const CommandFillBuffer = struct {
    buffer: *Buffer,
    offset: vk.DeviceSize,
    size: vk.DeviceSize,
    data: u32,
};
/// Payload for a recorded bind-vertex-buffers command (mirrors
/// vkCmdBindVertexBuffers). The pre-rework ArrayList fields, interleaved into
/// this declaration by the diff, were removed — a struct cannot declare the
/// same field name twice; the reworked slice-based fields are kept.
pub const CommandBindVertexBuffer = struct {
    buffers: []*const Buffer,
    offsets: []vk.DeviceSize,
    first_binding: u32,
};
@@ -52,10 +67,12 @@ pub const CommandBindPipeline = struct {
};
/// Tagged union of all recordable commands, keyed by CommandType.
/// (Duplicate `BindPipeline`/`DrawIndirect` members left over from the
/// pre-rework ordering were removed — a union cannot repeat a member, and
/// `union(CommandType)` must match the tag enum exactly.)
pub const Command = union(CommandType) {
    BindPipeline: CommandBindPipeline,
    BindVertexBuffer: CommandBindVertexBuffer,
    CopyBuffer: CommandCopyBuffer,
    Draw: CommandDraw,
    DrawIndexed: CommandDrawIndexed,
    DrawIndexedIndirect: CommandDrawIndexedIndirect,
    DrawIndirect: CommandDrawIndirect,
    FillBuffer: CommandFillBuffer,
};

View File

@@ -301,6 +301,8 @@ pub export fn strollQueueBindSparse(p_queue: vk.Queue, count: u32, info: [*]vk.B
}
pub export fn strollQueueSubmit(p_queue: vk.Queue, count: u32, info: [*]const vk.SubmitInfo, p_fence: vk.Fence) callconv(vk.vulkan_call_conv) vk.Result {
if (count == 0) return .success;
const queue = Dispatchable(Queue).fromHandleObject(p_queue) catch |err| return toVkResult(err);
const fence = if (p_fence != .null_handle) NonDispatchable(Fence).fromHandleObject(p_fence) catch |err| return toVkResult(err) else null;
queue.submit(info[0..count], fence) catch |err| return toVkResult(err);