fixing command pool, adding base command buffer dispatch table

2025-11-16 20:42:08 +01:00
parent c74bc7fb15
commit 5661505bef
7 changed files with 94 additions and 11 deletions

View File

@@ -20,6 +20,11 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
.destroy = destroy,
};
interface.dispatch_table = &.{
.begin = begin,
.end = end,
};
self.* = .{
.interface = interface,
};
@@ -30,3 +35,12 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
allocator.destroy(self);
}
pub fn begin(interface: *Interface, info: *const vk.CommandBufferBeginInfo) VkError!void {
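// No-op stub for now; nothing is recorded on begin yet.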
_ = interface;
_ = info;
}
pub fn end(interface: *Interface) VkError!void {
_ = interface;
}

View File

@@ -12,6 +12,7 @@ const SoftDeviceMemory = @import("SoftDeviceMemory.zig");
const SoftFence = @import("SoftFence.zig");
const VkError = base.VkError;
const Dispatchable = base.Dispatchable;
const NonDispatchable = base.NonDispatchable;
const Self = @This();
@@ -42,6 +43,7 @@ pub fn create(physical_device: *base.PhysicalDevice, allocator: std.mem.Allocato
.destroy = destroy,
.destroyCommandPool = destroyCommandPool,
.destroyFence = destroyFence,
.freeCommandBuffers = freeCommandBuffers,
.freeMemory = freeMemory,
.getFenceStatus = getFenceStatus,
.resetFences = resetFences,
@@ -107,7 +109,7 @@ pub fn waitForFences(_: *Interface, fences: []*base.Fence, waitForAll: bool, tim
// Command Pool functions ============================================================================================================================
-pub fn allocateCommandBuffers(_: *Interface, info: *const vk.CommandBufferAllocateInfo) VkError![]*base.CommandBuffer {
+pub fn allocateCommandBuffers(_: *Interface, info: *const vk.CommandBufferAllocateInfo) VkError![]*Dispatchable(base.CommandBuffer) {
const pool = try NonDispatchable(base.CommandPool).fromHandleObject(info.command_pool);
return pool.allocateCommandBuffers(info);
}
@@ -121,6 +123,10 @@ pub fn destroyCommandPool(_: *Interface, allocator: std.mem.Allocator, pool: *ba
pool.destroy(allocator);
}
pub fn freeCommandBuffers(_: *Interface, pool: *base.CommandPool, cmds: []*Dispatchable(base.CommandBuffer)) VkError!void {
try pool.freeCommandBuffers(cmds);
}
// Memory functions ==================================================================================================================================
pub fn allocateMemory(interface: *Interface, allocator: std.mem.Allocator, info: *const vk.MemoryAllocateInfo) VkError!*base.DeviceMemory {

View File

@@ -10,6 +10,12 @@ pub const ObjectType: vk.ObjectType = .command_buffer;
owner: *Device,
vtable: *const VTable,
dispatch_table: *const DispatchTable,
pub const DispatchTable = struct {
begin: *const fn (*Self, *const vk.CommandBufferBeginInfo) VkError!void,
end: *const fn (*Self) VkError!void,
};
pub const VTable = struct {
destroy: *const fn (*Self, std.mem.Allocator) void,
@@ -21,9 +27,18 @@ pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.Comma
return .{
.owner = device,
.vtable = undefined,
.dispatch_table = undefined,
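// Both tables are left undefined here; the concrete backend is expected to point them at its own implementations right after init, as the backend command buffer at the top of this commit does.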
};
}
pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
self.vtable.destroy(self, allocator);
}
pub inline fn begin(self: *Self, info: *const vk.CommandBufferBeginInfo) VkError!void {
try self.dispatch_table.begin(self, info);
}
pub inline fn end(self: *Self) VkError!void {
try self.dispatch_table.end(self);
}

View File

@@ -3,7 +3,7 @@ const vk = @import("vulkan");
const VkError = @import("error_set.zig").VkError;
const VulkanAllocator = @import("VulkanAllocator.zig");
const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
const Dispatchable = @import("Dispatchable.zig").Dispatchable;
const CommandBuffer = @import("CommandBuffer.zig");
const Device = @import("Device.zig");
@@ -23,7 +23,7 @@ host_allocator: VulkanAllocator,
/// Contiguous dynamic array of command buffers with free ones
/// grouped at the end.
/// When buffers are freed they are shifted toward the back so the free ones stay grouped at the end.
-buffers: std.ArrayList(*NonDispatchable(CommandBuffer)),
+buffers: std.ArrayList(*Dispatchable(CommandBuffer)),
/// Index of the first free command buffer.
first_free_buffer_index: usize,
@@ -42,13 +42,13 @@ pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.Comma
.flags = info.flags,
.queue_family_index = info.queue_family_index,
.host_allocator = VulkanAllocator.from(allocator).clone(),
-.buffers = std.ArrayList(*NonDispatchable(CommandBuffer)).initCapacity(allocator, BUFFER_POOL_BASE_CAPACITY) catch return VkError.OutOfHostMemory,
+.buffers = std.ArrayList(*Dispatchable(CommandBuffer)).initCapacity(allocator, BUFFER_POOL_BASE_CAPACITY) catch return VkError.OutOfHostMemory,
.first_free_buffer_index = 0,
.vtable = undefined,
};
}
-pub fn allocateCommandBuffers(self: *Self, info: *const vk.CommandBufferAllocateInfo) VkError![]*NonDispatchable(CommandBuffer) {
+pub fn allocateCommandBuffers(self: *Self, info: *const vk.CommandBufferAllocateInfo) VkError![]*Dispatchable(CommandBuffer) {
const allocator = self.host_allocator.allocator();
if (self.buffers.items.len < self.first_free_buffer_index + info.command_buffer_count) {
@@ -57,7 +57,7 @@ pub fn allocateCommandBuffers(self: *Self, info: *const vk.CommandBufferAllocate
}
for (0..info.command_buffer_count) |_| {
const cmd = try self.vtable.createCommandBuffer(self, allocator, info);
-const non_dis_cmd = try NonDispatchable(CommandBuffer).wrap(allocator, cmd);
+const non_dis_cmd = try Dispatchable(CommandBuffer).wrap(allocator, cmd);
self.buffers.appendAssumeCapacity(non_dis_cmd);
}
}
@@ -68,6 +68,18 @@ pub fn allocateCommandBuffers(self: *Self, info: *const vk.CommandBufferAllocate
return slice;
}
pub fn freeCommandBuffers(self: *Self, cmds: []*Dispatchable(CommandBuffer)) VkError!void {
// Not pretty, but a linear search per buffer works well for small pools.
var freed: usize = 0;
for (cmds) |cmd| {
if (std.mem.indexOfScalar(*Dispatchable(CommandBuffer), self.buffers.items, cmd)) |i| {
const save = self.buffers.orderedRemove(i);
// Append the now-free command buffer at the end of the pool
self.buffers.appendAssumeCapacity(save);
freed += 1;
}
}
// Only count buffers that actually belonged to this pool to avoid underflowing the index.
self.first_free_buffer_index -= freed;
}
pub fn destroy(self: *Self, allocator: std.mem.Allocator) void {
for (self.buffers.items) |non_dis_cmd| {
non_dis_cmd.object.destroy(allocator);

View File

@@ -30,13 +30,14 @@ pub const VTable = struct {
};
pub const DispatchTable = struct {
-allocateCommandBuffers: *const fn (*Self, *const vk.CommandBufferAllocateInfo) VkError![]*NonDispatchable(CommandBuffer),
+allocateCommandBuffers: *const fn (*Self, *const vk.CommandBufferAllocateInfo) VkError![]*Dispatchable(CommandBuffer),
allocateMemory: *const fn (*Self, std.mem.Allocator, *const vk.MemoryAllocateInfo) VkError!*DeviceMemory,
createCommandPool: *const fn (*Self, std.mem.Allocator, *const vk.CommandPoolCreateInfo) VkError!*CommandPool,
createFence: *const fn (*Self, std.mem.Allocator, *const vk.FenceCreateInfo) VkError!*Fence,
destroy: *const fn (*Self, std.mem.Allocator) VkError!void,
destroyCommandPool: *const fn (*Self, std.mem.Allocator, *CommandPool) VkError!void,
destroyFence: *const fn (*Self, std.mem.Allocator, *Fence) VkError!void,
freeCommandBuffers: *const fn (*Self, *CommandPool, []*Dispatchable(CommandBuffer)) VkError!void,
freeMemory: *const fn (*Self, std.mem.Allocator, *DeviceMemory) VkError!void,
getFenceStatus: *const fn (*Self, *Fence) VkError!void,
resetFences: *const fn (*Self, []*Fence) VkError!void,
@@ -113,7 +114,7 @@ pub inline fn waitForFences(self: *Self, fences: []*Fence, waitForAll: bool, tim
// Command Pool functions ============================================================================================================================
-pub inline fn allocateCommandBuffers(self: *Self, info: *const vk.CommandBufferAllocateInfo) VkError![]*NonDispatchable(CommandBuffer) {
+pub inline fn allocateCommandBuffers(self: *Self, info: *const vk.CommandBufferAllocateInfo) VkError![]*Dispatchable(CommandBuffer) {
return self.dispatch_table.allocateCommandBuffers(self, info);
}
@@ -125,6 +126,10 @@ pub inline fn destroyCommandPool(self: *Self, allocator: std.mem.Allocator, pool
try self.dispatch_table.destroyCommandPool(self, allocator, pool);
}
pub inline fn freeCommandBuffers(self: *Self, pool: *CommandPool, cmds: []*Dispatchable(CommandBuffer)) VkError!void {
try self.dispatch_table.freeCommandBuffers(self, pool, cmds);
}
// Memory functions ==================================================================================================================================
pub inline fn allocateMemory(self: *Self, allocator: std.mem.Allocator, info: *const vk.MemoryAllocateInfo) VkError!*DeviceMemory {

View File

@@ -76,11 +76,14 @@ const physical_device_pfn_map = std.StaticStringMap(vk.PfnVoidFunction).initComp
const device_pfn_map = std.StaticStringMap(vk.PfnVoidFunction).initComptime(.{
functionMapEntryPoint("vkAllocateCommandBuffers"),
functionMapEntryPoint("vkAllocateMemory"),
functionMapEntryPoint("vkBeginCommandBuffer"),
functionMapEntryPoint("vkCreateCommandPool"),
functionMapEntryPoint("vkCreateFence"),
functionMapEntryPoint("vkDestroyCommandPool"),
functionMapEntryPoint("vkDestroyFence"),
functionMapEntryPoint("vkDestroyDevice"),
functionMapEntryPoint("vkEndCommandBuffer"),
functionMapEntryPoint("vkFreeCommandBuffers"),
functionMapEntryPoint("vkFreeMemory"),
functionMapEntryPoint("vkGetDeviceQueue"),
functionMapEntryPoint("vkGetFenceStatus"),
@@ -362,6 +365,13 @@ pub export fn strollDestroyFence(p_device: vk.Device, p_fence: vk.Fence, callbac
non_dispatchable_fence.destroy(allocator);
}
pub export fn strollFreeCommandBuffers(p_device: vk.Device, p_pool: vk.CommandPool, count: u32, p_cmds: [*]const vk.CommandBuffer) callconv(vk.vulkan_call_conv) void {
const device = Dispatchable(Device).fromHandleObject(p_device) catch return;
const pool = NonDispatchable(CommandPool).fromHandleObject(p_pool) catch return;
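// Dispatchable command buffer handles are pointers to their wrapper objects, so the incoming handle array can be reinterpreted in place instead of copied.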
const cmds: [*]*Dispatchable(CommandBuffer) = @ptrCast(@constCast(p_cmds));
device.freeCommandBuffers(pool, cmds[0..count]) catch return;
}
pub export fn strollFreeMemory(p_device: vk.Device, p_memory: vk.DeviceMemory, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
const allocator = VulkanAllocator.init(callbacks, .object).allocator();
const device = Dispatchable(Device).fromHandleObject(p_device) catch return;
@@ -420,7 +430,7 @@ pub export fn strollUnmapMemory(p_device: vk.Device, p_memory: vk.DeviceMemory)
pub export fn strollResetFences(p_device: vk.Device, count: u32, p_fences: [*]const vk.Fence) callconv(vk.vulkan_call_conv) vk.Result {
const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return toVkResult(err);
-const allocator = std.heap.c_allocator;
+const allocator = device.host_allocator.cloneWithScope(.command).allocator();
const fences: []*Fence = allocator.alloc(*Fence, count) catch return .error_unknown;
defer allocator.free(fences);
@@ -455,7 +465,7 @@ pub export fn strollQueueWaitIdle(p_queue: vk.Queue) callconv(vk.vulkan_call_con
pub export fn strollWaitForFences(p_device: vk.Device, count: u32, p_fences: [*]const vk.Fence, waitForAll: vk.Bool32, timeout: u64) callconv(vk.vulkan_call_conv) vk.Result {
const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return toVkResult(err);
-const allocator = std.heap.c_allocator;
+const allocator = device.host_allocator.cloneWithScope(.command).allocator();
const fences: []*Fence = allocator.alloc(*Fence, count) catch return .error_unknown;
defer allocator.free(fences);
@@ -467,3 +477,21 @@ pub export fn strollWaitForFences(p_device: vk.Device, count: u32, p_fences: [*]
device.waitForFences(fences, (waitForAll == .true), timeout) catch |err| return toVkResult(err);
return .success;
}
// Command Buffer functions ===================================================================================================================================
pub export fn strollBeginCommandBuffer(p_cmd: vk.CommandBuffer, p_info: ?*const vk.CommandBufferBeginInfo) callconv(vk.vulkan_call_conv) vk.Result {
const info = p_info orelse return .error_validation_failed;
if (info.s_type != .command_buffer_begin_info) {
return .error_validation_failed;
}
const cmd = Dispatchable(CommandBuffer).fromHandleObject(p_cmd) catch |err| return toVkResult(err);
cmd.begin(info) catch |err| return toVkResult(err);
return .success;
}
pub export fn strollEndCommandBuffer(p_cmd: vk.CommandBuffer) callconv(vk.vulkan_call_conv) vk.Result {
const cmd = Dispatchable(CommandBuffer).fromHandleObject(p_cmd) catch |err| return toVkResult(err);
cmd.end() catch |err| return toVkResult(err);
return .success;
}

View File

@@ -62,8 +62,11 @@ int main(void)
volkLoadDevice(device);
VkQueue queue = kvfGetDeviceQueue(device, KVF_GRAPHICS_QUEUE);
-VkCommandBuffer cmd = kvfCreateCommandBuffer(device);
VkFence fence = kvfCreateFence(device);
+VkCommandBuffer cmd = kvfCreateCommandBuffer(device);
kvfBeginCommandBuffer(cmd, 0);
kvfEndCommandBuffer(cmd);
kvfSubmitCommandBuffer(device, cmd, KVF_GRAPHICS_QUEUE, VK_NULL_HANDLE, VK_NULL_HANDLE, fence, NULL);
kvfWaitForFence(device, fence);