reworking vulkan allocator

2025-11-13 23:27:45 +01:00
parent e08fba24a6
commit fae09760a3
10 changed files with 130 additions and 48 deletions


@@ -39,7 +39,7 @@ pub fn build(b: *std.Build) !void {
     base_mod.addSystemIncludePath(vulkan_headers.path("include"));
     for (implementations) |impl| {
-        var targets = std.ArrayListUnmanaged(*std.Build.Step.Compile){};
+        var targets = std.ArrayList(*std.Build.Step.Compile){};
         const lib_mod = b.createModule(.{
             .root_source_file = b.path(impl.root_source_file),

src/soft/SoftCommandBuffer.zig (new file)

@@ -0,0 +1,32 @@
const std = @import("std");
const vk = @import("vulkan");
const base = @import("base");

const VkError = base.VkError;
const Device = base.Device;

const Self = @This();

pub const Interface = base.CommandBuffer;

interface: Interface,

pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const vk.CommandBufferAllocateInfo) VkError!*Self {
    const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
    errdefer allocator.destroy(self);

    var interface = try Interface.init(device, allocator, info);
    interface.vtable = &.{
        .destroy = destroy,
    };

    self.* = .{
        .interface = interface,
    };
    return self;
}

pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
    const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
    allocator.destroy(self);
}
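Note (not part of the diff): the create/destroy pair above uses the @fieldParentPtr interface pattern shared by the other Soft* objects. Below is a rough sketch of how SoftCommandPool could wire the new allocateCommandBuffers vtable entry to it; the function body, the use of the device's host_allocator, and the plain append (which ignores first_free_buffer_index) are assumptions, not code from this commit.

const vk = @import("vulkan");
const base = @import("base");
const SoftCommandBuffer = @import("SoftCommandBuffer.zig");

// Hypothetical SoftCommandPool vtable entry: create a SoftCommandBuffer and
// track its base interface in the pool's buffer list.
fn allocateCommandBuffers(interface: *base.CommandPool, info: vk.CommandBufferAllocateInfo) base.VkError!*base.CommandBuffer {
    const allocator = interface.owner.host_allocator.allocator();
    const buffer = try SoftCommandBuffer.create(interface.owner, allocator, &info);
    errdefer buffer.interface.destroy(allocator);
    interface.buffers.append(allocator, &buffer.interface) catch return base.VkError.OutOfHostMemory;
    return &buffer.interface;
}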


@@ -7,6 +7,7 @@ pub const SoftDevice = @import("SoftDevice.zig");
 pub const SoftPhysicalDevice = @import("SoftPhysicalDevice.zig");
 pub const SoftQueue = @import("SoftQueue.zig");
+pub const SoftCommandBuffer = @import("SoftCommandBuffer.zig");
 pub const SoftCommandPool = @import("SoftCommandPool.zig");
 pub const SoftDeviceMemory = @import("SoftDeviceMemory.zig");
 pub const SoftFence = @import("SoftFence.zig");

src/vulkan/CommandBuffer.zig (new file)

@@ -0,0 +1,29 @@
const std = @import("std");
const vk = @import("vulkan");

const VkError = @import("error_set.zig").VkError;
const Device = @import("Device.zig");

const Self = @This();

pub const ObjectType: vk.ObjectType = .command_buffer;

owner: *Device,
vtable: *const VTable,

pub const VTable = struct {
    destroy: *const fn (*Self, std.mem.Allocator) void,
};

pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.CommandBufferAllocateInfo) VkError!Self {
    _ = allocator;
    _ = info;
    return .{
        .owner = device,
        .vtable = undefined,
    };
}

pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
    self.vtable.destroy(self, allocator);
}


@@ -2,33 +2,45 @@ const std = @import("std");
 const vk = @import("vulkan");
 const VkError = @import("error_set.zig").VkError;
+const CommandBuffer = @import("CommandBuffer.zig");
 const Device = @import("Device.zig");

 const Self = @This();

 pub const ObjectType: vk.ObjectType = .command_pool;

+const BUFFER_POOL_BASE_CAPACITY = 64;
+
 owner: *Device,
 flags: vk.CommandPoolCreateFlags,
 queue_family_index: u32,
+buffers: std.ArrayList(*CommandBuffer),
+first_free_buffer_index: usize,
 vtable: *const VTable,

 pub const VTable = struct {
+    allocateCommandBuffers: *const fn (*Self, vk.CommandBufferAllocateInfo) VkError!*CommandBuffer,
     destroy: *const fn (*Self, std.mem.Allocator) void,
     reset: *const fn (*Self, vk.CommandPoolResetFlags) VkError!void,
 };

 pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.CommandPoolCreateInfo) VkError!Self {
-    _ = allocator;
     return .{
         .owner = device,
         .flags = info.flags,
         .queue_family_index = info.queue_family_index,
+        .buffers = .initCapacity(allocator, BUFFER_POOL_BASE_CAPACITY) catch return VkError.OutOfHostMemory,
+        .first_free_buffer_index = 0,
         .vtable = undefined,
     };
 }

+pub inline fn allocateCommandBuffers(self: *Self, info: vk.CommandBufferAllocateInfo) VkError!*CommandBuffer {
+    return self.vtable.allocateCommandBuffers(self, info);
+}
+
 pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
+    self.buffers.deinit(allocator);
     self.vtable.destroy(self, allocator);
 }
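
Aside (illustration, not from the tree): call sites are meant to stay backend-agnostic by going through the new wrapper. A hypothetical helper living next to CommandPool.zig might look like the sketch below; the helper name and the struct field values are made up.

const vk = @import("vulkan");
const VkError = @import("error_set.zig").VkError;
const CommandPool = @import("CommandPool.zig");
const CommandBuffer = @import("CommandBuffer.zig");

// Allocate a single primary command buffer through whichever backend owns the pool.
fn allocateOnePrimary(pool: *CommandPool) VkError!*CommandBuffer {
    const info = vk.CommandBufferAllocateInfo{
        .s_type = .command_buffer_allocate_info,
        .p_next = null,
        .command_pool = .null_handle, // the pool object is passed directly; the handle field is unused here
        .level = .primary,
        .command_buffer_count = 1,
    };
    return pool.allocateCommandBuffers(info); // dispatches through the backend's vtable
}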


@@ -16,7 +16,8 @@ const Self = @This();
 pub const ObjectType: vk.ObjectType = .device;

 physical_device: *const PhysicalDevice,
-queues: std.AutoArrayHashMapUnmanaged(u32, std.ArrayListUnmanaged(*Dispatchable(Queue))),
+queues: std.AutoArrayHashMapUnmanaged(u32, std.ArrayList(*Dispatchable(Queue))),
+host_allocator: *VulkanAllocator,
 dispatch_table: *const DispatchTable,
 vtable: *const VTable,
@@ -40,11 +41,11 @@ pub const DispatchTable = struct {
 };

 pub fn init(allocator: std.mem.Allocator, physical_device: *const PhysicalDevice, info: *const vk.DeviceCreateInfo) VkError!Self {
-    _ = allocator;
     _ = info;
     return .{
         .physical_device = physical_device,
         .queues = .empty,
+        .host_allocator = @ptrCast(@alignCast(allocator.ptr)),
         .dispatch_table = undefined,
         .vtable = undefined,
     };
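
Aside (assumption, not shown in this commit): the @ptrCast(@alignCast(allocator.ptr)) recovery only works if the std.mem.Allocator handed to Device.init was produced by VulkanAllocator.allocator(), whose .ptr now carries the VulkanAllocator itself (see the allocator changes further down). A sketch of the implied contract follows; the allocator file name and the helper are invented.

const vk = @import("vulkan");
const VkError = @import("error_set.zig").VkError;
const Device = @import("Device.zig");
const PhysicalDevice = @import("PhysicalDevice.zig");
const VulkanAllocator = @import("VulkanAllocator.zig"); // file name assumed

// The VulkanAllocator is taken by pointer on purpose: Device stores allocator.ptr,
// so it must outlive the Device it creates.
fn createDeviceWithHostAllocator(
    host_alloc: *VulkanAllocator,
    physical_device: *const PhysicalDevice,
    info: *const vk.DeviceCreateInfo,
) VkError!Device {
    const allocator = host_alloc.allocator(); // allocator.ptr == host_alloc
    return Device.init(allocator, physical_device, info); // init recovers host_alloc from allocator.ptr
}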


@@ -18,7 +18,7 @@ comptime {
 const Self = @This();

 pub const ObjectType: vk.ObjectType = .instance;

-physical_devices: std.ArrayListUnmanaged(*Dispatchable(PhysicalDevice)),
+physical_devices: std.ArrayList(*Dispatchable(PhysicalDevice)),
 dispatch_table: *const DispatchTable,
 vtable: *const VTable,


@@ -26,9 +26,8 @@ pub fn init(callbacks: ?*const vk.AllocationCallbacks, scope: vk.SystemAllocatio
 }

 pub fn allocator(self: *const Self) Allocator {
-    if (self.callbacks != null) {
     return .{
-        .ptr = undefined,
+        .ptr = @ptrCast(@constCast(self)), // Ugly const cast for convenience
         .vtable = &.{
             .alloc = alloc,
             .resize = resize,
@@ -38,6 +37,43 @@ pub fn allocator(self: *const Self) Allocator {
     };
 }
+
+fn alloc(context: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 {
+    const self: *Self = @ptrCast(@alignCast(context));
+    if (self.callbacks.?.pfn_allocation) |pfn_allocation| {
+        return @ptrCast(pfn_allocation(self.callbacks.?.p_user_data, len, alignment.toByteUnits(), self.scope));
+    } else {
+        return getFallbackAllocator().rawAlloc(len, alignment, ret_addr);
+    }
+}
+
+fn resize(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
+    const self: *Self = @ptrCast(@alignCast(context));
+    if (self.callbacks != null) {
+        return new_len <= ptr.len;
+    } else {
+        return getFallbackAllocator().rawResize(ptr, alignment, new_len, ret_addr);
+    }
+}
+
+fn remap(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
+    const self: *Self = @ptrCast(@alignCast(context));
+    if (self.callbacks.?.pfn_reallocation) |pfn_reallocation| {
+        return @ptrCast(pfn_reallocation(self.callbacks.?.p_user_data, ptr.ptr, new_len, alignment.toByteUnits(), self.scope));
+    } else {
+        return getFallbackAllocator().rawRemap(ptr, alignment, new_len, ret_addr);
+    }
+}
+
+fn free(context: *anyopaque, ptr: []u8, alignment: Alignment, ret_addr: usize) void {
+    const self: *Self = @ptrCast(@alignCast(context));
+    if (self.callbacks.?.pfn_free) |pfn_free| {
+        return pfn_free(self.callbacks.?.p_user_data, ptr.ptr);
+    } else {
+        return getFallbackAllocator().rawFree(ptr, alignment, ret_addr);
+    }
+}
+
+inline fn getFallbackAllocator() std.mem.Allocator {
     if (std.process.hasEnvVarConstant(DRIVER_DEBUG_ALLOCATOR_ENV_NAME) or builtin.mode == std.builtin.OptimizeMode.Debug) {
         @branchHint(.unlikely);
         return debug_allocator.allocator();
@@ -45,34 +81,3 @@ pub fn allocator(self: *const Self) Allocator {
     }
     return std.heap.c_allocator;
 }
-
-fn alloc(context: *anyopaque, len: usize, alignment: Alignment, _: usize) ?[*]u8 {
-    const self: *Self = @ptrCast(@alignCast(context));
-    if (self.callbacks.?.pfn_allocation) |pfn_allocation| {
-        return @ptrCast(pfn_allocation(self.callbacks.?.p_user_data, len, alignment.toByteUnits(), self.scope));
-    }
-    @panic("Null PFN_vkAllocationFunction passed to VkAllocationCallbacks");
-}
-
-fn resize(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize, _: usize) bool {
-    _ = alignment;
-    _ = context;
-    return new_len <= ptr.len;
-}
-
-fn remap(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize, _: usize) ?[*]u8 {
-    const self: *Self = @ptrCast(@alignCast(context));
-    if (self.callbacks.?.pfn_reallocation) |pfn_reallocation| {
-        return @ptrCast(pfn_reallocation(self.callbacks.?.p_user_data, ptr.ptr, new_len, alignment.toByteUnits(), self.scope));
-    }
-    @panic("Null PFN_vkReallocationFunction passed to VkAllocationCallbacks");
-}
-
-fn free(context: *anyopaque, ptr: []u8, alignment: Alignment, _: usize) void {
-    _ = alignment;
-    const self: *Self = @ptrCast(@alignCast(context));
-    if (self.callbacks.?.pfn_free) |pfn_free| {
-        return pfn_free(self.callbacks.?.p_user_data, ptr.ptr);
-    }
-    @panic("Null PFN_vkFreeFunction passed to VkAllocationCallbacks");
-}
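
Aside (usage sketch, not from the tree): after this rework a single std.mem.Allocator front end serves both cases, forwarding the application's VkAllocationCallbacks when their pfn entries are set and deferring to getFallbackAllocator() otherwise. In the sketch below the file name, the scope choice, and the assumption that init returns the struct by value are mine.

const vk = @import("vulkan");
const VulkanAllocator = @import("VulkanAllocator.zig"); // file name assumed

// Allocate a driver object on behalf of the application, honouring its
// allocation callbacks when their pfn entries are set, otherwise falling
// back to the driver's own allocator.
fn allocateObject(comptime T: type, callbacks: ?*const vk.AllocationCallbacks) !*T {
    const host = VulkanAllocator.init(callbacks, .object);
    const allocator = host.allocator(); // .ptr points at `host`, so keep `host` alive while allocating
    return allocator.create(T);
}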


@@ -14,6 +14,7 @@ pub const Device = @import("Device.zig");
 pub const PhysicalDevice = @import("PhysicalDevice.zig");
 pub const Queue = @import("Queue.zig");
+pub const CommandBuffer = @import("CommandBuffer.zig");
 pub const CommandPool = @import("CommandPool.zig");
 pub const DeviceMemory = @import("DeviceMemory.zig");
 pub const Fence = @import("Fence.zig");


@@ -62,9 +62,10 @@ int main(void)
     volkLoadDevice(device);
     VkQueue queue = kvfGetDeviceQueue(device, KVF_GRAPHICS_QUEUE);
+    VkCommandBuffer cmd = kvfCreateCommandBuffer(device);
     VkFence fence = kvfCreateFence(device);
-    kvfSubmitCommandBuffer(device, VK_NULL_HANDLE, KVF_GRAPHICS_QUEUE, VK_NULL_HANDLE, VK_NULL_HANDLE, fence, NULL);
+    kvfSubmitCommandBuffer(device, cmd, KVF_GRAPHICS_QUEUE, VK_NULL_HANDLE, VK_NULL_HANDLE, fence, NULL);
     kvfWaitForFence(device, fence);
     kvfDestroyFence(device, fence);