Implement vkCmdFillBuffer and rework the logger

2025-11-22 21:43:31 +01:00
parent 5df8677051
commit b586ff18e1
16 changed files with 325 additions and 82 deletions
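For orientation, here is a minimal, self-contained sketch (hypothetical simplified types, not the driver's actual Command/CommandFillBuffer definitions) of the record-then-execute shape this commit introduces: the vkCmdFillBuffer entry point records a FillBuffer command, and the software executor later dispatches it by repeating the 32-bit pattern across the target range.

const std = @import("std");

// Hypothetical stand-ins for the driver's command types.
const FillBuffer = struct { buffer: []u32, offset_words: usize, count_words: usize, data: u32 };
const Command = union(enum) { FillBuffer: FillBuffer };

fn dispatch(command: *const Command) void {
    switch (command.*) {
        // vkCmdFillBuffer semantics: repeat the 32-bit `data` word across the range.
        .FillBuffer => |c| {
            for (c.buffer[c.offset_words .. c.offset_words + c.count_words]) |*word| {
                word.* = c.data;
            }
        },
    }
}

test "record then execute a FillBuffer command" {
    var storage = [_]u32{0} ** 4;
    const command: Command = .{ .FillBuffer = .{ .buffer = &storage, .offset_words = 0, .count_words = 4, .data = 0xDEADBEEF } };
    dispatch(&command);
    for (storage) |word| try std.testing.expectEqual(@as(u32, 0xDEADBEEF), word);
}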

View File

@@ -15,5 +15,20 @@ pub fn deinit(self: *Self) void {
 pub fn dispatch(self: *Self, command: *const cmd.Command) void {
     _ = self;
-    _ = command;
+    switch (command.*) {
+        .FillBuffer => |data| fillBuffer(&data),
+        else => {},
+    }
+}
+
+fn fillBuffer(data: *const cmd.CommandFillBuffer) void {
+    const memory = if (data.buffer.memory) |memory| memory else unreachable;
+    const raw_memory_map: [*]u32 = @ptrCast(@alignCast(memory.map(data.offset, data.size) catch unreachable));
+    const memory_map: []u32 = raw_memory_map[0..@divExact(data.size, @sizeOf(u32))];
+    for (0..@divExact(data.size, @sizeOf(u32))) |i| {
+        memory_map[i] = data.data;
+    }
+    memory.unmap();
 }

View File

@@ -23,8 +23,6 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
         .getMemoryRequirements = getMemoryRequirements,
     };
-
-    interface.allowed_memory_types = lib.MEMORY_TYPE_GENERIC_BIT;
     self.* = .{
         .interface = interface,
     };

View File

@@ -25,13 +25,15 @@ pub fn create(device: *SoftDevice, allocator: std.mem.Allocator, size: vk.DeviceSize
     self.* = .{
         .interface = interface,
-        .data = device.device_allocator.allocator().alignedAlloc(u8, std.mem.Alignment.@"16", size) catch return VkError.OutOfDeviceMemory,
+        .data = device.device_allocator.allocator().alloc(u8, size) catch return VkError.OutOfDeviceMemory,
     };
     return self;
 }

 pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
     const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
+    const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
+    soft_device.device_allocator.allocator().free(self.data);
     allocator.destroy(self);
 }
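As an aside, a tiny sketch of the intrusive interface pattern used in destroy() above (simplified, made-up types rather than the driver's real Interface/SoftDevice): the concrete object embeds the interface struct and recovers itself, or its owner, through @fieldParentPtr.

const std = @import("std");

const Interface = struct { size: u64 };

const SoftMemory = struct {
    interface: Interface,
    data: []const u8,

    // Same trick as destroy() above: walk back from the embedded field to its parent.
    fn fromInterface(interface: *Interface) *SoftMemory {
        return @alignCast(@fieldParentPtr("interface", interface));
    }
};

test "recover the concrete object from its embedded interface" {
    var memory = SoftMemory{ .interface = .{ .size = 64 }, .data = "" };
    try std.testing.expectEqual(&memory, SoftMemory.fromInterface(&memory.interface));
}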

View File

@@ -36,6 +36,7 @@ pub fn create(allocator: std.mem.Allocator, instance: *const base.Instance) VkEr
     interface.props.device_type = .cpu;
     interface.props.limits.max_bound_descriptor_sets = 1024; // tmp
+    interface.props.limits.max_memory_allocation_count = 1024;
     interface.mem_props.memory_type_count = 1;
     interface.mem_props.memory_types[0] = .{

View File

@@ -25,7 +25,7 @@ pub const DRIVER_VERSION = vk.makeApiVersion(0, 0, 0, 1);
 pub const DEVICE_ID = 0x600DCAFE;

 /// Generic system memory.
-pub const MEMORY_TYPE_GENERIC_BIT = 0x1;
+pub const MEMORY_TYPE_GENERIC_BIT = 0;

 /// 16 bytes for 128-bit vector types.
 pub const MEMORY_REQUIREMENTS_ALIGNMENT = 16;

View File

@@ -13,7 +13,7 @@ size: vk.DeviceSize,
 offset: vk.DeviceSize,
 usage: vk.BufferUsageFlags,
 memory: ?*DeviceMemory,
-allowed_memory_types: u32,
+allowed_memory_types: std.bit_set.IntegerBitSet(32),
 vtable: *const VTable,
@@ -30,7 +30,7 @@ pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.Buffe
         .offset = 0,
         .usage = info.usage,
         .memory = null,
-        .allowed_memory_types = 0,
+        .allowed_memory_types = std.bit_set.IntegerBitSet(32).initFull(),
         .vtable = undefined,
     };
 }
@@ -40,7 +40,7 @@ pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
 }

 pub inline fn bindMemory(self: *Self, memory: *DeviceMemory, offset: vk.DeviceSize) VkError!void {
-    if (offset >= self.size or self.allowed_memory_types & memory.memory_type_index == 0) {
+    if (offset >= self.size or !self.allowed_memory_types.isSet(memory.memory_type_index)) {
         return VkError.ValidationFailed;
     }
     self.memory = memory;
@@ -49,6 +49,6 @@ pub inline fn bindMemory(self: *Self, memory: *DeviceMemory, offset: vk.DeviceSi
 pub inline fn getMemoryRequirements(self: *Self, requirements: *vk.MemoryRequirements) void {
     requirements.size = self.size;
-    requirements.memory_type_bits = self.allowed_memory_types;
+    requirements.memory_type_bits = self.allowed_memory_types.mask;
     self.vtable.getMemoryRequirements(self, requirements);
 }
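For context, the switch from a raw u32 to std.bit_set.IntegerBitSet(32) keeps exactly the information Vulkan expects in VkMemoryRequirements.memoryTypeBits. A small illustrative test (indices chosen for the example, not driver code):

const std = @import("std");

test "allowed memory types as a bit set" {
    var allowed = std.bit_set.IntegerBitSet(32).initEmpty();
    allowed.set(0); // allow memory type index 0 only

    try std.testing.expect(allowed.isSet(0));
    try std.testing.expect(!allowed.isSet(1));
    // `.mask` is the raw u32 that gets reported back as memory_type_bits.
    try std.testing.expectEqual(@as(u32, 0b1), allowed.mask);
}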

View File

@@ -110,11 +110,17 @@ pub inline fn submit(self: *Self) VkError!void {
 // Commands ====================================================================================================

 pub inline fn fillBuffer(self: *Self, buffer: *Buffer, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) VkError!void {
+    if (offset >= buffer.size) return VkError.ValidationFailed;
+    if (size != vk.WHOLE_SIZE and (size == 0 or size > buffer.size - offset)) return VkError.ValidationFailed;
+    if ((size != vk.WHOLE_SIZE and @mod(size, 4) != 0) or @mod(offset, 4) != 0) return VkError.ValidationFailed;
+    if (!buffer.usage.transfer_dst_bit) return VkError.ValidationFailed;
+    if (buffer.memory == null) return VkError.ValidationFailed;
+
     const allocator = self.host_allocator.allocator();
     self.commands.append(allocator, .{ .FillBuffer = .{
         .buffer = buffer,
         .offset = offset,
-        .size = size,
+        .size = if (size == vk.WHOLE_SIZE) buffer.size - offset else size,
         .data = data,
     } }) catch return VkError.OutOfHostMemory;
     try self.dispatch_table.fillBuffer(self, buffer, offset, size, data);
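The checks added above follow the Vulkan rules for vkCmdFillBuffer: offset and size must be multiples of 4, the range must stay inside the buffer, and VK_WHOLE_SIZE expands to the rest of the buffer. A standalone restatement with illustrative values (the helper name is ours, not the driver's):

const std = @import("std");

const WHOLE_SIZE: u64 = ~@as(u64, 0); // vk.WHOLE_SIZE

fn fillRangeIsValid(buffer_size: u64, offset: u64, size: u64) bool {
    if (offset >= buffer_size or offset % 4 != 0) return false;
    if (size == WHOLE_SIZE) return true; // expands to buffer_size - offset at record time
    return size != 0 and size % 4 == 0 and size <= buffer_size - offset;
}

test "vkCmdFillBuffer range rules" {
    try std.testing.expect(fillRangeIsValid(64, 0, WHOLE_SIZE)); // whole buffer
    try std.testing.expect(fillRangeIsValid(64, 16, 32)); // aligned sub-range
    try std.testing.expect(!fillRangeIsValid(64, 2, 4)); // offset not a multiple of 4
    try std.testing.expect(!fillRangeIsValid(64, 32, 48)); // runs past the end of the buffer
}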

View File

@@ -85,9 +85,7 @@ pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) VkError!void {
 }

 pub inline fn createBuffer(self: *Self, allocator: std.mem.Allocator, info: *const vk.BufferCreateInfo) VkError!*Buffer {
-    const buffer = try self.dispatch_table.createBuffer(self, allocator, info);
-    std.debug.assert(buffer.allowed_memory_types != 0);
-    return buffer;
+    return self.dispatch_table.createBuffer(self, allocator, info);
 }

 pub inline fn createFence(self: *Self, allocator: std.mem.Allocator, info: *const vk.FenceCreateInfo) VkError!*Fence {

View File

@@ -7,7 +7,7 @@ const Device = @import("Device.zig");
 const Self = @This();

 pub const ObjectType: vk.ObjectType = .device_memory;

-owner: *const Device,
+owner: *Device,
 size: vk.DeviceSize,
 memory_type_index: u32,
 is_mapped: bool,
@@ -20,7 +20,7 @@ pub const VTable = struct {
     unmap: *const fn (*Self) void,
 };

-pub fn init(device: *const Device, size: vk.DeviceSize, memory_type_index: u32) VkError!Self {
+pub fn init(device: *Device, size: vk.DeviceSize, memory_type_index: u32) VkError!Self {
     return .{
         .owner = device,
         .size = size,

View File

@@ -80,10 +80,10 @@ pub fn releasePhysicalDevices(self: *Self, allocator: std.mem.Allocator) VkError
 pub fn requestPhysicalDevices(self: *Self, allocator: std.mem.Allocator) VkError!void {
     try self.vtable.requestPhysicalDevices(self, allocator);
     if (self.physical_devices.items.len == 0) {
-        std.log.scoped(.vkCreateInstance).info("No VkPhysicalDevice found", .{});
+        std.log.scoped(.vkCreateInstance).err("No VkPhysicalDevice found", .{});
         return;
     }

     for (self.physical_devices.items) |physical_device| {
-        std.log.scoped(.vkCreateInstance).info("Found VkPhysicalDevice named {s}", .{physical_device.object.props.device_name});
+        std.log.scoped(.vkCreateInstance).debug("Found VkPhysicalDevice named {s}", .{physical_device.object.props.device_name});
     }
 }

View File

@@ -62,7 +62,7 @@ pub const SubmitInfo = struct {
 };

 pub fn init(allocator: std.mem.Allocator, device: *Device, index: u32, family_index: u32, flags: vk.DeviceQueueCreateFlags) VkError!Self {
-    std.log.scoped(.vkCreateDevice).info("Creating device queue with family index {d} and index {d}", .{ family_index, index });
+    std.log.scoped(.vkCreateDevice).debug("Creating device queue with family index {d} and index {d}", .{ family_index, index });
     return .{
         .owner = device,
         .family_index = family_index,

View File

@@ -1,3 +1,4 @@
+const std = @import("std");
 const vk = @import("vulkan");

 pub const VkError = error{
@@ -51,7 +52,12 @@ pub const VkError = error{
     NotEnoughSpaceKhr,
 };

+pub inline fn errorLogger(err: VkError) void {
+    std.log.scoped(.errorLogger).err("Error logger caught a '{s}'", .{@errorName(err)});
+}
+
 pub inline fn toVkResult(err: VkError) vk.Result {
+    errorLogger(err);
     return switch (err) {
         VkError.NotReady => .not_ready,
         VkError.Timeout => .timeout,

View File

@@ -40,6 +40,7 @@ pub const LogVerboseLevel = enum {
     None,
     Standard,
     High,
+    TooMuch,
 };

 pub inline fn getLogVerboseLevel() LogVerboseLevel {
@@ -49,6 +50,8 @@ pub inline fn getLogVerboseLevel() LogVerboseLevel {
         .None
     else if (std.mem.eql(u8, level, "all"))
         .High
+    else if (std.mem.eql(u8, level, "stupid"))
+        .TooMuch
     else
         .Standard;
 }

View File

@@ -10,6 +10,7 @@ const logger = @import("logger.zig");
 const error_set = @import("error_set.zig");
 const VkError = error_set.VkError;
 const toVkResult = error_set.toVkResult;
+const errorLogger = error_set.errorLogger;
 const Dispatchable = @import("Dispatchable.zig").Dispatchable;
 const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
@@ -27,8 +28,17 @@ const CommandPool = @import("CommandPool.zig");
 const DeviceMemory = @import("DeviceMemory.zig");
 const Fence = @import("Fence.zig");

+fn entryPointBeginLogTrace(comptime scope: @Type(.enum_literal)) void {
+    std.log.scoped(scope).debug("Calling {s}...", .{@tagName(scope)});
+    logger.indent();
+}
+
+fn entryPointEndLogTrace() void {
+    logger.unindent();
+}
+
 fn entryPointNotFoundErrorLog(comptime scope: @Type(.enum_literal), name: []const u8) void {
-    if (lib.getLogVerboseLevel() != .High) return;
+    if (lib.getLogVerboseLevel() != .TooMuch) return;
     std.log.scoped(scope).err("Could not find function {s}", .{name});
 }
@@ -79,6 +89,7 @@ const device_pfn_map = std.StaticStringMap(vk.PfnVoidFunction).initComptime(.{
     functionMapEntryPoint("vkAllocateMemory"),
     functionMapEntryPoint("vkBeginCommandBuffer"),
     functionMapEntryPoint("vkBindBufferMemory"),
+    functionMapEntryPoint("vkCmdFillBuffer"),
     functionMapEntryPoint("vkCreateCommandPool"),
     functionMapEntryPoint("vkCreateBuffer"),
     functionMapEntryPoint("vkCreateFence"),
@@ -105,11 +116,19 @@ const device_pfn_map = std.StaticStringMap(vk.PfnVoidFunction).initComptime(.{
 // ICD Interface =============================================================================================================================================

 pub export fn stroll_icdNegotiateLoaderICDInterfaceVersion(p_version: *u32) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vk_icdNegotiateLoaderICDInterfaceVersion);
+    defer entryPointEndLogTrace();
+
     p_version.* = 7;
     return .success;
 }

 pub export fn vk_icdGetInstanceProcAddr(p_instance: vk.Instance, p_name: ?[*:0]const u8) callconv(vk.vulkan_call_conv) vk.PfnVoidFunction {
+    if (lib.getLogVerboseLevel() == .TooMuch) {
+        entryPointBeginLogTrace(.vk_icdGetInstanceProcAddr);
+    }
+    defer entryPointEndLogTrace();
+
     if (p_name == null) return null;
     const name = std.mem.span(p_name.?);
@@ -118,6 +137,11 @@ pub export fn vk_icdGetInstanceProcAddr(p_instance: vk.Instance, p_name: ?[*:0]c
 }

 pub export fn stroll_icdGetPhysicalDeviceProcAddr(_: vk.Instance, p_name: ?[*:0]const u8) callconv(vk.vulkan_call_conv) vk.PfnVoidFunction {
+    if (lib.getLogVerboseLevel() == .TooMuch) {
+        entryPointBeginLogTrace(.vk_icdGetPhysicalDeviceProcAddr);
+    }
+    defer entryPointEndLogTrace();
+
     if (p_name == null) return null;
     const name = std.mem.span(p_name.?);
@@ -130,6 +154,11 @@ pub export fn stroll_icdGetPhysicalDeviceProcAddr(_: vk.Instance, p_name: ?[*:0]
 // Global functions ==========================================================================================================================================

 pub export fn vkGetInstanceProcAddr(p_instance: vk.Instance, p_name: ?[*:0]const u8) callconv(vk.vulkan_call_conv) vk.PfnVoidFunction {
+    if (lib.getLogVerboseLevel() == .TooMuch) {
+        entryPointBeginLogTrace(.vkGetInstanceProcAddr);
+    }
+    defer entryPointEndLogTrace();
+
     if (p_name == null) return null;
     const name = std.mem.span(p_name.?);
@@ -147,14 +176,13 @@ pub export fn vkGetInstanceProcAddr(p_instance: vk.Instance, p_name: ?[*:0]const
 }

 pub export fn strollCreateInstance(p_info: ?*const vk.InstanceCreateInfo, callbacks: ?*const vk.AllocationCallbacks, p_instance: *vk.Instance) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkCreateInstance);
+    defer entryPointEndLogTrace();
+
     const info = p_info orelse return .error_validation_failed;
     if (info.s_type != .instance_create_info) {
         return .error_validation_failed;
     }
-
-    std.log.scoped(.vkCreateInstance).info("Creating VkInstance", .{});
-    logger.indent();
-    defer logger.unindent();
     const allocator = VulkanAllocator.init(callbacks, .instance).allocator();

     var instance: *lib.Instance = undefined;
@@ -169,6 +197,9 @@ pub export fn strollCreateInstance(p_info: ?*const vk.InstanceCreateInfo, callba
 }

 pub export fn strollEnumerateInstanceExtensionProperties(p_layer_name: ?[*:0]const u8, property_count: *u32, properties: ?*vk.ExtensionProperties) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkEnumerateInstanceExtensionProperties);
+    defer entryPointEndLogTrace();
+
     var name: ?[]const u8 = null;
     if (p_layer_name) |layer_name| {
         name = std.mem.span(layer_name);
@@ -178,6 +209,9 @@ pub export fn strollEnumerateInstanceExtensionProperties(p_layer_name: ?[*:0]con
 }

 pub export fn strollEnumerateInstanceVersion(version: *u32) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkEnumerateInstanceVersion);
+    defer entryPointEndLogTrace();
+
     Instance.enumerateVersion(version) catch |err| return toVkResult(err);
     return .success;
 }
@@ -185,13 +219,14 @@ pub export fn strollEnumerateInstanceVersion(version: *u32) callconv(vk.vulkan_c
 // Instance functions ========================================================================================================================================

 pub export fn strollDestroyInstance(p_instance: vk.Instance, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
-    std.log.scoped(.vkDestroyInstance).info("Destroying VkInstance", .{});
-    logger.indent();
-    defer logger.unindent();
+    defer logger.freeInnerDebugStack();
+
+    entryPointBeginLogTrace(.vkDestroyInstance);
+    defer entryPointEndLogTrace();
+
     const allocator = VulkanAllocator.init(callbacks, .instance).allocator();
-    const dispatchable = Dispatchable(Instance).fromHandle(p_instance) catch return;
-    dispatchable.object.deinit(allocator) catch return;
+    const dispatchable = Dispatchable(Instance).fromHandle(p_instance) catch |err| return errorLogger(err);
+    dispatchable.object.deinit(allocator) catch |err| return errorLogger(err);
     dispatchable.destroy(allocator);

     if (std.process.hasEnvVarConstant(lib.DRIVER_DEBUG_ALLOCATOR_ENV_NAME) or builtin.mode == std.builtin.OptimizeMode.Debug) {
@@ -203,6 +238,9 @@ pub export fn strollDestroyInstance(p_instance: vk.Instance, callbacks: ?*const
 }

 pub export fn strollEnumeratePhysicalDevices(p_instance: vk.Instance, count: *u32, p_devices: ?[*]vk.PhysicalDevice) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkEnumeratePhysicalDevices);
+    defer entryPointEndLogTrace();
+
     const instance = Dispatchable(Instance).fromHandleObject(p_instance) catch |err| return toVkResult(err);
     count.* = @intCast(instance.physical_devices.items.len);

     if (p_devices) |devices| {
@@ -216,15 +254,18 @@ pub export fn strollEnumeratePhysicalDevices(p_instance: vk.Instance, count: *u3
 // Physical Device functions =================================================================================================================================

 pub export fn strollCreateDevice(p_physical_device: vk.PhysicalDevice, p_info: ?*const vk.DeviceCreateInfo, callbacks: ?*const vk.AllocationCallbacks, p_device: *vk.Device) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkCreateDevice);
+    defer entryPointEndLogTrace();
+
     const info = p_info orelse return .error_validation_failed;
     if (info.s_type != .device_create_info) {
         return .error_validation_failed;
     }
     const allocator = VulkanAllocator.init(callbacks, .device).allocator();
     const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return toVkResult(err);
-    std.log.scoped(.vkCreateDevice).info("Creating VkDevice from {s}", .{physical_device.props.device_name});
-    logger.indent();
-    defer logger.unindent();
+
+    std.log.scoped(.vkCreateDevice).debug("Using VkPhysicalDevice named {s}", .{physical_device.props.device_name});

     const device = physical_device.createDevice(allocator, info) catch |err| return toVkResult(err);
     p_device.* = (Dispatchable(Device).wrap(allocator, device) catch |err| return toVkResult(err)).toVkHandle(vk.Device);
@@ -232,6 +273,9 @@ pub export fn strollCreateDevice(p_physical_device: vk.PhysicalDevice, p_info: ?
 }

 pub export fn strollEnumerateDeviceExtensionProperties(p_physical_device: vk.PhysicalDevice, p_layer_name: ?[*:0]const u8, property_count: *u32, properties: ?*vk.ExtensionProperties) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkEnumerateDeviceExtensionProperties);
+    defer entryPointEndLogTrace();
+
     var name: ?[]const u8 = null;
     if (p_layer_name) |layer_name| {
         name = std.mem.span(layer_name);
@@ -243,33 +287,51 @@ pub export fn strollEnumerateDeviceExtensionProperties(p_physical_device: vk.Phy
 }

 pub export fn strollGetPhysicalDeviceFormatProperties(p_physical_device: vk.PhysicalDevice, format: vk.Format, properties: *vk.FormatProperties) callconv(vk.vulkan_call_conv) void {
-    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch return;
-    properties.* = physical_device.getFormatProperties(format) catch return;
+    entryPointBeginLogTrace(.vkGetPhysicalDeviceFormatProperties);
+    defer entryPointEndLogTrace();
+
+    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return errorLogger(err);
+    properties.* = physical_device.getFormatProperties(format) catch |err| return errorLogger(err);
 }

 pub export fn strollGetPhysicalDeviceFeatures(p_physical_device: vk.PhysicalDevice, features: *vk.PhysicalDeviceFeatures) callconv(vk.vulkan_call_conv) void {
-    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch return;
+    entryPointBeginLogTrace(.vkGetPhysicalDeviceFeatures);
+    defer entryPointEndLogTrace();
+
+    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return errorLogger(err);
     features.* = physical_device.features;
 }

 pub export fn strollGetPhysicalDeviceImageFormatProperties(p_physical_device: vk.PhysicalDevice, format: vk.Format, image_type: vk.ImageType, tiling: vk.ImageTiling, usage: vk.ImageUsageFlags, flags: vk.ImageCreateFlags, properties: *vk.ImageFormatProperties) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkGetPhysicalDeviceImageFormatProperties);
+    defer entryPointEndLogTrace();
+
     const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return toVkResult(err);
     properties.* = physical_device.getImageFormatProperties(format, image_type, tiling, usage, flags) catch |err| return toVkResult(err);
     return .success;
 }

 pub export fn strollGetPhysicalDeviceProperties(p_physical_device: vk.PhysicalDevice, properties: *vk.PhysicalDeviceProperties) callconv(vk.vulkan_call_conv) void {
-    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch return;
+    entryPointBeginLogTrace(.vkGetPhysicalDeviceProperties);
+    defer entryPointEndLogTrace();
+
+    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return errorLogger(err);
     properties.* = physical_device.props;
 }

 pub export fn strollGetPhysicalDeviceMemoryProperties(p_physical_device: vk.PhysicalDevice, properties: *vk.PhysicalDeviceMemoryProperties) callconv(vk.vulkan_call_conv) void {
-    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch return;
+    entryPointBeginLogTrace(.vkGetPhysicalDeviceMemoryProperties);
+    defer entryPointEndLogTrace();
+
+    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return errorLogger(err);
     properties.* = physical_device.mem_props;
 }

 pub export fn strollGetPhysicalDeviceQueueFamilyProperties(p_physical_device: vk.PhysicalDevice, count: *u32, properties: ?[*]vk.QueueFamilyProperties) callconv(vk.vulkan_call_conv) void {
-    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch return;
+    entryPointBeginLogTrace(.vkGetPhysicalDeviceQueueFamilyProperties);
+    defer entryPointEndLogTrace();
+
+    const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return errorLogger(err);
     count.* = @intCast(physical_device.queue_family_props.items.len);

     if (properties) |props| {
         @memcpy(props[0..count.*], physical_device.queue_family_props.items[0..count.*]);
@@ -286,6 +348,9 @@ pub export fn strollGetPhysicalDeviceSparseImageFormatProperties(
     flags: vk.ImageCreateFlags,
     properties: *vk.SparseImageFormatProperties,
 ) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkGetPhysicalDeviceSparseImageFormatProperties);
+    defer entryPointEndLogTrace();
+
     const physical_device = Dispatchable(PhysicalDevice).fromHandleObject(p_physical_device) catch |err| return toVkResult(err);
     properties.* = physical_device.getSparseImageFormatProperties(format, image_type, samples, tiling, usage, flags) catch |err| return toVkResult(err);
     return .success;
@@ -294,6 +359,9 @@ pub export fn strollGetPhysicalDeviceSparseImageFormatProperties(
 // Queue functions ===========================================================================================================================================

 pub export fn strollQueueBindSparse(p_queue: vk.Queue, count: u32, info: [*]vk.BindSparseInfo, p_fence: vk.Fence) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkQueueBindSparse);
+    defer entryPointEndLogTrace();
+
     const queue = Dispatchable(Queue).fromHandleObject(p_queue) catch |err| return toVkResult(err);
     const fence = if (p_fence != .null_handle) NonDispatchable(Fence).fromHandleObject(p_fence) catch |err| return toVkResult(err) else null;
     queue.bindSparse(info[0..count], fence) catch |err| return toVkResult(err);
@@ -301,6 +369,9 @@ pub export fn strollQueueBindSparse(p_queue: vk.Queue, count: u32, info: [*]vk.B
 }

 pub export fn strollQueueSubmit(p_queue: vk.Queue, count: u32, info: [*]const vk.SubmitInfo, p_fence: vk.Fence) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkQueueSubmit);
+    defer entryPointEndLogTrace();
+
     if (count == 0) return .success;

     const queue = Dispatchable(Queue).fromHandleObject(p_queue) catch |err| return toVkResult(err);
@@ -310,6 +381,9 @@ pub export fn strollQueueSubmit(p_queue: vk.Queue, count: u32, info: [*]const vk
 }

 pub export fn strollQueueWaitIdle(p_queue: vk.Queue) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkQueueWaitIdle);
+    defer entryPointEndLogTrace();
+
     const queue = Dispatchable(Queue).fromHandleObject(p_queue) catch |err| return toVkResult(err);
     queue.waitIdle() catch |err| return toVkResult(err);
     return .success;
@@ -318,6 +392,9 @@ pub export fn strollQueueWaitIdle(p_queue: vk.Queue) callconv(vk.vulkan_call_con
 // Device functions ==========================================================================================================================================

 pub export fn strollAllocateCommandBuffers(p_device: vk.Device, p_info: ?*const vk.CommandBufferAllocateInfo, p_cmds: [*]vk.CommandBuffer) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkAllocateCommandBuffers);
+    defer entryPointEndLogTrace();
+
     Dispatchable(Device).checkHandleValidity(p_device) catch |err| return toVkResult(err);

     const info = p_info orelse return .error_validation_failed;
@@ -334,27 +411,43 @@ pub export fn strollAllocateCommandBuffers(p_device: vk.Device, p_info: ?*const
 }

 pub export fn strollAllocateMemory(p_device: vk.Device, p_info: ?*const vk.MemoryAllocateInfo, callbacks: ?*const vk.AllocationCallbacks, p_memory: *vk.DeviceMemory) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkAllocateMemory);
+    defer entryPointEndLogTrace();
+
     const info = p_info orelse return .error_validation_failed;
     if (info.s_type != .memory_allocate_info) {
         return .error_validation_failed;
     }
+
+    std.log.scoped(.vkAllocateMemory).debug("Allocating {d} bytes from device 0x{X}", .{ info.allocation_size, @intFromEnum(p_device) });
+
     const allocator = VulkanAllocator.init(callbacks, .object).allocator();
     const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return toVkResult(err);
     const device_memory = device.allocateMemory(allocator, info) catch |err| return toVkResult(err);
     p_memory.* = (NonDispatchable(DeviceMemory).wrap(allocator, device_memory) catch |err| return toVkResult(err)).toVkHandle(vk.DeviceMemory);
     return .success;
 }

 pub export fn strollBindBufferMemory(p_device: vk.Device, p_buffer: vk.Buffer, p_memory: vk.DeviceMemory, offset: vk.DeviceSize) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkBindBufferMemory);
+    defer entryPointEndLogTrace();
+
+    std.log.scoped(.vkBindBufferMemory).debug("Binding device memory 0x{X} to buffer 0x{X}", .{ @intFromEnum(p_memory), @intFromEnum(p_buffer) });
+
     Dispatchable(Device).checkHandleValidity(p_device) catch |err| return toVkResult(err);

     const buffer = NonDispatchable(Buffer).fromHandleObject(p_buffer) catch |err| return toVkResult(err);
     const memory = NonDispatchable(DeviceMemory).fromHandleObject(p_memory) catch |err| return toVkResult(err);
     buffer.bindMemory(memory, offset) catch |err| return toVkResult(err);
     return .success;
 }

 pub export fn strollCreateBuffer(p_device: vk.Device, p_info: ?*const vk.BufferCreateInfo, callbacks: ?*const vk.AllocationCallbacks, p_buffer: *vk.Buffer) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkCreateBuffer);
+    defer entryPointEndLogTrace();
+
     const info = p_info orelse return .error_validation_failed;
     if (info.s_type != .buffer_create_info) {
         return .error_validation_failed;
@@ -367,10 +460,14 @@ pub export fn strollCreateBuffer(p_device: vk.Device, p_info: ?*const vk.BufferC
 }

 pub export fn strollCreateCommandPool(p_device: vk.Device, p_info: ?*const vk.CommandPoolCreateInfo, callbacks: ?*const vk.AllocationCallbacks, p_pool: *vk.CommandPool) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkCreateCommandPool);
+    defer entryPointEndLogTrace();
+
     const info = p_info orelse return .error_validation_failed;
     if (info.s_type != .command_pool_create_info) {
         return .error_validation_failed;
     }
+
     const allocator = VulkanAllocator.init(callbacks, .object).allocator();
     const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return toVkResult(err);
     const pool = device.createCommandPool(allocator, info) catch |err| return toVkResult(err);
@@ -379,10 +476,14 @@ pub export fn strollCreateCommandPool(p_device: vk.Device, p_info: ?*const vk.Co
 }

 pub export fn strollCreateFence(p_device: vk.Device, p_info: ?*const vk.FenceCreateInfo, callbacks: ?*const vk.AllocationCallbacks, p_fence: *vk.Fence) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkCreateFence);
+    defer entryPointEndLogTrace();
+
     const info = p_info orelse return .error_validation_failed;
     if (info.s_type != .fence_create_info) {
         return .error_validation_failed;
     }
+
     const allocator = VulkanAllocator.init(callbacks, .object).allocator();
     const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return toVkResult(err);
     const fence = device.createFence(allocator, info) catch |err| return toVkResult(err);
@@ -391,57 +492,79 @@ pub export fn strollCreateFence(p_device: vk.Device, p_info: ?*const vk.FenceCre
 }

 pub export fn strollDestroyBuffer(p_device: vk.Device, p_buffer: vk.Buffer, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
-    Dispatchable(Device).checkHandleValidity(p_device) catch return;
+    entryPointBeginLogTrace(.vkDestroyBuffer);
+    defer entryPointEndLogTrace();
+
+    Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err);
     const allocator = VulkanAllocator.init(callbacks, .object).allocator();
-    const non_dispatchable = NonDispatchable(Buffer).fromHandle(p_buffer) catch return;
+    const non_dispatchable = NonDispatchable(Buffer).fromHandle(p_buffer) catch |err| return errorLogger(err);
     non_dispatchable.intrusiveDestroy(allocator);
 }

 pub export fn strollDestroyCommandPool(p_device: vk.Device, p_pool: vk.CommandPool, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
-    Dispatchable(Device).checkHandleValidity(p_device) catch return;
+    entryPointBeginLogTrace(.vkDestroyCommandPool);
+    defer entryPointEndLogTrace();
+
+    Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err);
     const allocator = VulkanAllocator.init(callbacks, .object).allocator();
-    const non_dispatchable = NonDispatchable(CommandPool).fromHandle(p_pool) catch return;
+    const non_dispatchable = NonDispatchable(CommandPool).fromHandle(p_pool) catch |err| return errorLogger(err);
     non_dispatchable.intrusiveDestroy(allocator);
 }

 pub export fn strollDestroyDevice(p_device: vk.Device, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
-    const allocator = VulkanAllocator.init(callbacks, .object).allocator();
-    const dispatchable = Dispatchable(Device).fromHandle(p_device) catch return;
-    std.log.scoped(.vkDestroyDevice).info("Destroying VkDevice created from {s}", .{dispatchable.object.physical_device.props.device_name});
-    logger.indent();
-    defer logger.unindent();
-    dispatchable.object.destroy(allocator) catch return;
+    entryPointBeginLogTrace(.vkDestroyDevice);
+    defer entryPointEndLogTrace();
+
+    const allocator = VulkanAllocator.init(callbacks, .object).allocator();
+    const dispatchable = Dispatchable(Device).fromHandle(p_device) catch |err| return errorLogger(err);
+
+    std.log.scoped(.vkDestroyDevice).debug("Destroying VkDevice created from {s}", .{dispatchable.object.physical_device.props.device_name});
+
+    dispatchable.object.destroy(allocator) catch |err| return errorLogger(err);
     dispatchable.destroy(allocator);
 }

 pub export fn strollDestroyFence(p_device: vk.Device, p_fence: vk.Fence, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
-    Dispatchable(Device).checkHandleValidity(p_device) catch return;
+    entryPointBeginLogTrace(.vkDestroyFence);
+    defer entryPointEndLogTrace();
+
+    Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err);
     const allocator = VulkanAllocator.init(callbacks, .object).allocator();
-    const non_dispatchable = NonDispatchable(Fence).fromHandle(p_fence) catch return;
+    const non_dispatchable = NonDispatchable(Fence).fromHandle(p_fence) catch |err| return errorLogger(err);
     non_dispatchable.intrusiveDestroy(allocator);
 }

 pub export fn strollFreeCommandBuffers(p_device: vk.Device, p_pool: vk.CommandPool, count: u32, p_cmds: [*]const vk.CommandBuffer) callconv(vk.vulkan_call_conv) void {
-    Dispatchable(Device).checkHandleValidity(p_device) catch return;
-    const pool = NonDispatchable(CommandPool).fromHandleObject(p_pool) catch return;
+    entryPointBeginLogTrace(.vkFreeCommandBuffers);
+    defer entryPointEndLogTrace();
+
+    Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err);
+    const pool = NonDispatchable(CommandPool).fromHandleObject(p_pool) catch |err| return errorLogger(err);
     const cmds: [*]*Dispatchable(CommandBuffer) = @ptrCast(@constCast(p_cmds));
-    pool.freeCommandBuffers(cmds[0..count]) catch return;
+    pool.freeCommandBuffers(cmds[0..count]) catch |err| return errorLogger(err);
 }

 pub export fn strollFreeMemory(p_device: vk.Device, p_memory: vk.DeviceMemory, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
-    Dispatchable(Device).checkHandleValidity(p_device) catch return;
+    entryPointBeginLogTrace(.vkFreeMemory);
+    defer entryPointEndLogTrace();
+
+    Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err);
     const allocator = VulkanAllocator.init(callbacks, .object).allocator();
-    const non_dispatchable = NonDispatchable(DeviceMemory).fromHandle(p_memory) catch return;
+    const non_dispatchable = NonDispatchable(DeviceMemory).fromHandle(p_memory) catch |err| return errorLogger(err);
     non_dispatchable.intrusiveDestroy(allocator);
 }

 pub export fn strollGetDeviceProcAddr(p_device: vk.Device, p_name: ?[*:0]const u8) callconv(vk.vulkan_call_conv) vk.PfnVoidFunction {
+    if (lib.getLogVerboseLevel() == .TooMuch) {
+        entryPointBeginLogTrace(.vkGetDeviceProcAddr);
+    }
+    defer entryPointEndLogTrace();
+
     if (p_name == null) return null;
     const name = std.mem.span(p_name.?);
@@ -453,8 +576,11 @@ pub export fn strollGetDeviceProcAddr(p_device: vk.Device, p_name: ?[*:0]const u
 }

 pub export fn strollGetDeviceQueue(p_device: vk.Device, queue_family_index: u32, queue_index: u32, p_queue: *vk.Queue) callconv(vk.vulkan_call_conv) void {
+    entryPointBeginLogTrace(.vkGetDeviceQueue);
+    defer entryPointEndLogTrace();
+
     p_queue.* = .null_handle;
-    const device = Dispatchable(Device).fromHandleObject(p_device) catch return;
+    const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return errorLogger(err);

     if (device.queues.get(queue_family_index)) |family| {
         if (queue_index >= family.items.len) return;
@@ -469,6 +595,9 @@ pub export fn strollGetDeviceQueue(p_device: vk.Device, queue_family_index: u32,
 }

 pub export fn strollGetFenceStatus(p_device: vk.Device, p_fence: vk.Fence) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkGetFenceStatus);
+    defer entryPointEndLogTrace();
+
     Dispatchable(Device).checkHandleValidity(p_device) catch |err| return toVkResult(err);

     const fence = NonDispatchable(Fence).fromHandleObject(p_fence) catch |err| return toVkResult(err);
@@ -477,13 +606,19 @@ pub export fn strollGetFenceStatus(p_device: vk.Device, p_fence: vk.Fence) callc
 }

 pub export fn strollGetBufferMemoryRequirements(p_device: vk.Device, p_buffer: vk.Buffer, requirements: *vk.MemoryRequirements) callconv(vk.vulkan_call_conv) void {
-    Dispatchable(Device).checkHandleValidity(p_device) catch return;
-    const buffer = NonDispatchable(Buffer).fromHandleObject(p_buffer) catch return;
+    entryPointBeginLogTrace(.vkGetBufferMemoryRequirements);
+    defer entryPointEndLogTrace();
+
+    Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err);
+    const buffer = NonDispatchable(Buffer).fromHandleObject(p_buffer) catch |err| return errorLogger(err);
     buffer.getMemoryRequirements(requirements);
 }

 pub export fn strollMapMemory(p_device: vk.Device, p_memory: vk.DeviceMemory, offset: vk.DeviceSize, size: vk.DeviceSize, _: vk.MemoryMapFlags, pp_data: *?*anyopaque) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkMapMemory);
+    defer entryPointEndLogTrace();
+
     Dispatchable(Device).checkHandleValidity(p_device) catch |err| return toVkResult(err);

     const device_memory = NonDispatchable(DeviceMemory).fromHandleObject(p_memory) catch |err| return toVkResult(err);
@@ -492,13 +627,19 @@ pub export fn strollMapMemory(p_device: vk.Device, p_memory: vk.DeviceMemory, of
 }

 pub export fn strollUnmapMemory(p_device: vk.Device, p_memory: vk.DeviceMemory) callconv(vk.vulkan_call_conv) void {
-    Dispatchable(Device).checkHandleValidity(p_device) catch return;
-    const device_memory = NonDispatchable(DeviceMemory).fromHandleObject(p_memory) catch return;
+    entryPointBeginLogTrace(.vkUnmapMemory);
+    defer entryPointEndLogTrace();
+
+    Dispatchable(Device).checkHandleValidity(p_device) catch |err| return errorLogger(err);
+    const device_memory = NonDispatchable(DeviceMemory).fromHandleObject(p_memory) catch |err| return errorLogger(err);
     device_memory.unmap();
 }

 pub export fn strollResetFences(p_device: vk.Device, count: u32, p_fences: [*]const vk.Fence) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkResetFences);
+    defer entryPointEndLogTrace();
+
     Dispatchable(Device).checkHandleValidity(p_device) catch |err| return toVkResult(err);

     for (p_fences, 0..count) |p_fence, _| {
@@ -509,6 +650,9 @@ pub export fn strollResetFences(p_device: vk.Device, count: u32, p_fences: [*]co
 }

 pub export fn strollWaitForFences(p_device: vk.Device, count: u32, p_fences: [*]const vk.Fence, waitForAll: vk.Bool32, timeout: u64) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkWaitForFences);
+    defer entryPointEndLogTrace();
+
     Dispatchable(Device).checkHandleValidity(p_device) catch |err| return toVkResult(err);

     loop: for (p_fences, 0..count) |p_fence, _| {
@@ -522,6 +666,9 @@ pub export fn strollWaitForFences(p_device: vk.Device, count: u32, p_fences: [*]
 // Command Buffer functions ===================================================================================================================================

 pub export fn strollBeginCommandBuffer(p_cmd: vk.CommandBuffer, p_info: ?*const vk.CommandBufferBeginInfo) callconv(vk.vulkan_call_conv) vk.Result {
+    entryPointBeginLogTrace(.vkBeginCommandBuffer);
+    defer entryPointEndLogTrace();
+
     const info = p_info orelse return .error_validation_failed;
     if (info.s_type != .command_buffer_begin_info) {
         return .error_validation_failed;
@@ -531,13 +678,28 @@ pub export fn strollBeginCommandBuffer(p_cmd: vk.CommandBuffer, p_info: ?*const
return .success; return .success;
} }
pub export fn strollCmdFillBuffer(p_cmd: vk.CommandBuffer, p_buffer: vk.Buffer, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) callconv(vk.vulkan_call_conv) void {
entryPointBeginLogTrace(.vkCmdFillBuffer);
defer entryPointEndLogTrace();
const cmd = Dispatchable(CommandBuffer).fromHandleObject(p_cmd) catch |err| return errorLogger(err);
const buffer = NonDispatchable(Buffer).fromHandleObject(p_buffer) catch |err| return errorLogger(err);
cmd.fillBuffer(buffer, offset, size, data) catch |err| return errorLogger(err);
}
pub export fn strollEndCommandBuffer(p_cmd: vk.CommandBuffer) callconv(vk.vulkan_call_conv) vk.Result {
    entryPointBeginLogTrace(.vkEndCommandBuffer);
    defer entryPointEndLogTrace();
    const cmd = Dispatchable(CommandBuffer).fromHandleObject(p_cmd) catch |err| return toVkResult(err);
    cmd.end() catch |err| return toVkResult(err);
    return .success;
}

pub export fn strollResetCommandBuffer(p_cmd: vk.CommandBuffer, flags: vk.CommandBufferResetFlags) callconv(vk.vulkan_call_conv) vk.Result {
    entryPointBeginLogTrace(.vkResetCommandBuffer);
    defer entryPointEndLogTrace();
    const cmd = Dispatchable(CommandBuffer).fromHandleObject(p_cmd) catch |err| return toVkResult(err);
    cmd.reset(flags) catch |err| return toVkResult(err);
    return .success;
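
The added `entryPointBeginLogTrace`/`entryPointEndLogTrace` pairs are defined outside this diff; presumably they couple an entry-point trace log with the logger's indent tracking, roughly along these lines (a sketch under that assumption, not the actual helpers):

```zig
// Sketch only: the real helpers are not shown in this commit. Assumes an
// EntryPoint enum of Vulkan entry-point names and a `logger` module import.
inline fn entryPointBeginLogTrace(comptime entry_point: EntryPoint) void {
    std.log.scoped(.EntryPoints).debug("{s}", .{@tagName(entry_point)});
    logger.indent();
}

inline fn entryPointEndLogTrace() void {
    logger.unindent();
}
```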

View File

@@ -1,3 +1,6 @@
//! A driver-global logger that stacks all same-indent `debug` logs in memory
//! and only flushes them to stderr once a non-debug log is requested
const std = @import("std");
const builtin = @import("builtin");
const zdt = @import("zdt");
@@ -12,20 +15,37 @@ comptime {
    }
}

const DebugStackElement = struct {
    log: [512]u8,
    indent_level: usize,
};

var indent_level: usize = 0;
var debug_stack = std.ArrayList(DebugStackElement).empty;

pub inline fn indent() void {
    const new_indent_level, const has_overflown = @addWithOverflow(indent_level, 1);
    if (has_overflown == 0) {
        indent_level = new_indent_level;
    }
}

pub inline fn unindent() void {
    const new_indent_level, const has_overflown = @subWithOverflow(indent_level, 1);
    if (has_overflown == 0) {
        indent_level = new_indent_level;
    }
    loop: while (debug_stack.getLastOrNull()) |last| {
        if (last.indent_level >= indent_level) {
            _ = debug_stack.pop();
        } else {
            break :loop;
        }
    }
}

pub inline fn freeInnerDebugStack() void {
    debug_stack.deinit(std.heap.c_allocator);
}
pub fn log(comptime level: std.log.Level, comptime scope: @Type(.enum_literal), comptime format: []const u8, args: anytype) void {
@@ -53,38 +73,50 @@ pub fn log(comptime level: std.log.Level, comptime scope: @Type(.enum_literal),
    std.debug.lockStdErr();
    defer std.debug.unlockStdErr();

    var buffer = std.mem.zeroes([512]u8);
    var stderr_file = std.fs.File.stderr();
    var out_config = std.Io.tty.Config.detect(stderr_file);
    var writer = std.Io.Writer.fixed(&buffer);

    var timezone = zdt.Timezone.tzLocal(std.heap.page_allocator) catch zdt.Timezone.UTC;
    defer timezone.deinit();
    const now = zdt.Datetime.now(.{ .tz = &timezone }) catch zdt.Datetime{};

    out_config.setColor(&writer, .magenta) catch {};
    writer.print("[StrollDriver ", .{}) catch {};
    if (!builtin.is_test) {
        out_config.setColor(&writer, .cyan) catch {};
        writer.print(root.DRIVER_NAME, .{}) catch {};
    }
    out_config.setColor(&writer, .yellow) catch {};
    writer.print(" {d:02}:{d:02}:{d:02}.{d:03}", .{ now.hour, now.minute, now.second, @divFloor(now.nanosecond, std.time.ns_per_ms) }) catch {};
    out_config.setColor(&writer, .magenta) catch {};
    writer.print("]", .{}) catch {};
    out_config.setColor(&writer, level_color) catch {};
    writer.print(prefix, .{}) catch {};
    out_config.setColor(&writer, if (level == .err) .red else .green) catch {};
    writer.print("{s: >30}", .{scope_prefix}) catch {};
    out_config.setColor(&writer, .reset) catch {};
    for (0..indent_level) |_| {
        writer.print("> ", .{}) catch {};
    }
    writer.print(format ++ "\n", args) catch {};
    writer.flush() catch return;

    if (level == .debug and lib.getLogVerboseLevel() == .Standard) {
        (debug_stack.addOne(std.heap.c_allocator) catch return).* = .{
            .log = buffer,
            .indent_level = indent_level,
        };
    } else {
        while (debug_stack.items.len != 0) {
            const elem_buffer = debug_stack.orderedRemove(0).log;
            _ = stderr_file.write(&elem_buffer) catch return;
        }
        _ = stderr_file.write(&buffer) catch return;
    }
}
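
For reference, a minimal usage sketch of the reworked buffering behaviour, assuming this file is exposed as a `logger` module and installed as the root `std_options.logFn` (both assumptions, not shown in this diff), and that `lib.getLogVerboseLevel()` returns `.Standard`:

```zig
const std = @import("std");
const logger = @import("logger.zig"); // hypothetical import path

fn traceExample() void {
    // At Standard verbosity, debug lines are only buffered on the stack,
    // tagged with the current indent level; nothing is printed yet.
    std.log.debug("vkQueueSubmit", .{});
    logger.indent();
    std.log.debug("validating submit info", .{});

    // A non-debug log flushes the buffered debug lines first (oldest first),
    // then prints itself, so the error shows up with its debug context.
    std.log.err("submission failed", .{});

    // unindent() drops any still-buffered debug entries at or above the new level.
    logger.unindent();
}
```

The apparent intent is that quiet runs stay quiet: buffered debug context is only printed when a warning or error actually occurs.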

View File

@@ -52,7 +52,16 @@ int main(void)
    VkDevice device = kvfCreateDevice(physical_device, NULL, 0, NULL);
    volkLoadDevice(device);

    VkBuffer buffer = kvfCreateBuffer(device, VK_BUFFER_USAGE_TRANSFER_DST_BIT, 256);
    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkMemoryAllocateInfo alloc_info = {0};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = 256;
    alloc_info.memoryTypeIndex = 0;
    kvfCheckVk(vkAllocateMemory(device, &alloc_info, NULL, &memory));
    kvfCheckVk(vkBindBufferMemory(device, buffer, memory, 0));

    VkQueue queue = kvfGetDeviceQueue(device, KVF_GRAPHICS_QUEUE);
    VkFence fence = kvfCreateFence(device);
@@ -61,13 +70,24 @@ int main(void)
    kvfCheckVk(vkResetCommandBuffer(cmd, 0));
    kvfBeginCommandBuffer(cmd, 0);
    {
        vkCmdFillBuffer(cmd, buffer, 0, VK_WHOLE_SIZE, 0x12ABCDEF);
    }
    kvfEndCommandBuffer(cmd);
    kvfSubmitCommandBuffer(device, cmd, KVF_GRAPHICS_QUEUE, VK_NULL_HANDLE, VK_NULL_HANDLE, fence, NULL);
    kvfWaitForFence(device, fence);

    uint32_t* map = NULL;
    kvfCheckVk(vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, (void**)&map));
    for(size_t i = 0; i < 64; i++)
        printf("0x%X%s", map[i], (i + 1 == 64 ? "" : " - "));
    puts("");
    vkUnmapMemory(device, memory);

    kvfDestroyFence(device, fence);
    kvfDestroyBuffer(device, buffer);
    vkFreeMemory(device, memory, NULL);
    kvfDestroyDevice(device);
    kvfDestroyInstance(instance);