fixing CTS issues, improving logger

2025-12-05 23:01:17 +01:00
parent 96f69de54f
commit 453d965d0c
12 changed files with 168 additions and 83 deletions


@@ -146,6 +146,7 @@ pub fn build(b: *std.Build) !void {
const run_cts = b.addSystemCommand(&[_][]const u8{
try cts_exe_path.getPath3(b, null).toString(b.allocator),
b.fmt("--deqp-archive-dir={s}", .{try cts.path("").getPath3(b, null).toString(b.allocator)}),
b.fmt("--deqp-caselist-file={s}", .{try cts.path("mustpass/1.0.0/vk-default.txt").getPath3(b, null).toString(b.allocator)}),
b.fmt("--deqp-vk-library-path={s}", .{b.getInstallPath(.lib, lib.out_lib_filename)}),
});
@@ -158,6 +159,7 @@ pub fn build(b: *std.Build) !void {
"gdb",
"--args",
try cts_exe_path.getPath3(b, null).toString(b.allocator),
b.fmt("--deqp-archive-dir={s}", .{try cts.path("").getPath3(b, null).toString(b.allocator)}),
b.fmt("--deqp-caselist-file={s}", .{try cts.path("mustpass/1.0.0/vk-default.txt").getPath3(b, null).toString(b.allocator)}),
b.fmt("--deqp-vk-library-path={s}", .{b.getInstallPath(.lib, lib.out_lib_filename)}),
});


@@ -26,8 +26,8 @@
.hash = "zdt-0.8.1-xr0_vAxUDwCJRDh9pcAS_mdZBIsvcGTtN-K8JJSWY4I6",
},
.cts_bin = .{
.url = "git+https://github.com/Kbz-8/Vulkan-CTS-bin#f0317494ed3784c17cf44a4f59a40d9c868073d2",
.hash = "N-V-__8AAK0SyAbVT6fSdbtrr5f3TaxffmQDtge6sMrLN-R5",
.url = "git+https://github.com/Kbz-8/Vulkan-CTS-bin#2fa3e9310a627c13ba512b5781284f1b1481a938",
.hash = "N-V-__8AAIxh3gbzEZcY5tBSA2BSVhLyCyOG0tnAdYviHn99",
},
.cpuinfo = .{
.url = "git+https://github.com/Kbz-8/cpuinfo-zig#77f82a1248194e7fb706967343c66021f8522766",


@@ -28,21 +28,19 @@ pub fn create(allocator: std.mem.Allocator, infos: *const vk.InstanceCreateInfo)
}
fn requestPhysicalDevices(interface: *Interface, allocator: std.mem.Allocator) VkError!void {
// Software driver only has one physical device (the CPU)
// Software driver has only one physical device (the CPU)
const physical_device = try SoftPhysicalDevice.create(allocator, interface);
errdefer physical_device.interface.releasePhysicalDevice(allocator) catch {};
interface.physical_devices.append(allocator, try Dispatchable(base.PhysicalDevice).wrap(allocator, &physical_device.interface)) catch return VkError.OutOfHostMemory;
}
fn releasePhysicalDevices(interface: *Interface, allocator: std.mem.Allocator) VkError!void {
defer {
interface.physical_devices.deinit(allocator);
interface.physical_devices = .empty;
}
const physical_device = interface.physical_devices.getLast();
try physical_device.object.releasePhysicalDevice(allocator);
physical_device.destroy(allocator);
interface.physical_devices.deinit(allocator);
interface.physical_devices = .empty;
}
fn destroyInstance(interface: *Interface, allocator: std.mem.Allocator) VkError!void {


@@ -99,8 +99,8 @@ pub fn createQueues(self: *Self, allocator: std.mem.Allocator, info: *const vk.D
const queue = try self.vtable.createQueue(allocator, self, queue_info.queue_family_index, @intCast(family_ptr.items.len), queue_info.flags);
logger.indent();
defer logger.unindent();
logger.manager.get().indent();
defer logger.manager.get().unindent();
const dispatchable_queue = try Dispatchable(Queue).wrap(allocator, queue);
family_ptr.append(allocator, dispatchable_queue) catch return VkError.OutOfHostMemory;


@@ -44,7 +44,7 @@ pub fn init(allocator: std.mem.Allocator, infos: *const vk.InstanceCreateInfo) V
};
}
// Dummy for docs creation and stuff
/// Dummy for docs creation and stuff
pub fn create(allocator: std.mem.Allocator, infos: *const vk.InstanceCreateInfo) VkError!*Self {
_ = allocator;
_ = infos;
@@ -80,8 +80,8 @@ pub fn releasePhysicalDevices(self: *Self, allocator: std.mem.Allocator) VkError
}
pub fn requestPhysicalDevices(self: *Self, allocator: std.mem.Allocator) VkError!void {
logger.indent();
defer logger.unindent();
logger.manager.get().indent();
defer logger.manager.get().unindent();
try self.vtable.requestPhysicalDevices(self, allocator);
if (self.physical_devices.items.len == 0) {


@@ -11,12 +11,14 @@ const Self = @This();
callbacks: ?vk.AllocationCallbacks,
scope: vk.SystemAllocationScope,
fallback_allocator: std.heap.ThreadSafeAllocator,
pub fn init(callbacks: ?*const vk.AllocationCallbacks, scope: vk.SystemAllocationScope) Self {
const deref_callbacks = if (callbacks) |c| c.* else null;
return .{
.callbacks = deref_callbacks,
.scope = scope,
.fallback_allocator = .{ .child_allocator = std.heap.c_allocator },
};
}
@@ -45,6 +47,7 @@ pub fn cloneWithScope(self: *Self, scope: vk.SystemAllocationScope) Self {
return .{
.callbacks = self.callbacks,
.scope = scope,
.fallback_allocator = self.fallback_allocator,
};
}
@@ -57,7 +60,7 @@ fn alloc(context: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize)
}
}
return getFallbackAllocator().rawAlloc(len, alignment, ret_addr);
return self.getFallbackAllocator().rawAlloc(len, alignment, ret_addr);
}
fn resize(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool {
@@ -65,7 +68,7 @@ fn resize(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize,
return if (self.callbacks != null)
new_len <= ptr.len
else
getFallbackAllocator().rawResize(ptr, alignment, new_len, ret_addr);
self.getFallbackAllocator().rawResize(ptr, alignment, new_len, ret_addr);
}
fn remap(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) ?[*]u8 {
@@ -75,7 +78,7 @@ fn remap(context: *anyopaque, ptr: []u8, alignment: Alignment, new_len: usize, r
return @ptrCast(pfn_reallocation(self.callbacks.?.p_user_data, ptr.ptr, new_len, alignment.toByteUnits(), self.scope));
}
}
return getFallbackAllocator().rawRemap(ptr, alignment, new_len, ret_addr);
return self.getFallbackAllocator().rawRemap(ptr, alignment, new_len, ret_addr);
}
fn free(context: *anyopaque, ptr: []u8, alignment: Alignment, ret_addr: usize) void {
@@ -83,12 +86,12 @@ fn free(context: *anyopaque, ptr: []u8, alignment: Alignment, ret_addr: usize) v
if (self.callbacks) |callbacks| {
if (callbacks.pfn_free) |pfn_free| {
pfn_free(self.callbacks.?.p_user_data, ptr.ptr);
return;
}
}
getFallbackAllocator().rawFree(ptr, alignment, ret_addr);
self.getFallbackAllocator().rawFree(ptr, alignment, ret_addr);
}
inline fn getFallbackAllocator() std.mem.Allocator {
var fallback_allocator: std.heap.ThreadSafeAllocator = .{ .child_allocator = std.heap.c_allocator };
return fallback_allocator.allocator();
inline fn getFallbackAllocator(self: *Self) std.mem.Allocator {
return self.fallback_allocator.allocator();
}
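
For context (not part of the commit): the old getFallbackAllocator built a fresh std.heap.ThreadSafeAllocator on the stack on every call, so the returned std.mem.Allocator pointed at a temporary, while the new version keeps the fallback as a field set up in init. A reduced, hypothetical sketch of the fixed shape, with an illustrative struct name since the diff does not show it:

const std = @import("std");

// Hypothetical reduced struct mirroring the hunk above; the field and the
// two functions follow the diff, the struct name is illustrative.
const HostAllocator = struct {
    fallback_allocator: std.heap.ThreadSafeAllocator,

    pub fn init() HostAllocator {
        // The fallback lives inside the struct, so the std.mem.Allocator
        // returned below keeps pointing at valid memory.
        return .{ .fallback_allocator = .{ .child_allocator = std.heap.c_allocator } };
    }

    inline fn getFallbackAllocator(self: *HostAllocator) std.mem.Allocator {
        return self.fallback_allocator.allocator();
    }
};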


@@ -9,7 +9,7 @@ pub const vku = @cImport({
pub const commands = @import("commands.zig");
pub const errors = @import("error_set.zig");
pub const lib_vulkan = @import("lib_vulkan.zig");
pub const logger = @import("logger.zig");
pub const logger = @import("logger/logger.zig");
pub const Dispatchable = @import("Dispatchable.zig").Dispatchable;
pub const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;


@@ -6,11 +6,11 @@ const root = @import("root");
const lib = @import("lib.zig");
const builtin = @import("builtin");
const logger = @import("logger.zig");
const error_set = @import("error_set.zig");
const VkError = error_set.VkError;
const toVkResult = error_set.toVkResult;
const errorLogger = error_set.errorLogger;
const logger = lib.logger;
const errors = lib.errors;
const VkError = errors.VkError;
const toVkResult = errors.toVkResult;
const errorLogger = errors.errorLogger;
const Dispatchable = @import("Dispatchable.zig").Dispatchable;
const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
@@ -46,11 +46,11 @@ pub const ShaderModule = @import("ShaderModule.zig");
fn entryPointBeginLogTrace(comptime scope: @Type(.enum_literal)) void {
std.log.scoped(scope).debug("Calling {s}...", .{@tagName(scope)});
logger.indent();
logger.manager.get().indent();
}
fn entryPointEndLogTrace() void {
logger.unindent();
logger.manager.get().unindent();
}
fn entryPointNotFoundErrorLog(comptime scope: @Type(.enum_literal), name: []const u8) void {
@@ -338,7 +338,7 @@ pub export fn strollEnumerateInstanceVersion(version: *u32) callconv(vk.vulkan_c
// Instance functions ========================================================================================================================================
pub export fn strollDestroyInstance(p_instance: vk.Instance, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
defer logger.freeInnerDebugStack();
//defer logger.manager.deinit();
entryPointBeginLogTrace(.vkDestroyInstance);
defer entryPointEndLogTrace();

src/vulkan/logger/DebugStack.zig (new file, 41 lines)

@@ -0,0 +1,41 @@
const std = @import("std");
const Self = @This();
pub const Element = struct {
log: [512]u8,
indent_level: usize,
log_level: std.log.Level,
};
stack: std.ArrayList(Element),
allocator: std.mem.Allocator = std.heap.c_allocator,
pub const empty: Self = .{
.stack = .empty,
};
pub fn pushBack(self: *Self, element: Element) !void {
try self.stack.append(self.allocator, element);
}
pub fn popBack(self: *Self) ?Element {
return self.stack.pop();
}
pub fn popFront(self: *Self) Element {
return self.stack.orderedRemove(0);
}
pub fn getLastOrNull(self: *Self) ?Element {
return self.stack.getLastOrNull();
}
pub inline fn len(self: *Self) usize {
return self.stack.items.len;
}
pub fn deinit(self: *Self) void {
self.stack.deinit(self.allocator);
self.* = .empty;
}

src/vulkan/logger/Manager.zig (new file, 47 lines)

@@ -0,0 +1,47 @@
const std = @import("std");
const DebugStack = @import("DebugStack.zig");
const Self = @This();
indent_enabled: bool,
indent_level: usize,
debug_stack: DebugStack,
pub const init: Self = .{
.indent_enabled = true,
.indent_level = 0,
.debug_stack = .empty,
};
pub fn indent(self: *Self) void {
const new_indent_level, const has_overflown = @addWithOverflow(self.indent_level, 1);
if (has_overflown == 0) {
self.indent_level = new_indent_level;
}
}
pub fn unindent(self: *Self) void {
const new_indent_level, const has_overflown = @subWithOverflow(self.indent_level, 1);
if (has_overflown == 0) {
self.indent_level = new_indent_level;
}
loop: while (self.debug_stack.getLastOrNull()) |last| {
if (last.indent_level >= self.indent_level) {
_ = self.debug_stack.popBack();
} else {
break :loop;
}
}
}
pub inline fn enableIndent(self: *Self) void {
self.indent_enabled = true;
}
pub inline fn disableIndent(self: *Self) void {
self.indent_enabled = false;
}
pub inline fn deinit(self: *Self) void {
self.debug_stack.deinit();
}

src/vulkan/logger/ThreadSafeManager.zig (new file, 34 lines)

@@ -0,0 +1,34 @@
const std = @import("std");
const Manager = @import("Manager.zig");
const Self = @This();
managers: std.AutoArrayHashMapUnmanaged(std.Thread.Id, Manager),
allocator: std.heap.ThreadSafeAllocator,
mutex: std.Thread.Mutex,
pub const init: Self = .{
.managers = .empty,
.allocator = .{ .child_allocator = std.heap.c_allocator },
.mutex = .{},
};
pub fn get(self: *Self) *Manager {
const allocator = self.allocator.allocator();
self.mutex.lock();
defer self.mutex.unlock();
return (self.managers.getOrPutValue(allocator, std.Thread.getCurrentId(), .init) catch @panic("Out of memory")).value_ptr;
}
pub fn deinit(self: *Self) void {
self.mutex.lock();
defer self.mutex.unlock();
var it = self.managers.iterator();
while (it.next()) |entry| {
entry.value_ptr.deinit();
}
self.managers.deinit(self.allocator.allocator());
}
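
A minimal usage sketch (not part of the commit), based on the call sites updated elsewhere in this diff: logger.manager is the shared ThreadSafeManager, and get() hands each thread its own Manager keyed by its thread id, so indentation state is no longer a set of process-wide globals.

const lib = @import("lib.zig"); // assumed import path, matching the other files in this diff
const logger = lib.logger;

fn tracedEntryPoint() void {
    // Indent the current thread's manager and restore it on exit,
    // as the updated entry points do.
    logger.manager.get().indent();
    defer logger.manager.get().unindent();
    // ... per-thread, indentation-aware logging happens here ...
}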


@@ -5,7 +5,9 @@ const std = @import("std");
const builtin = @import("builtin");
const zdt = @import("zdt");
const root = @import("root");
const lib = @import("lib.zig");
const lib = @import("../lib.zig");
const ThreadSafeManager = @import("ThreadSafeManager.zig");
comptime {
if (!builtin.is_test) {
@@ -15,53 +17,11 @@ comptime {
}
}
const DebugStackElement = struct {
log: [512]u8,
indent_level: usize,
log_level: std.log.Level,
};
var indent_enabled = true;
var indent_level: usize = 0;
var debug_stack = std.ArrayList(DebugStackElement).empty;
pub inline fn indent() void {
const new_indent_level, const has_overflown = @addWithOverflow(indent_level, 1);
if (has_overflown == 0) {
indent_level = new_indent_level;
}
}
pub inline fn unindent() void {
const new_indent_level, const has_overflown = @subWithOverflow(indent_level, 1);
if (has_overflown == 0) {
indent_level = new_indent_level;
}
loop: while (debug_stack.getLastOrNull()) |last| {
if (last.indent_level >= indent_level) {
_ = debug_stack.pop();
} else {
break :loop;
}
}
}
pub inline fn enableIndent() void {
indent_enabled = true;
}
pub inline fn disableIndent() void {
indent_enabled = false;
}
pub inline fn freeInnerDebugStack() void {
debug_stack.deinit(std.heap.c_allocator);
debug_stack = .empty;
}
pub var manager: ThreadSafeManager = .init;
pub inline fn fixme(comptime format: []const u8, args: anytype) void {
disableIndent();
defer enableIndent();
manager.get().disableIndent();
defer manager.get().enableIndent();
nestedFixme(format, args);
}
@@ -133,8 +93,8 @@ pub fn log(comptime level: std.log.Level, comptime scope: @Type(.enum_literal),
out_config.setColor(&writer, .reset) catch {};
if (indent_enabled) {
for (0..indent_level) |_| {
if (manager.get().indent_enabled) {
for (0..manager.get().indent_level) |_| {
writer.print("> ", .{}) catch {};
}
}
@@ -142,17 +102,17 @@ pub fn log(comptime level: std.log.Level, comptime scope: @Type(.enum_literal),
writer.flush() catch return;
if (level == .debug and lib.getLogVerboseLevel() == .Standard) {
(debug_stack.addOne(std.heap.c_allocator) catch return).* = .{
manager.get().debug_stack.pushBack(.{
.log = buffer,
.indent_level = indent_level,
.indent_level = manager.get().indent_level,
.log_level = level,
};
}) catch return;
return;
}
if (indent_enabled) {
while (debug_stack.items.len != 0) {
const elem = debug_stack.orderedRemove(0);
if (manager.get().indent_enabled) {
while (manager.get().debug_stack.len() != 0) {
const elem = manager.get().debug_stack.popFront();
switch (elem.log_level) {
.info, .debug => _ = stdout_file.write(&elem.log) catch {},
.warn, .err => _ = stderr_file.write(&elem.log) catch {},