almost finished update to Zig 0.16.0
Some checks failed
Build / build (push) Successful in 2m18s
Test / build_and_test (push) Failing after 6m9s

This commit is contained in:
2026-04-18 02:26:29 +02:00
parent d5a520e261
commit e97ee8b23d
23 changed files with 355 additions and 466 deletions

View File

@@ -47,6 +47,8 @@ pub fn build(b: *std.Build) !void {
base_mod.addSystemIncludePath(vulkan_headers.path("include"));
base_mod.addSystemIncludePath(vulkan_utility_libraries.path("include"));
const use_llvm = b.option(bool, "use-llvm", "use llvm") orelse (b.release_mode != .off);
for (implementations) |impl| {
const lib_mod = b.createModule(.{
.root_source_file = b.path(impl.root_source_file),
@@ -65,7 +67,7 @@ pub fn build(b: *std.Build) !void {
.name = b.fmt("vulkan_{s}", .{impl.name}),
.root_module = lib_mod,
.linkage = .dynamic,
.use_llvm = true, // Fixes some random bugs happening with custom backend. Investigation needed
.use_llvm = use_llvm,
});
if (impl.custom) |custom| {
@@ -145,9 +147,6 @@ fn customSoft(b: *std.Build, lib: *std.Build.Step.Compile) !void {
lib.root_module.addSystemIncludePath(cpuinfo.path("include"));
lib.root_module.linkLibrary(cpuinfo.artifact("cpuinfo"));
const interface = b.lazyDependency("interface", .{}) orelse return error.UnresolvedDependency;
lib.root_module.addImport("interface", interface.module("interface"));
const spv = b.dependency("SPIRV_Interpreter", .{
.@"no-example" = true,
.@"no-test" = true,

View File

@@ -26,8 +26,8 @@
.hash = "zigrc-1.0.0-lENlWzvQAACulrbkL9PVhWjFsWSkYhi7AmfSbCM-2Xlh",
},
.cpuinfo = .{
.url = "git+https://github.com/Kbz-8/cpuinfo#4883954cfcec3f6c9ca9c4aaddfc26107e08726f",
.hash = "cpuinfo-0.0.1-RLgIQTLRMgF4dLo8AJ-HvnpFsJe6jmXCJjMWWjil6RF1",
.url = "git+https://github.com/Kbz-8/cpuinfo.git#c9bea4f6c166a495ee0ce117821f9627d4aed118",
.hash = "cpuinfo-0.0.1-RLgIQYrTMgGqfQMOd1nAa2EuglXOh5gR9bNzwMzQTemt",
.lazy = true,
},
.volk = .{
@@ -45,11 +45,6 @@
.hash = "N-V-__8AABQ7TgCnPlp8MP4YA8znrjd6E-ZjpF1rvrS8J_2I",
.lazy = true,
},
.interface = .{
.url = "git+https://github.com/nilslice/zig-interface#8c0fe8fa9fd0702eee43f50cb75dce1cc5a7e1f4",
.hash = "interface-0.0.2-GFlWJ1mcAQARS-V4xJ7qDt5_cutxOHSEz6H9yiK-Sw0A",
.lazy = true,
},
.SPIRV_Interpreter = .{
.url = "git+https://git.kbz8.me/kbz_8/SPIRV-Interpreter#4bd688cf07ea7d71c18a02153bb197e7b1e3cd82",
.hash = "SPIRV_Interpreter-0.0.1-ajmpn1aKBACoxQzshafHCjoUx6OfuFcyt1dumaerdtDo",

View File

@@ -18,9 +18,14 @@ const ExecutionDevice = @import("device/Device.zig");
const Self = @This();
pub const Interface = base.CommandBuffer;
const Command = InterfaceFactory(.{
.execute = fn (*ExecutionDevice) VkError!void,
}, null);
const Command = struct {
const VTable = struct {
execute: *const fn (*anyopaque, *ExecutionDevice) VkError!void,
};
ptr: *anyopaque,
vtable: *const VTable,
};
interface: Interface,
@@ -79,10 +84,10 @@ pub fn execute(self: *Self, device: *ExecutionDevice) void {
defer self.interface.finish() catch {};
for (self.commands.items) |command| {
command.vtable.execute(command.ptr, device) catch |err| {
command.vtable.execute(@ptrCast(command.ptr), device) catch |err| {
base.errors.errorLoggerContext(err, "the software execution device");
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
std.debug.dumpErrorReturnTrace(trace);
}
return; // Should we return or continue ? Maybe device lost ?
};
@@ -119,7 +124,8 @@ pub fn bindDescriptorSets(interface: *Interface, bind_point: vk.PipelineBindPoin
sets: [base.VULKAN_MAX_DESCRIPTOR_SETS]?*base.DescriptorSet,
dynamic_offsets: []const u32,
pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, device: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
for (impl.first_set.., impl.sets[0..]) |i, set| {
if (set == null)
break;
@@ -136,7 +142,7 @@ pub fn bindDescriptorSets(interface: *Interface, bind_point: vk.PipelineBindPoin
.sets = sets,
.dynamic_offsets = dynamic_offsets,
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn bindPipeline(interface: *Interface, bind_point: vk.PipelineBindPoint, pipeline: *base.Pipeline) VkError!void {
@@ -149,7 +155,8 @@ pub fn bindPipeline(interface: *Interface, bind_point: vk.PipelineBindPoint, pip
bind_point: vk.PipelineBindPoint,
pipeline: *SoftPipeline,
pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, device: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
device.pipeline_states[@intCast(@intFromEnum(impl.bind_point))].pipeline = impl.pipeline;
}
};
@@ -160,7 +167,7 @@ pub fn bindPipeline(interface: *Interface, bind_point: vk.PipelineBindPoint, pip
.bind_point = bind_point,
.pipeline = @alignCast(@fieldParentPtr("interface", pipeline)),
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn blitImage(interface: *Interface, src: *base.Image, _: vk.ImageLayout, dst: *base.Image, _: vk.ImageLayout, regions: []const vk.ImageBlit, filter: vk.Filter) VkError!void {
@@ -175,7 +182,8 @@ pub fn blitImage(interface: *Interface, src: *base.Image, _: vk.ImageLayout, dst
regions: []const vk.ImageBlit,
filter: vk.Filter,
pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, device: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
for (impl.regions[0..]) |region| {
try device.blitter.blitRegion(impl.src, impl.dst, region, impl.filter);
}
@@ -190,7 +198,7 @@ pub fn blitImage(interface: *Interface, src: *base.Image, _: vk.ImageLayout, dst
.regions = allocator.dupe(vk.ImageBlit, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
.filter = filter,
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn clearColorImage(interface: *Interface, image: *base.Image, _: vk.ImageLayout, color: *const vk.ClearColorValue, range: vk.ImageSubresourceRange) VkError!void {
@@ -204,7 +212,8 @@ pub fn clearColorImage(interface: *Interface, image: *base.Image, _: vk.ImageLay
clear_color: vk.ClearColorValue,
range: vk.ImageSubresourceRange,
pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, device: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
const clear_format = try impl.image.getClearFormat();
try device.blitter.clear(.{ .color = impl.clear_color }, clear_format, impl.image, impl.image.interface.format, impl.range, null);
}
@@ -217,7 +226,7 @@ pub fn clearColorImage(interface: *Interface, image: *base.Image, _: vk.ImageLay
.clear_color = color.*,
.range = range,
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn copyBuffer(interface: *Interface, src: *base.Buffer, dst: *base.Buffer, regions: []const vk.BufferCopy) VkError!void {
@@ -231,7 +240,8 @@ pub fn copyBuffer(interface: *Interface, src: *base.Buffer, dst: *base.Buffer, r
dst: *SoftBuffer,
regions: []const vk.BufferCopy,
pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, _: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
try impl.src.copyBuffer(impl.dst, impl.regions);
}
};
@@ -243,7 +253,7 @@ pub fn copyBuffer(interface: *Interface, src: *base.Buffer, dst: *base.Buffer, r
.dst = @alignCast(@fieldParentPtr("interface", dst)),
.regions = allocator.dupe(vk.BufferCopy, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn copyBufferToImage(interface: *Interface, src: *base.Buffer, dst: *base.Image, dst_layout: vk.ImageLayout, regions: []const vk.BufferImageCopy) VkError!void {
@@ -258,7 +268,8 @@ pub fn copyBufferToImage(interface: *Interface, src: *base.Buffer, dst: *base.Im
dst_layout: vk.ImageLayout,
regions: []const vk.BufferImageCopy,
pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, _: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
for (impl.regions[0..]) |region| {
try impl.dst.copyFromBuffer(impl.src, region);
}
@@ -273,7 +284,7 @@ pub fn copyBufferToImage(interface: *Interface, src: *base.Buffer, dst: *base.Im
.dst = @alignCast(@fieldParentPtr("interface", dst)),
.regions = allocator.dupe(vk.BufferImageCopy, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn copyImage(interface: *Interface, src: *base.Image, _: vk.ImageLayout, dst: *base.Image, _: vk.ImageLayout, regions: []const vk.ImageCopy) VkError!void {
@@ -287,7 +298,8 @@ pub fn copyImage(interface: *Interface, src: *base.Image, _: vk.ImageLayout, dst
dst: *SoftImage,
regions: []const vk.ImageCopy,
pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, _: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
for (impl.regions[0..]) |region| {
try impl.src.copyToImage(impl.dst, region);
}
@@ -301,7 +313,7 @@ pub fn copyImage(interface: *Interface, src: *base.Image, _: vk.ImageLayout, dst
.dst = @alignCast(@fieldParentPtr("interface", dst)),
.regions = allocator.dupe(vk.ImageCopy, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn copyImageToBuffer(interface: *Interface, src: *base.Image, src_layout: vk.ImageLayout, dst: *base.Buffer, regions: []const vk.BufferImageCopy) VkError!void {
@@ -316,7 +328,8 @@ pub fn copyImageToBuffer(interface: *Interface, src: *base.Image, src_layout: vk
dst: *SoftBuffer,
regions: []const vk.BufferImageCopy,
pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, _: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
for (impl.regions[0..]) |region| {
try impl.src.copyToBuffer(impl.dst, region);
}
@@ -331,7 +344,7 @@ pub fn copyImageToBuffer(interface: *Interface, src: *base.Image, src_layout: vk
.dst = @alignCast(@fieldParentPtr("interface", dst)),
.regions = allocator.dupe(vk.BufferImageCopy, regions) catch return VkError.OutOfHostMemory, // Will be freed on cmdbuf reset or destroy
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn dispatch(interface: *Interface, group_count_x: u32, group_count_y: u32, group_count_z: u32) VkError!void {
@@ -345,7 +358,8 @@ pub fn dispatch(interface: *Interface, group_count_x: u32, group_count_y: u32, g
group_count_y: u32,
group_count_z: u32,
pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, device: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
try device.compute_routines.dispatch(impl.group_count_x, impl.group_count_y, impl.group_count_z);
}
};
@@ -357,7 +371,7 @@ pub fn dispatch(interface: *Interface, group_count_x: u32, group_count_y: u32, g
.group_count_y = group_count_y,
.group_count_z = group_count_z,
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn dispatchIndirect(interface: *Interface, buffer: *base.Buffer, offset: vk.DeviceSize) VkError!void {
@@ -370,7 +384,8 @@ pub fn dispatchIndirect(interface: *Interface, buffer: *base.Buffer, offset: vk.
buffer: *SoftBuffer,
offset: vk.DeviceSize,
pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, device: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
const size = 3 * @sizeOf(u32);
const memory = if (impl.buffer.interface.memory) |memory| memory else return VkError.InvalidDeviceMemoryDrv;
const map: []u32 = @as([*]u32, @ptrCast(@alignCast(try memory.map(impl.offset, size))))[0..3];
@@ -384,7 +399,7 @@ pub fn dispatchIndirect(interface: *Interface, buffer: *base.Buffer, offset: vk.
.buffer = @alignCast(@fieldParentPtr("interface", buffer)),
.offset = offset,
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn executeCommands(interface: *Interface, commands: *Interface) VkError!void {
@@ -396,7 +411,8 @@ pub fn executeCommands(interface: *Interface, commands: *Interface) VkError!void
cmd: *Self,
pub fn execute(impl: *const Impl, device: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, device: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
impl.cmd.execute(device);
}
};
@@ -406,7 +422,7 @@ pub fn executeCommands(interface: *Interface, commands: *Interface) VkError!void
cmd.* = .{
.cmd = @alignCast(@fieldParentPtr("interface", commands)),
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn fillBuffer(interface: *Interface, buffer: *base.Buffer, offset: vk.DeviceSize, size: vk.DeviceSize, data: u32) VkError!void {
@@ -421,7 +437,8 @@ pub fn fillBuffer(interface: *Interface, buffer: *base.Buffer, offset: vk.Device
size: vk.DeviceSize,
data: u32,
pub fn execute(impl: *const Impl, _: *ExecutionDevice) VkError!void {
pub fn execute(context: *anyopaque, _: *ExecutionDevice) VkError!void {
const impl: *Impl = @ptrCast(@alignCast(context));
try impl.buffer.fillBuffer(impl.offset, impl.size, impl.data);
}
};
@@ -434,7 +451,7 @@ pub fn fillBuffer(interface: *Interface, buffer: *base.Buffer, offset: vk.Device
.size = size,
.data = data,
};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
}
pub fn pipelineBarrier(interface: *Interface, src_stage: vk.PipelineStageFlags, dst_stage: vk.PipelineStageFlags, dependency: vk.DependencyFlags, memory_barriers: []const vk.MemoryBarrier, buffer_barriers: []const vk.BufferMemoryBarrier, image_barriers: []const vk.ImageMemoryBarrier) VkError!void {
@@ -444,7 +461,7 @@ pub fn pipelineBarrier(interface: *Interface, src_stage: vk.PipelineStageFlags,
const CommandImpl = struct {
const Impl = @This();
pub fn execute(_: *const Impl, _: *ExecutionDevice) VkError!void {
pub fn execute(_: *anyopaque, _: *ExecutionDevice) VkError!void {
// TODO: implement synchronization for rasterization stages
}
};
@@ -452,7 +469,7 @@ pub fn pipelineBarrier(interface: *Interface, src_stage: vk.PipelineStageFlags,
const cmd = allocator.create(CommandImpl) catch return VkError.OutOfHostMemory;
errdefer allocator.destroy(cmd);
cmd.* = .{};
self.commands.append(allocator, Command.from(cmd)) catch return VkError.OutOfHostMemory;
self.commands.append(allocator, .{ .ptr = cmd, .vtable = &.{ .execute = CommandImpl.execute } }) catch return VkError.OutOfHostMemory;
_ = src_stage;
_ = dst_stage;

View File

@@ -18,6 +18,7 @@ pub const SoftEvent = @import("SoftEvent.zig");
pub const SoftFence = @import("SoftFence.zig");
pub const SoftFramebuffer = @import("SoftFramebuffer.zig");
pub const SoftImage = @import("SoftImage.zig");
pub const SoftInstance = @import("SoftInstance.zig");
pub const SoftImageView = @import("SoftImageView.zig");
pub const SoftPipeline = @import("SoftPipeline.zig");
pub const SoftPipelineCache = @import("SoftPipelineCache.zig");
@@ -42,13 +43,12 @@ const DeviceAllocator = struct {
interface: Interface,
device_allocator: if (config.debug_allocator) std.heap.DebugAllocator(.{}) else DeviceAllocator,
workers: std.Thread.Pool,
pub fn create(physical_device: *base.PhysicalDevice, allocator: std.mem.Allocator, info: *const vk.DeviceCreateInfo) VkError!*Self {
pub fn create(instance: *base.Instance, physical_device: *base.PhysicalDevice, allocator: std.mem.Allocator, info: *const vk.DeviceCreateInfo) VkError!*Self {
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
errdefer allocator.destroy(self);
var interface = try Interface.init(allocator, physical_device, info);
var interface = try Interface.init(allocator, instance, physical_device, info);
interface.vtable = &.{
.createQueue = SoftQueue.create,
@@ -82,12 +82,6 @@ pub fn create(physical_device: *base.PhysicalDevice, allocator: std.mem.Allocato
self.* = .{
.interface = interface,
.device_allocator = if (config.debug_allocator) .init else .{},
.workers = undefined,
};
self.workers.init(.{ .allocator = self.device_allocator.allocator() }) catch |err| return switch (err) {
SpawnError.OutOfMemory, SpawnError.LockedMemoryLimitExceeded => VkError.OutOfDeviceMemory,
else => VkError.Unknown,
};
try self.interface.createQueues(allocator, info);
@@ -96,7 +90,6 @@ pub fn create(physical_device: *base.PhysicalDevice, allocator: std.mem.Allocato
pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
self.workers.deinit();
if (config.debug_allocator) {
// All device memory allocations should've been freed by now

View File

@@ -9,8 +9,8 @@ const Self = @This();
pub const Interface = base.Event;
interface: Interface,
mutex: std.Thread.Mutex,
condition: std.Thread.Condition,
mutex: std.Io.Mutex,
condition: std.Io.Condition,
is_signaled: bool,
pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const vk.EventCreateInfo) VkError!*Self {
@@ -29,8 +29,8 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
self.* = .{
.interface = interface,
.mutex = std.Thread.Mutex{},
.condition = std.Thread.Condition{},
.mutex = .init,
.condition = .init,
.is_signaled = false,
};
return self;
@@ -43,9 +43,10 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) void {
pub fn getStatus(interface: *Interface) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
const io = interface.owner.io();
self.mutex.lock();
defer self.mutex.unlock();
self.mutex.lock(io) catch return VkError.DeviceLost;
defer self.mutex.unlock(io);
if (!self.is_signaled) {
return VkError.EventReset;
@@ -54,35 +55,41 @@ pub fn getStatus(interface: *Interface) VkError!void {
pub fn reset(interface: *Interface) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
const io = interface.owner.io();
self.mutex.lock();
defer self.mutex.unlock();
self.mutex.lock(io) catch return VkError.DeviceLost;
defer self.mutex.unlock(io);
self.is_signaled = false;
}
pub fn signal(interface: *Interface) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
const io = interface.owner.io();
self.mutex.lock();
defer self.mutex.unlock();
self.mutex.lock(io) catch return VkError.DeviceLost;
defer self.mutex.unlock(io);
self.is_signaled = true;
self.condition.broadcast();
self.condition.broadcast(io);
}
pub fn wait(interface: *Interface, timeout: u64) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
const io = interface.owner.io();
self.mutex.lock();
defer self.mutex.unlock();
self.mutex.lock(io) catch return VkError.DeviceLost;
defer self.mutex.unlock(io);
if (self.is_signaled) return;
if (timeout == 0) return VkError.Timeout;
if (timeout == std.math.maxInt(@TypeOf(timeout))) {
self.condition.wait(&self.mutex);
} else {
self.condition.timedWait(&self.mutex, timeout) catch return VkError.Timeout;
if (timeout != std.math.maxInt(@TypeOf(timeout))) {
const duration: std.Io.Clock.Duration = .{
.raw = .fromNanoseconds(@intCast(timeout)),
.clock = .cpu_process,
};
duration.sleep(io) catch return VkError.DeviceLost;
}
self.condition.wait(io, &self.mutex) catch return VkError.DeviceLost;
}

View File

@@ -9,8 +9,8 @@ const Self = @This();
pub const Interface = base.Fence;
interface: Interface,
mutex: std.Thread.Mutex,
condition: std.Thread.Condition,
mutex: std.Io.Mutex,
condition: std.Io.Condition,
is_signaled: bool,
pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.FenceCreateInfo) VkError!*Self {
@@ -29,8 +29,8 @@ pub fn create(device: *Device, allocator: std.mem.Allocator, info: *const vk.Fen
self.* = .{
.interface = interface,
.mutex = std.Thread.Mutex{},
.condition = std.Thread.Condition{},
.mutex = .init,
.condition = .init,
.is_signaled = info.flags.signaled_bit,
};
return self;
@@ -55,26 +55,31 @@ pub fn reset(interface: *Interface) VkError!void {
pub fn signal(interface: *Interface) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
const io = interface.owner.io();
self.mutex.lock();
defer self.mutex.unlock();
self.mutex.lock(io) catch return VkError.DeviceLost;
defer self.mutex.unlock(io);
self.is_signaled = true;
self.condition.broadcast();
self.condition.broadcast(io);
}
pub fn wait(interface: *Interface, timeout: u64) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
const io = interface.owner.io();
self.mutex.lock();
defer self.mutex.unlock();
self.mutex.lock(io) catch return VkError.DeviceLost;
defer self.mutex.unlock(io);
if (self.is_signaled) return;
if (timeout == 0) return VkError.Timeout;
if (timeout == std.math.maxInt(@TypeOf(timeout))) {
self.condition.wait(&self.mutex);
} else {
self.condition.timedWait(&self.mutex, timeout) catch return VkError.Timeout;
if (timeout != std.math.maxInt(@TypeOf(timeout))) {
const duration: std.Io.Clock.Duration = .{
.raw = .fromNanoseconds(@intCast(timeout)),
.clock = .cpu_process,
};
duration.sleep(io) catch return VkError.DeviceLost;
}
self.condition.wait(io, &self.mutex) catch return VkError.DeviceLost;
}

View File

@@ -11,6 +11,8 @@ const Self = @This();
pub const Interface = base.Instance;
interface: Interface,
threaded: std.Io.Threaded,
allocator: std.mem.Allocator,
fn castExtension(comptime ext: vk.ApiInfo) vk.ExtensionProperties {
var props: vk.ExtensionProperties = .{
@@ -29,6 +31,9 @@ pub fn create(allocator: std.mem.Allocator, infos: *const vk.InstanceCreateInfo)
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
errdefer allocator.destroy(self);
self.allocator = std.heap.smp_allocator;
self.threaded = std.Io.Threaded.init(self.allocator, .{});
self.interface = try base.Instance.init(allocator, infos);
self.interface.dispatch_table = &.{
.destroy = destroy,
@@ -36,12 +41,14 @@ pub fn create(allocator: std.mem.Allocator, infos: *const vk.InstanceCreateInfo)
self.interface.vtable = &.{
.requestPhysicalDevices = requestPhysicalDevices,
.releasePhysicalDevices = releasePhysicalDevices,
.io = io,
};
return &self.interface;
}
fn destroy(interface: *Interface, allocator: std.mem.Allocator) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
self.threaded.deinit();
allocator.destroy(self);
}
@@ -60,3 +67,8 @@ fn releasePhysicalDevices(interface: *Interface, allocator: std.mem.Allocator) V
interface.physical_devices.deinit(allocator);
interface.physical_devices = .empty;
}
fn io(interface: *Interface) std.Io {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
return self.threaded.io();
}

View File

@@ -17,7 +17,7 @@ var device_name = [_]u8{0} ** vk.MAX_PHYSICAL_DEVICE_NAME_SIZE;
interface: Interface,
pub fn create(allocator: std.mem.Allocator, instance: *const base.Instance) VkError!*Self {
pub fn create(allocator: std.mem.Allocator, instance: *base.Instance) VkError!*Self {
const command_allocator = VulkanAllocator.from(allocator).cloneWithScope(.command).allocator();
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -224,7 +224,7 @@ pub fn destroy(interface: *Interface, allocator: std.mem.Allocator) VkError!void
}
pub fn createDevice(interface: *Interface, allocator: std.mem.Allocator, infos: *const vk.DeviceCreateInfo) VkError!*base.Device {
const device = try SoftDevice.create(interface, allocator, infos);
const device = try SoftDevice.create(interface.instance, interface, allocator, infos);
return &device.interface;
}

View File

@@ -10,6 +10,7 @@ const NonDispatchable = base.NonDispatchable;
const ShaderModule = base.ShaderModule;
const SoftDevice = @import("SoftDevice.zig");
const SoftInstance = @import("SoftInstance.zig");
const SoftShaderModule = @import("SoftShaderModule.zig");
const Self = @This();
@@ -49,6 +50,16 @@ pub fn createCompute(device: *base.Device, allocator: std.mem.Allocator, cache:
const device_allocator = soft_device.device_allocator.allocator();
const instance: *SoftInstance = @alignCast(@fieldParentPtr("interface", device.instance));
const runtimes_count = switch (instance.threaded.async_limit) {
.nothing => 1,
.unlimited => std.Thread.getCpuCount() catch 1, // If we cannot get the CPU count, fallback on single runtime
else => |count| blk: {
const cpu_count: usize = std.Thread.getCpuCount() catch break :blk @intFromEnum(count);
break :blk if (@intFromEnum(count) >= cpu_count) cpu_count else @intFromEnum(count);
},
};
self.* = .{
.interface = interface,
.stages = std.EnumMap(Stages, Shader).init(.{
@@ -57,7 +68,7 @@ pub fn createCompute(device: *base.Device, allocator: std.mem.Allocator, cache:
soft_module.ref();
shader.module = soft_module;
const runtimes = device_allocator.alloc(spv.Runtime, soft_device.workers.getIdCount()) catch return VkError.OutOfHostMemory;
const runtimes = device_allocator.alloc(spv.Runtime, runtimes_count) catch return VkError.OutOfHostMemory;
errdefer {
for (runtimes) |*runtime| {
runtime.deinit(device_allocator);
@@ -103,9 +114,17 @@ pub fn createGraphics(device: *base.Device, allocator: std.mem.Allocator, cache:
.destroy = destroy,
};
const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", device));
const instance: *SoftInstance = @alignCast(@fieldParentPtr("interface", device.instance));
const runtimes_count = switch (instance.threaded.async_limit) {
.nothing => 1,
.unlimited => std.Thread.getCpuCount() catch 1, // If we cannot get the CPU count, fallback on single runtime
else => |count| blk: {
const cpu_count: usize = std.Thread.getCpuCount() catch break :blk @intFromEnum(count);
break :blk if (@intFromEnum(count) >= cpu_count) cpu_count else @intFromEnum(count);
},
};
const runtimes = allocator.alloc(spv.Runtime, soft_device.workers.getIdCount()) catch return VkError.OutOfHostMemory;
const runtimes = allocator.alloc(spv.Runtime, runtimes_count) catch return VkError.OutOfHostMemory;
errdefer allocator.free(runtimes);
//for (runtimes) |*runtime| {

View File

@@ -17,7 +17,7 @@ const Self = @This();
pub const Interface = base.Queue;
interface: Interface,
lock: std.Thread.RwLock,
lock: std.Io.RwLock,
pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, family_index: u32, flags: vk.DeviceQueueCreateFlags) VkError!*Interface {
const self = allocator.create(Self) catch return VkError.OutOfHostMemory;
@@ -33,7 +33,7 @@ pub fn create(allocator: std.mem.Allocator, device: *base.Device, index: u32, fa
self.* = .{
.interface = interface,
.lock = .{},
.lock = .init,
};
return &self.interface;
}
@@ -56,10 +56,11 @@ pub fn submit(interface: *Interface, infos: []Interface.SubmitInfo, p_fence: ?*b
const soft_device: *SoftDevice = @alignCast(@fieldParentPtr("interface", interface.owner));
const allocator = soft_device.device_allocator.allocator();
const io = soft_device.interface.io();
// Lock here to avoid acquiring it in `waitIdle` before runners start
self.lock.lockShared();
defer self.lock.unlockShared();
self.lock.lockShared(io) catch return VkError.DeviceLost;
defer self.lock.unlockShared(io);
for (infos) |info| {
// Cloning info to keep them alive until command execution ends
@@ -68,19 +69,23 @@ pub fn submit(interface: *Interface, infos: []Interface.SubmitInfo, p_fence: ?*b
};
const runners_counter = allocator.create(RefCounter) catch return VkError.OutOfDeviceMemory;
runners_counter.* = .init;
soft_device.workers.spawn(Self.taskRunner, .{ self, cloned_info, p_fence, runners_counter }) catch return VkError.Unknown;
_ = soft_device.interface.io().async(Self.taskRunner, .{ self, cloned_info, p_fence, runners_counter });
}
}
pub fn waitIdle(interface: *Interface) VkError!void {
const self: *Self = @alignCast(@fieldParentPtr("interface", interface));
self.lock.lock();
defer self.lock.unlock();
const io = interface.owner.io();
self.lock.lock(io) catch return VkError.DeviceLost;
defer self.lock.unlock(io);
}
fn taskRunner(self: *Self, info: Interface.SubmitInfo, p_fence: ?*base.Fence, runners_counter: *RefCounter) void {
self.lock.lockShared();
defer self.lock.unlockShared();
const io = self.interface.owner.io();
self.lock.lockShared(io) catch return;
defer self.lock.unlockShared(io);
runners_counter.ref();
defer {

View File

@@ -32,13 +32,14 @@ pub fn create(device: *base.Device, allocator: std.mem.Allocator, info: *const v
self.* = .{
.interface = interface,
.module = spv.Module.init(allocator, code, .{
.use_simd_vectors_specializations = !std.process.hasEnvVarConstant(lib.NO_SHADER_SIMD_ENV_NAME),
//.use_simd_vectors_specializations = !std.process.hasEnvVarConstant(lib.NO_SHADER_SIMD_ENV_NAME),
.use_simd_vectors_specializations = true,
}) catch |err| switch (err) {
spv.Module.ModuleError.OutOfMemory => return VkError.OutOfHostMemory,
else => {
std.log.scoped(.@"SPIR-V module").err("module creation catched a '{s}'", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
std.debug.dumpErrorReturnTrace(trace);
}
return VkError.ValidationFailed;
},

View File

@@ -40,8 +40,10 @@ pub fn init(device: *SoftDevice, state: *PipelineState) Self {
.state = state,
.batch_size = 0,
.invocation_index = .init(0),
.early_dump = std.process.parseEnvVarInt(lib.DUMP_EARLY_RESULT_TABLE_ENV_NAME, u32, 10) catch null,
.final_dump = std.process.parseEnvVarInt(lib.DUMP_FINAL_RESULT_TABLE_ENV_NAME, u32, 10) catch null,
//.early_dump = std.process.parseEnvVarInt(lib.DUMP_EARLY_RESULT_TABLE_ENV_NAME, u32, 10) catch null,
//.final_dump = std.process.parseEnvVarInt(lib.DUMP_FINAL_RESULT_TABLE_ENV_NAME, u32, 10) catch null,
.early_dump = null,
.final_dump = null,
};
}
@@ -61,46 +63,46 @@ pub fn dispatch(self: *Self, group_count_x: u32, group_count_y: u32, group_count
self.invocation_index.store(0, .monotonic);
var wg: std.Thread.WaitGroup = .{};
var wg: std.Io.Group = .init;
for (0..@min(self.batch_size, group_count)) |batch_id| {
if (std.process.hasEnvVarConstant(lib.SINGLE_THREAD_COMPUTE_EXECUTION_ENV_NAME)) {
@branchHint(.cold); // Should only be reached for debugging
//if (std.process.hasEnvVarConstant(lib.SINGLE_THREAD_COMPUTE_EXECUTION_ENV_NAME)) {
// @branchHint(.cold); // Should only be reached for debugging
runWrapper(
RunData{
.self = self,
.batch_id = batch_id,
.group_count = group_count,
.group_count_x = @as(usize, @intCast(group_count_x)),
.group_count_y = @as(usize, @intCast(group_count_y)),
.group_count_z = @as(usize, @intCast(group_count_z)),
.invocations_per_workgroup = invocations_per_workgroup,
.pipeline = pipeline,
},
);
} else {
self.device.workers.spawnWg(&wg, runWrapper, .{
RunData{
.self = self,
.batch_id = batch_id,
.group_count = group_count,
.group_count_x = @as(usize, @intCast(group_count_x)),
.group_count_y = @as(usize, @intCast(group_count_y)),
.group_count_z = @as(usize, @intCast(group_count_z)),
.invocations_per_workgroup = invocations_per_workgroup,
.pipeline = pipeline,
},
});
}
// runWrapper(
// RunData{
// .self = self,
// .batch_id = batch_id,
// .group_count = group_count,
// .group_count_x = @as(usize, @intCast(group_count_x)),
// .group_count_y = @as(usize, @intCast(group_count_y)),
// .group_count_z = @as(usize, @intCast(group_count_z)),
// .invocations_per_workgroup = invocations_per_workgroup,
// .pipeline = pipeline,
// },
// );
//} else {
wg.async(self.device.interface.io(), runWrapper, .{
RunData{
.self = self,
.batch_id = batch_id,
.group_count = group_count,
.group_count_x = @as(usize, @intCast(group_count_x)),
.group_count_y = @as(usize, @intCast(group_count_y)),
.group_count_z = @as(usize, @intCast(group_count_z)),
.invocations_per_workgroup = invocations_per_workgroup,
.pipeline = pipeline,
},
});
//}
}
self.device.workers.waitAndWork(&wg);
wg.await(self.device.interface.io()) catch return VkError.DeviceLost;
}
fn runWrapper(data: RunData) void {
@call(.always_inline, run, .{data}) catch |err| {
std.log.scoped(.@"SPIR-V runtime").err("SPIR-V runtime catched a '{s}'", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
std.debug.dumpErrorReturnTrace(trace);
}
};
}
@@ -171,14 +173,17 @@ inline fn run(data: RunData) !void {
inline fn dumpResultsTable(allocator: std.mem.Allocator, rt: *spv.Runtime, is_early: bool) !void {
@branchHint(.cold);
const file = try std.fs.cwd().createFile(
std.fmt.comptimePrint("{s}_compute_result_table_dump.txt", .{if (is_early) "early" else "final"}),
.{ .truncate = true },
);
defer file.close();
var buffer = [_]u8{0} ** 1024;
var writer = file.writer(buffer[0..]);
try rt.dumpResultsTable(allocator, &writer.interface);
_ = allocator;
_ = rt;
_ = is_early;
//const file = try std.fs.cwd().createFile(
// std.fmt.comptimePrint("{s}_compute_result_table_dump.txt", .{if (is_early) "early" else "final"}),
// .{ .truncate = true },
//);
//defer file.close();
//var buffer = [_]u8{0} ** 1024;
//var writer = file.writer(buffer[0..]);
//try rt.dumpResultsTable(allocator, &writer.interface);
}
fn writeDescriptorSets(self: *Self, rt: *spv.Runtime) !void {

View File

@@ -31,7 +31,7 @@ pool: *CommandPool,
state: State,
begin_info: ?vk.CommandBufferBeginInfo,
host_allocator: VulkanAllocator,
state_mutex: std.Thread.Mutex,
state_mutex: std.Io.Mutex,
vtable: *const VTable,
dispatch_table: *const DispatchTable,
@@ -69,7 +69,7 @@ pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.Comma
.state = .Initial,
.begin_info = null,
.host_allocator = VulkanAllocator.from(allocator).cloneWithScope(.object),
.state_mutex = .{},
.state_mutex = .init,
.vtable = undefined,
.dispatch_table = undefined,
};
@@ -79,8 +79,11 @@ inline fn transitionState(self: *Self, target: State, from_allowed: []const Stat
if (!std.EnumSet(State).initMany(from_allowed).contains(self.state)) {
return error.NotAllowed;
}
self.state_mutex.lock();
defer self.state_mutex.unlock();
const io = self.owner.io();
self.state_mutex.lockUncancelable(io);
defer self.state_mutex.unlock(io);
self.state = target;
}

View File

@@ -1,8 +1,6 @@
const std = @import("std");
const vk = @import("vulkan");
const logger = @import("lib.zig").logger;
const Dispatchable = @import("Dispatchable.zig").Dispatchable;
const NonDispatchable = @import("NonDispatchable.zig").NonDispatchable;
const VulkanAllocator = @import("VulkanAllocator.zig");
@@ -23,6 +21,7 @@ const Event = @import("Event.zig");
const Fence = @import("Fence.zig");
const Framebuffer = @import("Framebuffer.zig");
const Image = @import("Image.zig");
const Instance = @import("Instance.zig");
const ImageView = @import("ImageView.zig");
const Pipeline = @import("Pipeline.zig");
const PipelineCache = @import("PipelineCache.zig");
@@ -35,6 +34,7 @@ const ShaderModule = @import("ShaderModule.zig");
const Self = @This();
pub const ObjectType: vk.ObjectType = .device;
instance: *Instance,
physical_device: *const PhysicalDevice,
queues: std.AutoArrayHashMapUnmanaged(u32, std.ArrayList(*Dispatchable(Queue))),
host_allocator: VulkanAllocator,
@@ -71,9 +71,10 @@ pub const DispatchTable = struct {
destroy: *const fn (*Self, std.mem.Allocator) VkError!void,
};
pub fn init(allocator: std.mem.Allocator, physical_device: *const PhysicalDevice, info: *const vk.DeviceCreateInfo) VkError!Self {
pub fn init(allocator: std.mem.Allocator, instance: *Instance, physical_device: *const PhysicalDevice, info: *const vk.DeviceCreateInfo) VkError!Self {
_ = info;
return .{
.instance = instance,
.physical_device = physical_device,
.queues = .empty,
.host_allocator = VulkanAllocator.from(allocator).clone(),
@@ -99,14 +100,15 @@ pub fn createQueues(self: *Self, allocator: std.mem.Allocator, info: *const vk.D
const queue = try self.vtable.createQueue(allocator, self, queue_info.queue_family_index, @intCast(family_ptr.items.len), queue_info.flags);
logger.getManager().get().indent();
defer logger.getManager().get().unindent();
const dispatchable_queue = try Dispatchable(Queue).wrap(allocator, queue);
family_ptr.append(allocator, dispatchable_queue) catch return VkError.OutOfHostMemory;
}
}
/// Returns the `std.Io` instance of the owning `Instance`
/// (forwards to `self.instance.io()`); the device itself holds no Io state.
pub fn io(self: *const Self) std.Io {
    return self.instance.io();
}
pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) VkError!void {
var it = self.queues.iterator();
while (it.next()) |entry| {

View File

@@ -3,8 +3,6 @@ const builtin = @import("builtin");
const vk = @import("vulkan");
const config = @import("config");
const ThreadSafeLoggerManager = @import("logger/ThreadSafeManager.zig");
const VkError = @import("error_set.zig").VkError;
const Dispatchable = @import("Dispatchable.zig").Dispatchable;
const PhysicalDevice = @import("PhysicalDevice.zig");
@@ -29,15 +27,13 @@ const DeviceAllocator = struct {
};
physical_devices: std.ArrayList(*Dispatchable(PhysicalDevice)),
threaded: std.Io.Threaded,
allocator: if (config.debug_allocator) std.heap.DebugAllocator(.{}) else DeviceAllocator,
logger: ThreadSafeLoggerManager,
dispatch_table: *const DispatchTable,
vtable: *const VTable,
pub const VTable = struct {
releasePhysicalDevices: *const fn (*Self, std.mem.Allocator) VkError!void,
requestPhysicalDevices: *const fn (*Self, std.mem.Allocator) VkError!void,
io: *const fn (*Self) std.Io,
};
pub const DispatchTable = struct {
@@ -47,20 +43,11 @@ pub const DispatchTable = struct {
pub fn init(allocator: std.mem.Allocator, infos: *const vk.InstanceCreateInfo) VkError!Self {
_ = allocator;
_ = infos;
var self: Self = .{
return .{
.physical_devices = .empty,
.threaded = undefined,
.allocator = if (config.debug_allocator) .init else .{},
.logger = undefined,
.dispatch_table = undefined,
.vtable = undefined,
};
self.threaded = .init(self.allocator.allocator(), .{});
self.logger = .init(self.threaded.io(), self.allocator.allocator());
return self;
}
/// Dummy for docs creation and stuff
@@ -104,9 +91,6 @@ pub fn releasePhysicalDevices(self: *Self, allocator: std.mem.Allocator) VkError
}
pub fn requestPhysicalDevices(self: *Self, allocator: std.mem.Allocator) VkError!void {
logger.getManager().get().indent();
defer logger.getManager().get().unindent();
try self.vtable.requestPhysicalDevices(self, allocator);
if (self.physical_devices.items.len == 0) {
std.log.scoped(.vkCreateInstance).err("No VkPhysicalDevice found", .{});
@@ -116,3 +100,7 @@ pub fn requestPhysicalDevices(self: *Self, allocator: std.mem.Allocator) VkError
std.log.scoped(.vkCreateInstance).debug("Found VkPhysicalDevice named {s}", .{physical_device.object.props.device_name});
}
}
/// Returns the `std.Io` implementation for this instance by dispatching
/// through the vtable, so each backend can supply its own event loop / Io.
pub fn io(self: *Self) std.Io {
    return self.vtable.io(self);
}

View File

@@ -13,7 +13,7 @@ props: vk.PhysicalDeviceProperties,
mem_props: vk.PhysicalDeviceMemoryProperties,
features: vk.PhysicalDeviceFeatures,
queue_family_props: std.ArrayList(vk.QueueFamilyProperties),
instance: *const Instance,
instance: *Instance,
dispatch_table: *const DispatchTable,
pub const DispatchTable = struct {
@@ -25,7 +25,7 @@ pub const DispatchTable = struct {
release: *const fn (*Self, std.mem.Allocator) VkError!void,
};
pub fn init(allocator: std.mem.Allocator, instance: *const Instance) VkError!Self {
pub fn init(allocator: std.mem.Allocator, instance: *Instance) VkError!Self {
_ = allocator;
return .{
.props = .{

View File

@@ -9,7 +9,7 @@ pub const vku = @cImport({
pub const errors = @import("error_set.zig");
pub const lib_vulkan = @import("lib_vulkan.zig");
pub const logger = @import("logger/logger.zig");
pub const logger = @import("logger.zig");
pub const format = @import("format.zig");
pub const Dispatchable = @import("Dispatchable.zig").Dispatchable;
@@ -92,17 +92,18 @@ pub const LogVerboseLevel = enum {
};
pub fn getLogVerboseLevel() LogVerboseLevel {
const allocator = std.heap.c_allocator;
const level = std.process.getEnvVarOwned(allocator, DRIVER_LOGS_ENV_NAME) catch return .None;
defer allocator.free(level);
return if (std.mem.eql(u8, level, "none"))
.None
else if (std.mem.eql(u8, level, "all"))
.High
else if (std.mem.eql(u8, level, "stupid"))
.TooMuch
else
.Standard;
//const allocator = std.heap.c_allocator;
//const level = std.process.getEnvVarOwned(allocator, DRIVER_LOGS_ENV_NAME) catch return .None;
//defer allocator.free(level);
//return if (std.mem.eql(u8, level, "none"))
// .None
//else if (std.mem.eql(u8, level, "all"))
// .High
//else if (std.mem.eql(u8, level, "stupid"))
// .TooMuch
//else
// .Standard;
return .High;
}
pub inline fn unsupported(comptime fmt: []const u8, args: anytype) void {

View File

@@ -44,22 +44,19 @@ pub const RenderPass = @import("RenderPass.zig");
pub const Sampler = @import("Sampler.zig");
pub const ShaderModule = @import("ShaderModule.zig");
fn entryPointBeginLogTrace(comptime scope: @Type(.enum_literal)) void {
fn entryPointBeginLogTrace(comptime scope: @EnumLiteral()) void {
std.log.scoped(scope).debug("Calling {s}...", .{@tagName(scope)});
logger.getManager().get().indent();
}
fn entryPointEndLogTrace() void {
logger.getManager().get().unindent();
}
fn entryPointEndLogTrace() void {}
fn entryPointNotFoundErrorLog(comptime scope: @Type(.enum_literal), name: []const u8) void {
fn entryPointNotFoundErrorLog(comptime scope: @EnumLiteral(), name: []const u8) void {
if (lib.getLogVerboseLevel() != .TooMuch) return;
std.log.scoped(scope).err("Could not find function {s}", .{name});
}
inline fn notImplementedWarning() void {
logger.nestedFixme("function not yet implemented", .{});
logger.fixme("function not yet implemented", .{});
}
fn functionMapEntryPoint(comptime name: []const u8) struct { []const u8, vk.PfnVoidFunction } {
@@ -342,8 +339,6 @@ pub export fn strollEnumerateInstanceVersion(version: *u32) callconv(vk.vulkan_c
// Instance functions ========================================================================================================================================
pub export fn strollDestroyInstance(p_instance: vk.Instance, callbacks: ?*const vk.AllocationCallbacks) callconv(vk.vulkan_call_conv) void {
defer logger.getManager().deinit();
entryPointBeginLogTrace(.vkDestroyInstance);
defer entryPointEndLogTrace();

118
src/vulkan/logger.zig git.filemode.normal_file
View File

@@ -0,0 +1,118 @@
const std = @import("std");
const builtin = @import("builtin");
const root = @import("root");
const lib = @import("lib.zig");
// Compile-time guard: outside of `zig test` builds, the root module must
// declare `DRIVER_NAME` — it is printed as part of the log prefix below.
comptime {
    if (!builtin.is_test) {
        if (!@hasDecl(root, "DRIVER_NAME")) {
            @compileError("Missing DRIVER_NAME in module root");
        }
    }
}
var mutex: std.Io.Mutex = .init;
/// Logs a scoped "FIXME: ..." warning for not-yet-implemented or provisional
/// code paths. Suppressed entirely when driver logging is disabled.
pub inline fn fixme(comptime format: []const u8, args: anytype) void {
    if (lib.getLogVerboseLevel() != .None) {
        std.log.scoped(.FIXME).warn("FIXME: " ++ format, args);
    }
}
/// Custom `std.log` sink: writes each formatted line with a colored
/// `[StrollDriver <name> <time>][Thread N][level](scope):` prefix.
/// info/debug go to stdout, warn/err to stderr (see `file` switch below).
/// Serialized across threads by the module-level `mutex`.
pub fn log(comptime level: std.log.Level, comptime scope: @EnumLiteral(), comptime format: []const u8, args: anytype) void {
    // Logging fully disabled via the driver-logs env var.
    if (lib.getLogVerboseLevel() == .None) {
        return;
    }
    const scope_name = @tagName(scope);
    // Scope label, truncated with "..." so the padded prefix stays <= 30 cols.
    const scope_prefix = comptime blk: {
        const limit = 30 - 4;
        break :blk if (scope_name.len >= limit)
            std.fmt.comptimePrint("({s}...): ", .{scope_name[0..(limit - 3)]})
        else
            std.fmt.comptimePrint("({s}): ", .{scope_name});
    };
    // Left-padded "[info] " / "[error] " etc., built at comptime.
    const prefix = std.fmt.comptimePrint("{s: <10}", .{"[" ++ comptime level.asText() ++ "] "});
    const level_color: std.Io.Terminal.Color = switch (level) {
        .info, .debug => .blue,
        .warn => .magenta,
        .err => .red,
    };
    // NOTE(review): a fresh std.Io.Threaded is constructed and torn down on
    // every log call — likely heavyweight; confirm this is intentional versus
    // reusing a shared Io instance.
    const allocator = std.heap.smp_allocator;
    var threaded: std.Io.Threaded = .init(allocator, .{});
    defer threaded.deinit();
    const io = threaded.io();
    const stderr_file = std.Io.File.stderr();
    const stdout_file = std.Io.File.stdout();
    // Route by severity: info/debug -> stdout, warn/err -> stderr.
    const file = switch (level) {
        .info, .debug => stdout_file,
        .warn, .err => stderr_file,
    };
    // Best-effort advisory file lock; failure to lock is ignored.
    file.lock(io, .exclusive) catch {};
    defer file.unlock(io);
    // NOTE(review): `.cpu_process` is process CPU time, not wall clock —
    // confirm that is the intended source for an HH:MM:SS.mmm display.
    const now = std.Io.Timestamp.now(io, .cpu_process).toMilliseconds();
    const now_ms = @mod(now, std.time.ms_per_s);
    const now_sec = @mod(@divTrunc(now, std.time.ms_per_s), std.time.s_per_min);
    const now_min = @mod(@divTrunc(now, std.time.ms_per_min), 60);
    const now_hour = @mod(@divTrunc(now, std.time.ms_per_hour), 24);
    // Render the user message (with trailing '\n') into a fixed buffer first;
    // overly long messages are silently truncated by the fixed writer.
    var fmt_buffer = std.mem.zeroes([4096]u8);
    var fmt_writer = std.Io.Writer.fixed(&fmt_buffer);
    fmt_writer.print(format ++ "\n", args) catch {};
    fmt_writer.flush() catch return;
    // Hold the module mutex while emitting so concurrent callers do not
    // interleave their prefixed lines.
    mutex.lock(io) catch return;
    defer mutex.unlock(io);
    var last_pos: usize = 0;
    // Emit one prefixed output line per '\n'-terminated segment of the message.
    while (std.mem.indexOfScalarPos(u8, &fmt_buffer, last_pos, '\n')) |pos| : (last_pos = pos + 1) {
        var buffer = std.mem.zeroes([512]u8);
        var file_writer = file.writer(io, &buffer);
        var writer = &file_writer.interface;
        const term: std.Io.Terminal = .{
            .writer = writer,
            .mode = std.Io.Terminal.Mode.detect(io, file, false, false) catch return,
        };
        // Color-coded prefix; every piece is best-effort (`catch continue`
        // skips the rest of this line on write failure).
        term.setColor(.magenta) catch {};
        writer.print("[StrollDriver ", .{}) catch continue;
        if (!builtin.is_test) {
            term.setColor(.cyan) catch {};
            writer.print(root.DRIVER_NAME, .{}) catch continue;
        }
        term.setColor(.yellow) catch {};
        writer.print(" {d}:{d}:{d}.{d}", .{ now_hour, now_min, now_sec, now_ms }) catch continue;
        term.setColor(.magenta) catch {};
        writer.print("]", .{}) catch continue;
        term.setColor(.cyan) catch {};
        writer.print("[Thread {d: >8}]", .{std.Thread.getCurrentId()}) catch continue;
        term.setColor(level_color) catch {};
        writer.print(prefix, .{}) catch continue;
        term.setColor(switch (level) {
            .err => .red,
            .warn => .magenta,
            else => .green,
        }) catch {};
        writer.print("{s: >30}", .{scope_prefix}) catch continue;
        term.setColor(.reset) catch {};
        writer.print("{s}\n", .{fmt_buffer[last_pos..pos]}) catch continue;
        writer.flush() catch continue;
    }
}

View File

@@ -1,41 +0,0 @@
const std = @import("std");
const Self = @This();
pub const Element = struct {
log: [512]u8,
indent_level: usize,
log_level: std.log.Level,
};
stack: std.ArrayList(Element),
allocator: std.mem.Allocator = std.heap.c_allocator,
pub const empty: Self = .{
.stack = .empty,
};
pub fn pushBack(self: *Self, element: Element) !void {
try self.stack.append(self.allocator, element);
}
pub fn popBack(self: *Self) ?Element {
return self.stack.pop();
}
pub fn popFront(self: *Self) Element {
return self.stack.orderedRemove(0);
}
pub fn getLastOrNull(self: *Self) ?Element {
return self.stack.getLastOrNull();
}
pub inline fn len(self: *Self) usize {
return self.stack.items.len;
}
pub fn deinit(self: *Self) void {
self.stack.deinit(self.allocator);
self.* = .empty;
}

View File

@@ -1,54 +0,0 @@
const std = @import("std");
const DebugStack = @import("DebugStack.zig");
const lib = @import("../lib.zig");
const Self = @This();
indent_enabled: bool,
indent_level: usize,
debug_stack: DebugStack,
pub const init: Self = .{
.indent_enabled = true,
.indent_level = 0,
.debug_stack = .empty,
};
pub fn indent(self: *Self) void {
if (lib.getLogVerboseLevel() == .None) {
return;
}
const new_indent_level, const has_overflown = @addWithOverflow(self.indent_level, 1);
if (has_overflown == 0) {
self.indent_level = new_indent_level;
}
}
pub fn unindent(self: *Self) void {
if (lib.getLogVerboseLevel() == .None) {
return;
}
const new_indent_level, const has_overflown = @subWithOverflow(self.indent_level, 1);
if (has_overflown == 0) {
self.indent_level = new_indent_level;
}
loop: while (self.debug_stack.getLastOrNull()) |last| {
if (last.indent_level >= self.indent_level) {
_ = self.debug_stack.popBack();
} else {
break :loop;
}
}
}
pub inline fn enableIndent(self: *Self) void {
self.indent_enabled = true;
}
pub inline fn disableIndent(self: *Self) void {
self.indent_enabled = false;
}
pub inline fn deinit(self: *Self) void {
self.debug_stack.deinit();
}

View File

@@ -1,40 +0,0 @@
const std = @import("std");
const Manager = @import("Manager.zig");
const Self = @This();
managers: std.AutoArrayHashMapUnmanaged(std.Thread.Id, Manager),
allocator: std.mem.Allocator,
mutex: std.Io.Mutex,
io: std.Io,
pub fn init(io: std.Io, allocator: std.mem.Allocator) Self {
return .{
.managers = .empty,
.allocator = allocator,
.mutex = .init,
.io = io,
};
}
pub fn get(self: *Self) *Manager {
const allocator = self.allocator.allocator();
self.mutex.lock();
defer self.mutex.unlock();
return (self.managers.getOrPutValue(allocator, std.Thread.getCurrentId(), .init) catch @panic("Out of memory")).value_ptr;
}
pub fn deinit(self: *Self) void {
self.mutex.lockUncancelable();
defer self.mutex.unlock();
if (self.managers.getPtr(std.Thread.getCurrentId())) |manager| {
manager.deinit();
_ = self.managers.orderedRemove(std.Thread.getCurrentId());
}
if (self.managers.count() == 0) {
self.managers.deinit(self.allocator);
}
}

View File

@@ -1,141 +0,0 @@
//! A instance-level logger that stack in memory all same-indent `debug` logs
//! and only displays them in reverse order if a non-debug log is requested
const std = @import("std");
const builtin = @import("builtin");
const root = @import("root");
const lib = @import("../lib.zig");
const ThreadSafeManager = @import("ThreadSafeManager.zig");
comptime {
if (!builtin.is_test) {
if (!@hasDecl(root, "DRIVER_NAME")) {
@compileError("Missing DRIVER_NAME in module root");
}
}
}
var manager: ThreadSafeManager = .init;
pub inline fn getManager() *ThreadSafeManager {
return &manager;
}
pub inline fn fixme(comptime format: []const u8, args: anytype) void {
if (lib.getLogVerboseLevel() == .None) {
return;
}
getManager().get().disableIndent();
defer getManager().get().enableIndent();
nestedFixme(format, args);
}
pub inline fn nestedFixme(comptime format: []const u8, args: anytype) void {
if (lib.getLogVerboseLevel() == .None) {
return;
}
std.log.scoped(.FIXME).warn("FIXME: " ++ format, args);
}
pub fn log(comptime level: std.log.Level, comptime scope: @EnumLiteral(), comptime format: []const u8, args: anytype) void {
if (lib.getLogVerboseLevel() == .None) {
return;
}
const scope_name = @tagName(scope);
const scope_prefix = comptime blk: {
const limit = 30 - 4;
break :blk if (scope_name.len >= limit)
std.fmt.comptimePrint("({s}...): ", .{scope_name[0..(limit - 3)]})
else
std.fmt.comptimePrint("({s}): ", .{scope_name});
};
const prefix = std.fmt.comptimePrint("{s: <10}", .{"[" ++ comptime level.asText() ++ "] "});
const level_color: std.Io.tty.Color = switch (level) {
.info, .debug => .blue,
.warn => .magenta,
.err => .red,
};
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
var stderr_file = std.fs.File.stderr();
var stdout_file = std.fs.File.stdout();
const file = switch (level) {
.info, .debug => stdout_file,
.warn, .err => stderr_file,
};
var fmt_buffer = std.mem.zeroes([4096]u8);
var fmt_writer = std.Io.Writer.fixed(&fmt_buffer);
fmt_writer.print(format ++ "\n", args) catch {};
fmt_writer.flush() catch return;
var last_pos: usize = 0;
while (std.mem.indexOfScalarPos(u8, &fmt_buffer, last_pos, '\n')) |pos| {
var buffer = std.mem.zeroes([512]u8);
var out_config = std.Io.tty.Config.detect(file);
var writer = std.Io.Writer.fixed(&buffer);
out_config.setColor(&writer, .magenta) catch {};
writer.print("[StrollDriver ", .{}) catch {};
if (!builtin.is_test) {
out_config.setColor(&writer, .cyan) catch {};
writer.print(root.DRIVER_NAME, .{}) catch {};
}
out_config.setColor(&writer, .yellow) catch {};
writer.print(" {d:02}:{d:02}:{d:02}.{d:03}", .{ now.hour, now.minute, now.second, @divFloor(now.nanosecond, std.time.ns_per_ms) }) catch {};
out_config.setColor(&writer, .magenta) catch {};
writer.print("]", .{}) catch {};
out_config.setColor(&writer, level_color) catch {};
writer.print(prefix, .{}) catch {};
out_config.setColor(&writer, switch (level) {
.err => .red,
.warn => .magenta,
else => .green,
}) catch {};
writer.print("{s: >30}", .{scope_prefix}) catch {};
out_config.setColor(&writer, .reset) catch {};
if (getManager().get().indent_enabled) {
for (0..getManager().get().indent_level) |_| {
writer.print("> ", .{}) catch {};
}
}
writer.print("{s}\n", .{fmt_buffer[last_pos..pos]}) catch {};
writer.flush() catch return;
if (level == .debug and lib.getLogVerboseLevel() == .Standard) {
getManager().get().debug_stack.pushBack(.{
.log = buffer,
.indent_level = getManager().get().indent_level,
.log_level = level,
}) catch return;
return;
}
if (getManager().get().indent_enabled) {
while (getManager().get().debug_stack.len() != 0) {
const elem = getManager().get().debug_stack.popFront();
switch (elem.log_level) {
.info, .debug => _ = stdout_file.write(&elem.log) catch {},
.warn, .err => _ = stderr_file.write(&elem.log) catch {},
}
}
}
switch (level) {
.info, .debug => _ = stdout_file.write(&buffer) catch {},
.warn, .err => _ = stderr_file.write(&buffer) catch {},
}
last_pos = pos + 1;
}
}