adding base descriptor set layout

This commit is contained in:
2026-01-31 01:38:32 +01:00
parent 1d10a5748b
commit 970a7cb343
4 changed files with 207 additions and 9 deletions

View File

@@ -1,14 +1,52 @@
const std = @import("std"); const std = @import("std");
const vk = @import("vulkan"); const vk = @import("vulkan");
const VulkanAllocator = @import("VulkanAllocator.zig");
const VkError = @import("error_set.zig").VkError; const VkError = @import("error_set.zig").VkError;
const Device = @import("Device.zig"); const Device = @import("Device.zig");
const Sampler = @import("Sampler.zig");
const Self = @This(); const Self = @This();
pub const ObjectType: vk.ObjectType = .descriptor_set_layout; pub const ObjectType: vk.ObjectType = .descriptor_set_layout;
const BindingLayout = struct {
descriptor_type: vk.DescriptorType,
dynamic_index: usize,
array_size: usize,
/// This slice points to an array located after the binding layouts array
immutable_samplers: []*const Sampler,
driver_data: *anyopaque,
};
owner: *Device, owner: *Device,
bindings: ?[]const vk.DescriptorSetLayoutBinding,
/// Memory containing actual binding layouts array and immutable samplers array
heap: []u8,
bindings: []BindingLayout,
dynamic_offset_count: usize,
/// Shader stages affected by this descriptor set
stages: vk.ShaderStageFlags,
/// Mesa's common Vulkan runtime states:
///
/// It's often necessary to store a pointer to the descriptor set layout in
/// the descriptor so that any entrypoint which has access to a descriptor
/// set also has the layout. While layouts are often passed into various
/// entrypoints, they're notably missing from vkUpdateDescriptorSets(). In
/// order to implement descriptor writes, you either need to stash a pointer
/// to the descriptor set layout in the descriptor set or you need to copy
/// all of the relevant information. Storing a pointer is a lot cheaper.
///
/// Because descriptor set layout lifetimes and descriptor set lifetimes are
/// not guaranteed to coincide, we have to reference count if we're going to
/// do this.
ref_count: std.atomic.Value(usize),
vtable: *const VTable, vtable: *const VTable,
@@ -17,21 +55,96 @@ pub const VTable = struct {
}; };
pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.DescriptorSetLayoutCreateInfo) VkError!Self { pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.DescriptorSetLayoutCreateInfo) VkError!Self {
const bindings = if (info.p_bindings) |bindings| const command_allocator = VulkanAllocator.from(allocator).cloneWithScope(.command).allocator();
allocator.dupe(vk.DescriptorSetLayoutBinding, bindings[0..info.binding_count]) catch return VkError.OutOfHostMemory
else var binding_count: usize = 0;
null; var immutable_samplers_count: usize = 0;
if (info.p_bindings) |binding_infos| {
for (binding_infos, 0..info.binding_count) |binding, _| {
binding_count = @max(binding_count, binding.binding + 1);
if (bindingHasImmutableSamplers(binding)) {
immutable_samplers_count += binding.descriptor_count;
}
}
}
const size = (binding_count * @sizeOf(BindingLayout)) + (immutable_samplers_count * @sizeOf(*Sampler));
// Clean way to put the immutable samplers array right after the binding layouts one
const heap = allocator.alloc(u8, size) catch return VkError.OutOfHostMemory;
errdefer allocator.free(heap);
var local_heap = std.heap.FixedBufferAllocator.init(heap);
const local_allocator = local_heap.allocator();
const bindings = local_allocator.alloc(BindingLayout, binding_count) catch return VkError.OutOfHostMemory;
const immutable_samplers = local_allocator.alloc(*const Sampler, immutable_samplers_count) catch return VkError.OutOfHostMemory;
var stages: vk.ShaderStageFlags = .{};
if (info.p_bindings) |binding_infos| {
const sorted_bindings = command_allocator.dupe(vk.DescriptorSetLayoutBinding, binding_infos[0..info.binding_count]) catch return VkError.OutOfHostMemory;
defer command_allocator.free(sorted_bindings);
std.mem.sort(vk.DescriptorSetLayoutBinding, sorted_bindings, .{}, sortBindings);
for (sorted_bindings) |binding_info| {
const binding_index = binding_info.binding;
const descriptor_count = switch (binding_info.descriptor_type) {
.inline_uniform_block => 1,
else => binding_info.descriptor_count,
};
bindings[binding_index] = .{
.descriptor_type = binding_info.descriptor_type,
.array_size = descriptor_count,
.dynamic_index = 0,
.immutable_samplers = immutable_samplers[0..],
.driver_data = undefined,
};
stages = stages.merge(binding_info.stage_flags);
}
}
return .{ return .{
.owner = device, .owner = device,
.heap = heap,
.bindings = bindings, .bindings = bindings,
.dynamic_offset_count = 0,
.stages = stages,
.ref_count = std.atomic.Value(usize).init(1),
.vtable = undefined, .vtable = undefined,
}; };
} }
/// Comparator for `std.mem.sort`: orders descriptor set layout bindings by
/// ascending binding index so each one can be written to its slot in the
/// dense `bindings` array. The context argument is unused.
fn sortBindings(_: @TypeOf(.{}), a: vk.DescriptorSetLayoutBinding, b: vk.DescriptorSetLayoutBinding) bool {
    return a.binding < b.binding;
}
/// Whether this binding supplies immutable samplers. Only the two
/// sampler-bearing descriptor types may carry them; for every other type the
/// `p_immutable_samplers` field is ignored per the Vulkan spec, so we report
/// false without inspecting it.
inline fn bindingHasImmutableSamplers(binding: vk.DescriptorSetLayoutBinding) bool {
    const is_sampler_type = binding.descriptor_type == .sampler or
        binding.descriptor_type == .combined_image_sampler;
    return is_sampler_type and binding.p_immutable_samplers != null;
}
pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void { pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
if (self.bindings) |bindings| { self.unref(allocator);
allocator.free(bindings); }
}
pub inline fn drop(self: *Self, allocator: std.mem.Allocator) void {
allocator.free(self.heap);
self.vtable.destroy(self, allocator); self.vtable.destroy(self, allocator);
} }
/// Acquire an additional reference to this layout.
/// `.monotonic` is sufficient here: taking a new reference from an existing
/// (already visible) one needs no ordering with other memory operations.
pub inline fn ref(self: *Self) void {
_ = self.ref_count.fetchAdd(1, .monotonic);
}
/// Release one reference; frees the layout via `drop` when the last
/// reference goes away. `allocator` must be the same allocator the layout's
/// memory was obtained from.
///
/// Ordering is `.acq_rel`, not plain `.release`: the release half publishes
/// this thread's prior writes to the object before the count drops, and the
/// acquire half makes the thread that observes the count hit zero see every
/// other thread's writes before it destroys the memory. A release-only
/// decrement would let the destroying thread free the object while another
/// thread's writes to it are still unordered with the destruction.
pub inline fn unref(self: *Self, allocator: std.mem.Allocator) void {
    if (self.ref_count.fetchSub(1, .acq_rel) == 1) {
        self.drop(allocator);
    }
}

View File

@@ -1,7 +1,9 @@
const std = @import("std"); const std = @import("std");
const vk = @import("vulkan"); const vk = @import("vulkan");
const lib = @import("lib.zig");
const NonDispatchable = @import("NonDispatchable.zig"); const NonDispatchable = @import("NonDispatchable.zig");
const DescriptorSetLayout = @import("DescriptorSetLayout.zig");
const VkError = @import("error_set.zig").VkError; const VkError = @import("error_set.zig").VkError;
@@ -12,6 +14,30 @@ pub const ObjectType: vk.ObjectType = .pipeline_layout;
owner: *Device, owner: *Device,
set_count: usize,
set_layouts: [lib.VULKAN_MAX_DESCRIPTOR_SETS]*DescriptorSetLayout,
dynamic_descriptor_offsets: [lib.VULKAN_MAX_DESCRIPTOR_SETS]usize,
push_ranges_count: usize,
push_ranges: [lib.VULKAN_MAX_PUSH_CONSTANT_RANGES]vk.PushConstantRange,
/// Mesa's common Vulkan runtime states:
///
/// It's often necessary to store a pointer to the descriptor set layout in
/// the descriptor so that any entrypoint which has access to a descriptor
/// set also has the layout. While layouts are often passed into various
/// entrypoints, they're notably missing from vkUpdateDescriptorSets(). In
/// order to implement descriptor writes, you either need to stash a pointer
/// to the descriptor set layout in the descriptor set or you need to copy
/// all of the relevant information. Storing a pointer is a lot cheaper.
///
/// Because descriptor set layout lifetimes and descriptor set lifetimes are
/// not guaranteed to coincide, we have to reference count if we're going to
/// do this.
ref_count: std.atomic.Value(usize),
vtable: *const VTable, vtable: *const VTable,
pub const VTable = struct { pub const VTable = struct {
@@ -23,10 +49,30 @@ pub fn init(device: *Device, allocator: std.mem.Allocator, info: *const vk.Pipel
_ = info; _ = info;
return .{ return .{
.owner = device, .owner = device,
.set_count = 0,
.set_layouts = undefined,
.dynamic_descriptor_offsets = [_]usize{0} ** lib.VULKAN_MAX_DESCRIPTOR_SETS,
.push_ranges_count = 0,
.push_ranges = undefined,
.ref_count = std.atomic.Value(usize).init(1),
.vtable = undefined, .vtable = undefined,
}; };
} }
pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void { pub inline fn destroy(self: *Self, allocator: std.mem.Allocator) void {
self.unref(allocator);
}
pub inline fn drop(self: *Self, allocator: std.mem.Allocator) void {
self.vtable.destroy(self, allocator); self.vtable.destroy(self, allocator);
} }
/// Acquire an additional reference to this pipeline layout.
/// `.monotonic` is sufficient here: taking a new reference from an existing
/// (already visible) one needs no ordering with other memory operations.
pub inline fn ref(self: *Self) void {
_ = self.ref_count.fetchAdd(1, .monotonic);
}
/// Release one reference; frees the pipeline layout via `drop` when the last
/// reference goes away. `allocator` must be the same allocator the layout's
/// memory was obtained from.
///
/// Ordering is `.acq_rel`, not plain `.release`: the release half publishes
/// this thread's prior writes to the object before the count drops, and the
/// acquire half makes the thread that observes the count hit zero see every
/// other thread's writes before it destroys the memory. A release-only
/// decrement would let the destroying thread free the object while another
/// thread's writes to it are still unordered with the destruction.
pub inline fn unref(self: *Self, allocator: std.mem.Allocator) void {
    if (self.ref_count.fetchSub(1, .acq_rel) == 1) {
        self.drop(allocator);
    }
}

View File

@@ -55,6 +55,44 @@ pub const DRIVER_NAME = "Unnamed Stroll Driver";
/// Default Vulkan version /// Default Vulkan version
pub const VULKAN_VERSION = vk.makeApiVersion(0, 1, 0, 0); pub const VULKAN_VERSION = vk.makeApiVersion(0, 1, 0, 0);
/// Maximum number of descriptor sets per pipeline
pub const VULKAN_MAX_DESCRIPTOR_SETS = 32;
/// From the Vulkan 1.3.274 spec:
///
/// VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292
///
/// "Any two elements of pPushConstantRanges must not include the same
/// stage in stageFlags"
///
/// and
///
/// VUID-VkPushConstantRange-stageFlags-requiredbitmask
///
/// "stageFlags must not be 0"
///
/// This means that the number of push constant ranges is effectively bounded
/// by the number of possible shader stages. Not the number of stages that can
/// be compiled together (a pipeline layout can be used in multiple pipelines
/// with different sets of shaders) but the total number of stage bits supported
/// by the implementation. Currently, those are
///
/// - VK_SHADER_STAGE_VERTEX_BIT
/// - VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
/// - VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
/// - VK_SHADER_STAGE_GEOMETRY_BIT
/// - VK_SHADER_STAGE_FRAGMENT_BIT
/// - VK_SHADER_STAGE_COMPUTE_BIT
/// - VK_SHADER_STAGE_RAYGEN_BIT_KHR
/// - VK_SHADER_STAGE_ANY_HIT_BIT_KHR
/// - VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR
/// - VK_SHADER_STAGE_MISS_BIT_KHR
/// - VK_SHADER_STAGE_INTERSECTION_BIT_KHR
/// - VK_SHADER_STAGE_CALLABLE_BIT_KHR
/// - VK_SHADER_STAGE_TASK_BIT_EXT
/// - VK_SHADER_STAGE_MESH_BIT_EXT
pub const VULKAN_MAX_PUSH_CONSTANT_RANGES = 14;
pub const std_options: std.Options = .{ pub const std_options: std.Options = .{
.log_level = .debug, .log_level = .debug,
.logFn = logger.log, .logFn = logger.log,

View File

@@ -800,7 +800,8 @@ pub export fn strollCreateDescriptorSetLayout(p_device: vk.Device, info: *const
return .error_validation_failed; return .error_validation_failed;
} }
const allocator = VulkanAllocator.init(callbacks, .object).allocator(); // Device scoped because we're reference counting and layout may not be destroyed when vkDestroyDescriptorSetLayout is called
const allocator = VulkanAllocator.init(callbacks, .device).allocator();
const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return toVkResult(err); const device = Dispatchable(Device).fromHandleObject(p_device) catch |err| return toVkResult(err);
const layout = device.createDescriptorSetLayout(allocator, info) catch |err| return toVkResult(err); const layout = device.createDescriptorSetLayout(allocator, info) catch |err| return toVkResult(err);
p_layout.* = (NonDispatchable(DescriptorSetLayout).wrap(allocator, layout) catch |err| return toVkResult(err)).toVkHandle(vk.DescriptorSetLayout); p_layout.* = (NonDispatchable(DescriptorSetLayout).wrap(allocator, layout) catch |err| return toVkResult(err)).toVkHandle(vk.DescriptorSetLayout);