adding async chunk generation

2025-05-17 23:10:31 +02:00
parent e6982f99e5
commit be29d9a3be
19 changed files with 167 additions and 75 deletions

View File

@@ -141,7 +141,7 @@ namespace Scop
ImGui::Text("Swapchain images count %ld", p_renderer->GetSwapchain().GetSwapchainImages().size());
ImGui::Text("Drawcalls %ld", p_renderer->GetDrawCallsCounterRef());
ImGui::Text("Polygon drawn %ld", p_renderer->GetPolygonDrawnCounterRef());
ImGui::Text("Allocations count %ld", RenderCore::Get().GetAllocator().GetAllocationsCount());
ImGui::Text("Allocations count %ld / %u", RenderCore::Get().GetAllocator().GetAllocationsCount(), props.limits.maxMemoryAllocationCount);
ImGui::Text("Buffer count %ld", GPUBuffer::GetBufferCount());
ImGui::Text("Image count %ld", Image::GetImageCount());
ImGui::Text("Window dimensions: %ux%u", p_renderer->GetWindow()->GetWidth(), p_renderer->GetWindow()->GetHeight());

View File

@@ -12,13 +12,17 @@ namespace Scop
         if(p_mesh)
             m_materials.resize(p_mesh->GetSubMeshCount() + 1);
-        CPUBuffer default_pixels{ kvfFormatSize(VK_FORMAT_R8G8B8A8_SRGB) };
-        default_pixels.GetDataAs<std::uint32_t>()[0] = 0xFFFFFFFF;
-        std::shared_ptr<Texture> default_texture = std::make_shared<Texture>(std::move(default_pixels), 1, 1, VK_FORMAT_R8G8B8A8_SRGB);
+        if(!s_default_material)
+        {
+            CPUBuffer default_pixels{ kvfFormatSize(VK_FORMAT_R8G8B8A8_SRGB) };
+            default_pixels.GetDataAs<std::uint32_t>()[0] = 0xFFFFFFFF;
+            std::shared_ptr<Texture> default_texture = std::make_shared<Texture>(std::move(default_pixels), 1, 1, VK_FORMAT_R8G8B8A8_SRGB);
-        MaterialTextures textures;
-        textures.albedo = default_texture;
-        m_materials.back() = std::make_shared<Material>(textures);
+            MaterialTextures textures;
+            textures.albedo = default_texture;
+            s_default_material = std::make_shared<Material>(textures);
+        }
+        m_materials.back() = s_default_material;
     }
     void Model::Draw(VkCommandBuffer cmd, const DescriptorSet& matrices_set, const GraphicPipeline& pipeline, DescriptorSet& set, std::size_t& drawcalls, std::size_t& polygondrawn, std::size_t frame_index) const
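
Sharing one fallback material implies a new static member on Model, presumably declared in one of the other changed files. A hedged sketch of that declaration; the access level and initializer are assumptions:

class Model
{
    // ...
private:
    // Sketch (assumption): one shared 1x1 white-albedo material for every Model,
    // created lazily on first construction instead of once per model. This also
    // trims per-model texture allocations, which now count against the limit
    // displayed in the overlay above.
    inline static std::shared_ptr<Material> s_default_material = nullptr;
};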

View File

@@ -4,7 +4,7 @@
 namespace Scop
 {
-    void GPUBuffer::Init(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, CPUBuffer data, std::string_view name)
+    void GPUBuffer::Init(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage, CPUBuffer data, std::string_view name, bool dedicated_alloc)
     {
         if(type == BufferType::Constant)
         {
@@ -30,7 +30,7 @@ namespace Scop
         if(type == BufferType::Staging && data.Empty())
             Warning("Vulkan: trying to create staging buffer without data (wtf?)");
-        CreateBuffer(size, m_usage, m_flags, std::move(name));
+        CreateBuffer(size, m_usage, m_flags, std::move(name), dedicated_alloc);
         if(!data.Empty())
         {
@@ -41,7 +41,7 @@ namespace Scop
         PushToGPU();
     }
-    void GPUBuffer::CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, std::string_view name)
+    void GPUBuffer::CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties, std::string_view name, bool dedicated_alloc)
     {
         auto device = RenderCore::Get().GetDevice();
         m_buffer = kvfCreateBuffer(device, usage, size);
@@ -49,7 +49,7 @@ namespace Scop
         VkMemoryRequirements mem_requirements;
         RenderCore::Get().vkGetBufferMemoryRequirements(device, m_buffer, &mem_requirements);
-        m_memory = RenderCore::Get().GetAllocator().Allocate(size, mem_requirements.alignment, *FindMemoryType(mem_requirements.memoryTypeBits, properties));
+        m_memory = RenderCore::Get().GetAllocator().Allocate(size, mem_requirements.alignment, *FindMemoryType(mem_requirements.memoryTypeBits, properties), dedicated_alloc);
         //m_memory = RenderCore::Get().GetAllocator().Allocate(mem_requirements.size, mem_requirements.alignment, *FindMemoryType(mem_requirements.memoryTypeBits, properties));
         RenderCore::Get().vkBindBufferMemory(device, m_buffer, m_memory.memory, m_memory.offset);
@@ -73,12 +73,10 @@ namespace Scop
             name_info.objectHandle = reinterpret_cast<std::uint64_t>(m_buffer);
             name_info.pObjectName = m_name.c_str();
             RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info);
-            Message("Vulkan: % buffer created", m_name);
-        #else
-            Message("Vulkan: buffer created");
         #endif
+        m_is_dedicated_alloc = dedicated_alloc;
         s_buffer_count++;
     }
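
The new `m_is_dedicated_alloc` member and the extra `dedicated_alloc` parameters imply matching changes in the header, which is among the other modified files. A short sketch of what they likely look like; the default arguments are an assumption that would keep existing call sites compiling unchanged:

// Sketch (assumption): header-side additions to GPUBuffer.
void Init(BufferType type, VkDeviceSize size, VkBufferUsageFlags usage,
          CPUBuffer data, std::string_view name, bool dedicated_alloc = false);
void CreateBuffer(VkDeviceSize size, VkBufferUsageFlags usage,
                  VkMemoryPropertyFlags properties, std::string_view name,
                  bool dedicated_alloc = false);
bool m_is_dedicated_alloc = false; // remembered so PushToGPU() can replicate the choice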
@@ -99,10 +97,16 @@ namespace Scop
         kvfBeginCommandBuffer(cmd, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
         kvfCopyBufferToBuffer(cmd, m_buffer, buffer.Get(), buffer.GetSize(), src_offset, dst_offset);
         kvfEndCommandBuffer(cmd);
-        VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice());
-        kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence);
-        kvfWaitForFence(RenderCore::Get().GetDevice(), fence);
-        kvfDestroyFence(RenderCore::Get().GetDevice(), fence);
+        if(!RenderCore::Get().StackSubmits())
+        {
+            VkFence fence = kvfCreateFence(RenderCore::Get().GetDevice());
+            kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, fence);
+            kvfWaitForFence(RenderCore::Get().GetDevice(), fence);
+            kvfDestroyFence(RenderCore::Get().GetDevice(), fence);
+            kvfDestroyCommandBuffer(RenderCore::Get().GetDevice(), cmd);
+        }
+        else
+            kvfSubmitSingleTimeCommandBuffer(RenderCore::Get().GetDevice(), cmd, KVF_GRAPHICS_QUEUE, VK_NULL_HANDLE);
         return true;
     }
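
When `StackSubmits()` is off, the copy keeps the old synchronous behavior: submit, wait on a fence, then destroy the fence and command buffer. When it is on, the submit goes out with `VK_NULL_HANDLE` as the fence and the command buffer stays alive, so an async chunk-generation worker can queue many uploads without stalling on each one; synchronization and cleanup are presumably deferred to a later flush by the renderer. A sketch of the synchronous pattern as a standalone helper, using only the kvf calls visible in this hunk (the helper itself is ours, not the repo's):

static void SubmitAndWait(VkCommandBuffer cmd)
{
    VkDevice device = RenderCore::Get().GetDevice();
    VkFence fence = kvfCreateFence(device);
    kvfSubmitSingleTimeCommandBuffer(device, cmd, KVF_GRAPHICS_QUEUE, fence);
    kvfWaitForFence(device, fence);       // block until the GPU has finished the copy
    kvfDestroyFence(device, fence);
    kvfDestroyCommandBuffer(device, cmd); // safe to free: the work is done
}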
@@ -111,12 +115,11 @@ namespace Scop
         GPUBuffer new_buffer;
         new_buffer.m_usage = (this->m_usage & 0xFFFFFFFC) | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
         new_buffer.m_flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-        new_buffer.CreateBuffer(m_memory.size, new_buffer.m_usage, new_buffer.m_flags, m_name);
+        new_buffer.CreateBuffer(m_memory.size, new_buffer.m_usage, new_buffer.m_flags, m_name, m_is_dedicated_alloc);
         if(new_buffer.CopyFrom(*this))
             Swap(new_buffer);
         new_buffer.Destroy();
-        Message("Vulkan: pushed buffer to GPU memory");
     }
     void GPUBuffer::Destroy() noexcept
@@ -128,7 +131,6 @@ namespace Scop
         RenderCore::Get().GetAllocator().Deallocate(m_memory);
         m_buffer = VK_NULL_HANDLE;
         m_memory = NULL_MEMORY_BLOCK;
-        Message("Vulkan: destroyed buffer");
         s_buffer_count--;
     }

View File

@@ -57,9 +57,6 @@ namespace Scop
             name_info.objectHandle = reinterpret_cast<std::uint64_t>(m_image);
             name_info.pObjectName = name.data();
             RenderCore::Get().vkSetDebugUtilsObjectNameEXT(RenderCore::Get().GetDevice(), &name_info);
-            Message("Vulkan: % image created", name);
-        #else
-            Message("Vulkan: image created");
         #endif
         s_image_count++;
@@ -151,7 +148,6 @@ namespace Scop
             m_memory = NULL_MEMORY_BLOCK;
             kvfDestroyImage(RenderCore::Get().GetDevice(), m_image);
         }
-        Message("Vulkan: image destroyed");
         m_image = VK_NULL_HANDLE;
         m_memory = NULL_MEMORY_BLOCK;
         m_image = VK_NULL_HANDLE;

View File

@@ -6,8 +6,8 @@
 namespace Scop
 {
-    MemoryChunk::MemoryChunk(VkDevice device, VkPhysicalDevice physical, VkDeviceSize size, std::int32_t memory_type_index)
-    : m_device(device), m_physical(physical), m_size(size), m_memory_type_index(memory_type_index)
+    MemoryChunk::MemoryChunk(VkDevice device, VkPhysicalDevice physical, VkDeviceSize size, std::int32_t memory_type_index, bool is_dedicated)
+    : m_device(device), m_physical(physical), m_size(size), m_memory_type_index(memory_type_index), m_is_dedicated(is_dedicated)
     {
         Verify(device != VK_NULL_HANDLE, "Memory Chunk : invalid device");
         VkMemoryAllocateInfo alloc_info{};
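
A chunk now knows whether it was created for a single dedicated allocation. The accessor used by `Deallocate` in the next file suggests roughly these header additions (a sketch, not the repo's declaration):

class MemoryChunk
{
public:
    // Sketch (assumption): flag queried by the allocator to decide whether the
    // whole chunk can be released once its single block is freed.
    [[nodiscard]] bool IsDedicated() const noexcept { return m_is_dedicated; }
private:
    bool m_is_dedicated = false;
};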

View File

@@ -12,6 +12,7 @@ namespace Scop
     {
         Verify(m_device != VK_NULL_HANDLE, "invalid device");
         Verify(m_physical != VK_NULL_HANDLE, "invalid physical device");
+        const std::lock_guard<std::mutex> guard(m_alloc_mutex);
         if(!dedicated_chunk)
         {
             for(auto& chunk : m_chunks)
@@ -24,7 +25,7 @@ namespace Scop
                 }
             }
         }
-        m_chunks.emplace_back(std::make_unique<MemoryChunk>(m_device, m_physical, (CHUNK_SIZE < size + alignment ? size + alignment : CHUNK_SIZE), memory_type_index));
+        m_chunks.emplace_back(std::make_unique<MemoryChunk>(m_device, m_physical, (CHUNK_SIZE < size + alignment ? size + alignment : CHUNK_SIZE), memory_type_index, dedicated_chunk));
         std::optional<MemoryBlock> block = m_chunks.back()->Allocate(size, alignment);
         m_allocations_count++;
         if(block.has_value())
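
The size expression picks whichever is larger: the default chunk size or the request plus its alignment slack, so one oversized request still fits in its (possibly dedicated) chunk. An equivalent, slightly clearer spelling (a sketch, not the repo's code):

// Needs <algorithm>. Padding by `alignment` guarantees the block can be placed
// at a correctly aligned offset anywhere inside the fresh chunk.
VkDeviceSize chunk_size = std::max<VkDeviceSize>(CHUNK_SIZE, size + alignment);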
@@ -37,11 +38,17 @@ namespace Scop
     {
         Verify(m_device != VK_NULL_HANDLE, "invalid device");
         Verify(m_physical != VK_NULL_HANDLE, "invalid physical device");
-        for(auto& chunk : m_chunks)
+        const std::lock_guard<std::mutex> guard(m_dealloc_mutex);
+        for(auto it = m_chunks.begin(); it != m_chunks.end(); ++it)
         {
-            if(chunk->Has(block))
+            if((*it)->Has(block))
             {
-                chunk->Deallocate(block);
+                (*it)->Deallocate(block);
+                if((*it)->IsDedicated())
+                {
+                    m_chunks.erase(it);
+                    m_allocations_count--;
+                }
                 return;
             }
         }
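
With both `Allocate` and `Deallocate` guarded, the async chunk generation this commit adds can create and free GPU memory on a worker thread concurrently with the render thread, and a dedicated chunk is handed back to the driver as soon as its single block is freed. A hedged usage sketch from such a worker; the size, alignment and `memory_type_index` are illustrative, and the `MemoryBlock` return type is inferred from how `CreateBuffer` uses it above:

auto& allocator = RenderCore::Get().GetAllocator();
// true => dedicated: the block gets its own VkDeviceMemory instead of
// sub-allocating from a shared chunk.
MemoryBlock block = allocator.Allocate(64u << 20, /*alignment*/ 256, memory_type_index, true);
// ... upload the generated chunk's mesh into this block ...
allocator.Deallocate(block); // erases the dedicated chunk, releasing its VkDeviceMemory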