mirror of https://github.com/Kbz-8/Pulse.git
@@ -29,12 +29,12 @@ PulseBuffer VulkanCreateBuffer(PulseDevice device, const PulseBufferCreateInfo*
	if(buffer->usage & PULSE_BUFFER_USAGE_TRANSFER_UPLOAD)
	{
		vulkan_buffer->usage |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
		vulkan_buffer->usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
		allocation_create_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
	}
	if(buffer->usage & PULSE_BUFFER_USAGE_TRANSFER_DOWNLOAD)
	{
		vulkan_buffer->usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
		vulkan_buffer->usage |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
		allocation_create_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
	}
	if(buffer->usage & PULSE_INTERNAL_BUFFER_USAGE_UNIFORM_ACCESS)
@@ -57,8 +57,9 @@ PulseBuffer VulkanCreateBuffer(PulseDevice device, const PulseBufferCreateInfo*
	return buffer;
}

bool VulkanMapBuffer(PulseBuffer buffer, void** data)
bool VulkanMapBuffer(PulseBuffer buffer, PulseMapMode mode, void** data)
{
	PULSE_UNUSED(mode);
	VulkanBuffer* vulkan_buffer = VULKAN_RETRIEVE_DRIVER_DATA_AS(buffer, VulkanBuffer*);
	VulkanDevice* vulkan_device = VULKAN_RETRIEVE_DRIVER_DATA_AS(buffer->device, VulkanDevice*);
	CHECK_VK_RETVAL(buffer->device->backend, vmaMapMemory(vulkan_device->allocator, vulkan_buffer->allocation, data), PULSE_ERROR_MAP_FAILED, false);
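The VMA flags used above decide how the allocation can later be mapped from the CPU. As standalone context only (this is not Pulse code, just a minimal sketch assuming VMA 3.x and a made-up helper name), creating a host-writable upload staging buffer with those flags and mapping it looks like this:

#include <vk_mem_alloc.h>

// Sketch: create a staging buffer the CPU can write sequentially, then map it with
// vmaMapMemory(), the same call the hunk above wraps in CHECK_VK_RETVAL.
static VkResult create_upload_staging(VmaAllocator allocator, VkDeviceSize size,
                                      VkBuffer* out_buffer, VmaAllocation* out_allocation)
{
	VkBufferCreateInfo buffer_info = { 0 };
	buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	buffer_info.size = size;
	buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

	VmaAllocationCreateInfo allocation_info = { 0 };
	allocation_info.usage = VMA_MEMORY_USAGE_AUTO;
	allocation_info.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;

	VkResult res = vmaCreateBuffer(allocator, &buffer_info, &allocation_info, out_buffer, out_allocation, NULL);
	if(res != VK_SUCCESS)
		return res;

	void* mapped = NULL;
	res = vmaMapMemory(allocator, *out_allocation, &mapped); // CPU-visible pointer for sequential writes
	if(res == VK_SUCCESS)
		vmaUnmapMemory(allocator, *out_allocation);
	return res;
}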
@@ -23,7 +23,7 @@ typedef struct VulkanBuffer
} VulkanBuffer;

PulseBuffer VulkanCreateBuffer(PulseDevice device, const PulseBufferCreateInfo* create_infos);
bool VulkanMapBuffer(PulseBuffer buffer, void** data);
bool VulkanMapBuffer(PulseBuffer buffer, PulseMapMode mode, void** data);
void VulkanUnmapBuffer(PulseBuffer buffer);
bool VulkanCopyBufferToBuffer(PulseCommandList cmd, const PulseBufferRegion* src, const PulseBufferRegion* dst);
bool VulkanCopyBufferToImage(PulseCommandList cmd, const PulseBufferRegion* src, const PulseImageRegion* dst);
@@ -109,7 +109,7 @@ bool VulkanSubmitCommandList(PulseDevice device, PulseCommandList cmd, PulseFenc
		default: break;
	}

	VkFence vulkan_fence;
	VkFence vulkan_fence = VK_NULL_HANDLE;
	if(fence != PULSE_NULL_HANDLE)
	{
		vulkan_fence = VULKAN_RETRIEVE_DRIVER_DATA_AS(fence, VkFence);
@@ -132,7 +132,10 @@ bool VulkanSubmitCommandList(PulseDevice device, PulseCommandList cmd, PulseFenc
	submit_info.commandBufferCount = 1;
	submit_info.pCommandBuffers = &vulkan_cmd->cmd;
	res = vulkan_device->vkQueueSubmit(vulkan_queue->queue, 1, &submit_info, vulkan_fence);
	cmd->state = PULSE_COMMAND_LIST_STATE_SENT;
	if(fence != PULSE_NULL_HANDLE)
		cmd->state = PULSE_COMMAND_LIST_STATE_SENT;
	else
		cmd->state = PULSE_COMMAND_LIST_STATE_READY;
	switch(res)
	{
		case VK_SUCCESS: return true;
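Defaulting the local VkFence to VK_NULL_HANDLE works because vkQueueSubmit explicitly accepts a null fence, so one submit path can serve both the fenced and the fence-less case. A minimal standalone sketch of that pattern (not Pulse code; the helper name is made up):

#include <vulkan/vulkan.h>

// Sketch: the fence parameter of vkQueueSubmit may legally be VK_NULL_HANDLE
// when no CPU-side completion signal is needed.
static VkResult submit_one(VkQueue queue, VkCommandBuffer cmd, VkFence optional_fence)
{
	VkSubmitInfo submit_info = { 0 };
	submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
	submit_info.commandBufferCount = 1;
	submit_info.pCommandBuffers = &cmd;
	return vkQueueSubmit(queue, 1, &submit_info, optional_fence); // optional_fence may be VK_NULL_HANDLE
}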
@@ -93,10 +93,7 @@ bool VulkanWaitForFences(PulseDevice device, const PulseFence* fences, uint32_t
	free(vulkan_fences);
	switch(result)
	{
		case VK_SUCCESS:
			for(uint32_t i = 0; i < fences_count; i++)
				fences[i]->cmd->state = PULSE_COMMAND_LIST_STATE_READY;
			break;
		case VK_SUCCESS: break;
		case VK_TIMEOUT: break;

		case VK_ERROR_DEVICE_LOST: PulseSetInternalError(PULSE_ERROR_DEVICE_LOST); return false;
@@ -24,6 +24,8 @@ PulseBackendFlags WebGPUCheckSupport(PulseBackendFlags candidates, PulseShaderFo

bool WebGPULoadBackend(PulseBackend backend, PulseDebugLevel debug_level)
{
	PULSE_UNUSED(backend);
	PULSE_UNUSED(debug_level);
	WebGPUDriverData* driver_data = (WebGPUDriverData*)calloc(1, sizeof(WebGPUDriverData));
	PULSE_CHECK_ALLOCATION_RETVAL(driver_data, false);
	driver_data->instance = wgpuCreateInstance(PULSE_NULLPTR);
@@ -23,4 +23,3 @@ PulseBackendFlags WebGPUCheckSupport(PulseBackendFlags candidates, PulseShaderFo
#endif // PULSE_WEBGPU_H_

#endif // PULSE_ENABLE_WEBGPU_BACKEND
@@ -2,19 +2,149 @@
// This file is part of "Pulse"
// For conditions of distribution and use, see copyright notice in LICENSE

#include <stdatomic.h>
#include <time.h>

#include <Pulse.h>
#include "../../PulseInternal.h"
#include "WebGPU.h"
#include "WebGPUDevice.h"
#include "webgpu.h"
#include "WebGPUBuffer.h"

PulseBuffer WebGPUCreateBuffer(PulseDevice device, const PulseBufferCreateInfo* create_infos)
{
	WebGPUDevice* webgpu_device = WEBGPU_RETRIEVE_DRIVER_DATA_AS(device, WebGPUDevice*);

	PulseBufferHandler* buffer = (PulseBufferHandler*)calloc(1, sizeof(PulseBufferHandler));
	PULSE_CHECK_ALLOCATION_RETVAL(buffer, PULSE_NULL_HANDLE);

	WebGPUBuffer* webgpu_buffer = (WebGPUBuffer*)calloc(1, sizeof(WebGPUBuffer));
	PULSE_CHECK_ALLOCATION_RETVAL(webgpu_buffer, PULSE_NULL_HANDLE);

	buffer->device = device;
	buffer->driver_data = webgpu_buffer;
	buffer->size = create_infos->size;
	buffer->usage = create_infos->usage;

	bool is_storage = false;

	WGPUBufferDescriptor descriptor = { 0 };
	descriptor.mappedAtCreation = false;
	descriptor.size = buffer->size;
	if(buffer->usage & PULSE_BUFFER_USAGE_STORAGE_READ || buffer->usage & PULSE_BUFFER_USAGE_STORAGE_WRITE)
	{
		descriptor.usage |= WGPUBufferUsage_Storage | WGPUBufferUsage_CopyDst | WGPUBufferUsage_CopySrc;
		is_storage = true;
	}
	if(buffer->usage & PULSE_BUFFER_USAGE_TRANSFER_DOWNLOAD)
	{
		descriptor.usage |= WGPUBufferUsage_CopyDst;
		if(!is_storage)
			descriptor.usage |= WGPUBufferUsage_MapRead;
	}
	if(buffer->usage & PULSE_BUFFER_USAGE_TRANSFER_UPLOAD)
		descriptor.usage |= WGPUBufferUsage_CopyDst | WGPUBufferUsage_CopySrc;
	if(buffer->usage & PULSE_INTERNAL_BUFFER_USAGE_UNIFORM_ACCESS)
		descriptor.usage |= WGPUBufferUsage_Uniform | WGPUBufferUsage_CopyDst | WGPUBufferUsage_CopySrc;

	webgpu_buffer->buffer = wgpuDeviceCreateBuffer(webgpu_device->device, &descriptor);
	if(webgpu_buffer->buffer == PULSE_NULLPTR)
	{
		free(webgpu_buffer);
		free(buffer);
		return PULSE_NULL_HANDLE;
	}
	return buffer;
}

bool WebGPUMapBuffer(PulseBuffer buffer, void** data)
#include <stdio.h>

static void WebGPUMapBufferCallback(WGPUMapAsyncStatus status, WGPUStringView message, void* userdata1, void* userdata2)
{
	atomic_int* mapping_finished = (atomic_int*)userdata1;
	PulseBuffer buffer = (PulseBuffer)userdata2;
	puts("test");
	if(status == WGPUMapAsyncStatus_Success)
		atomic_store(mapping_finished, 1);
	else
	{
		const char* reasons[] = {
			"nvm it was successfull",
			"instance has been dropped",
			"an error occured",
			"mapping was aborted",
		};
		if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(buffer->device->backend))
			PulseLogErrorFmt(buffer->device->backend, "(WebGPU) buffer mapping failed because %s. %.*s", reasons[status], message.length, message.data);
		PulseSetInternalError(PULSE_ERROR_MAP_FAILED);
		atomic_store(mapping_finished, 2);
	}
}
bool WebGPUMapBuffer(PulseBuffer buffer, PulseMapMode mode, void** data)
{
	WebGPUBuffer* webgpu_buffer = WEBGPU_RETRIEVE_DRIVER_DATA_AS(buffer, WebGPUBuffer*);

	// If we only upload we can just use wgpuQueueWriteBuffer
	// https://toji.dev/webgpu-best-practices/buffer-uploads.html
	if(mode == PULSE_MAP_WRITE)
	{
		if(webgpu_buffer->map == PULSE_NULLPTR)
			webgpu_buffer->map = malloc(buffer->size);
		else
			webgpu_buffer->map = realloc(webgpu_buffer->map, buffer->size);
		PULSE_CHECK_ALLOCATION_RETVAL(webgpu_buffer->map, false);
	}
	else
	{
		atomic_int mapping_finished;
		atomic_store(&mapping_finished, 0);

		const uint32_t timeout = 5000;
		clock_t start = clock();

		webgpu_buffer->map = PULSE_NULLPTR;

		WGPUBufferMapCallbackInfo callback_info = { 0 };
		callback_info.mode = WGPUCallbackMode_AllowSpontaneous;
		callback_info.callback = WebGPUMapBufferCallback;
		callback_info.userdata1 = &mapping_finished;
		callback_info.userdata2 = buffer;
		wgpuBufferMapAsync(webgpu_buffer->buffer, WGPUMapMode_Read, 0, buffer->size, callback_info);

		while(atomic_load(&mapping_finished) == 0)
		{
			clock_t elapsed = clock() - start;
			if(elapsed > timeout)
			{
				if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(buffer->device->backend))
					PulseLogError(buffer->device->backend, "(WebGPU) buffer mapping failed (timeout)");
				PulseSetInternalError(PULSE_ERROR_MAP_FAILED);
				return false;
			}
			PulseSleep(1); // 1ms
		}

		if(atomic_load(&mapping_finished) == 1)
			webgpu_buffer->map = (void*)wgpuBufferGetConstMappedRange(webgpu_buffer->buffer, 0, WGPU_WHOLE_MAP_SIZE);
	}
	if(webgpu_buffer->map == PULSE_NULLPTR)
		return false;
	webgpu_buffer->current_map_mode = mode;
	*data = webgpu_buffer->map;
	return true;
}

void WebGPUUnmapBuffer(PulseBuffer buffer)
{
	WebGPUDevice* webgpu_device = WEBGPU_RETRIEVE_DRIVER_DATA_AS(buffer->device, WebGPUDevice*);
	WebGPUBuffer* webgpu_buffer = WEBGPU_RETRIEVE_DRIVER_DATA_AS(buffer, WebGPUBuffer*);
	if(webgpu_buffer->current_map_mode == PULSE_MAP_WRITE)
		wgpuQueueWriteBuffer(webgpu_device->queue, webgpu_buffer->buffer, 0, webgpu_buffer->map, buffer->size);
	else
		wgpuBufferUnmap(webgpu_buffer->buffer);
	webgpu_buffer->map = PULSE_NULLPTR;
}

bool WebGPUCopyBufferToBuffer(PulseCommandList cmd, const PulseBufferRegion* src, const PulseBufferRegion* dst)
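The comment in WebGPUMapBuffer points at the WebGPU best practice of skipping an explicit map for write-only traffic. In isolation, that direct path is just the following (a standalone sketch, not Pulse code; the helper name is made up):

#include <stddef.h>
#include <webgpu/webgpu.h>

// Sketch: for write-only data, wgpuQueueWriteBuffer stages and schedules the copy
// itself, so no wgpuBufferMapAsync round-trip is needed. The backend above emulates
// PULSE_MAP_WRITE with a host shadow buffer and flushes it this way on unmap.
static void upload_bytes(WGPUQueue queue, WGPUBuffer buffer, const void* bytes, size_t size)
{
	wgpuQueueWriteBuffer(queue, buffer, 0, bytes, size);
}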
@@ -27,4 +157,9 @@ bool WebGPUCopyBufferToImage(PulseCommandList cmd, const PulseBufferRegion* src,

void WebGPUDestroyBuffer(PulseDevice device, PulseBuffer buffer)
{
	PULSE_UNUSED(device);
	WebGPUBuffer* webgpu_buffer = WEBGPU_RETRIEVE_DRIVER_DATA_AS(buffer, WebGPUBuffer*);
	wgpuBufferRelease(webgpu_buffer->buffer);
	free(webgpu_buffer);
	free(buffer);
}
@@ -10,14 +10,16 @@
#include <webgpu/webgpu.h>

#include <Pulse.h>
#include "../../PulseInternal.h"

typedef struct WebGPUBuffer
{
	WGPUBuffer buffer;
	void* map;
	PulseMapMode current_map_mode;
} WebGPUBuffer;

PulseBuffer WebGPUCreateBuffer(PulseDevice device, const PulseBufferCreateInfo* create_infos);
bool WebGPUMapBuffer(PulseBuffer buffer, void** data);
bool WebGPUMapBuffer(PulseBuffer buffer, PulseMapMode mode, void** data);
void WebGPUUnmapBuffer(PulseBuffer buffer);
bool WebGPUCopyBufferToBuffer(PulseCommandList cmd, const PulseBufferRegion* src, const PulseBufferRegion* dst);
bool WebGPUCopyBufferToImage(PulseCommandList cmd, const PulseBufferRegion* src, const PulseImageRegion* dst);
@@ -41,15 +41,17 @@ PulseCommandList WebGPURequestCommandList(PulseDevice device, PulseCommandListUs
	return cmd;
}

#include <stdio.h>

static void WebGPUFenceCallback(WGPUQueueWorkDoneStatus status, void* userdata1, void* userdata2)
{
	PULSE_UNUSED(userdata2);
	WebGPUFence* webgpu_fence = (WebGPUFence*)userdata1;
	PulseCommandList cmd = (PulseCommandList)userdata2;
	if(status == WGPUQueueWorkDoneStatus_Success)
		atomic_store(&webgpu_fence->signal, true);
	puts("test");
	{
		if(webgpu_fence != PULSE_NULLPTR)
			atomic_store(&webgpu_fence->signal, true);
		cmd->state = PULSE_COMMAND_LIST_STATE_READY;
	}
}

bool WebGPUSubmitCommandList(PulseDevice device, PulseCommandList cmd, PulseFence fence)
@@ -60,17 +62,23 @@ bool WebGPUSubmitCommandList(PulseDevice device, PulseCommandList cmd, PulseFenc
	WGPUCommandBufferDescriptor command_buffer_descriptor = { 0 };
	WGPUCommandBuffer command_buffer = wgpuCommandEncoderFinish(webgpu_cmd->encoder, &command_buffer_descriptor);

	wgpuQueueSubmit(webgpu_device->queue, 1, &command_buffer);

	WebGPUFence* webgpu_fence = WEBGPU_RETRIEVE_DRIVER_DATA_AS(fence, WebGPUFence*);
	atomic_store(&webgpu_fence->signal, false);

	WGPUQueueWorkDoneCallbackInfo callback = { 0 };
	callback.mode = WGPUCallbackMode_AllowSpontaneous;
	callback.callback = WebGPUFenceCallback;
	callback.userdata1 = webgpu_fence;
	callback.userdata1 = PULSE_NULLPTR;
	callback.userdata2 = cmd;
	if(fence != PULSE_NULL_HANDLE)
	{
		WebGPUFence* webgpu_fence = WEBGPU_RETRIEVE_DRIVER_DATA_AS(fence, WebGPUFence*);
		callback.userdata1 = webgpu_fence;
		atomic_store(&webgpu_fence->signal, false);
		fence->cmd = cmd;
	}
	wgpuQueueOnSubmittedWorkDone(webgpu_device->queue, callback);

	cmd->state = PULSE_COMMAND_LIST_STATE_SENT;
	wgpuQueueSubmit(webgpu_device->queue, 1, &command_buffer);

	wgpuCommandBufferRelease(command_buffer);
	return true;
}
@@ -3,6 +3,7 @@
// For conditions of distribution and use, see copyright notice in LICENSE

#include <Pulse.h>
#include "../../PulseInternal.h"
#include "WebGPU.h"
#include "WebGPUDevice.h"
#include "WebGPUComputePass.h"
@@ -35,7 +35,7 @@
		device->adapter = adapter;
	}
#else
static uint64_t WebGPUScoreAdapter(WGPUInstance instance, WGPUAdapter adapter)
static uint64_t WebGPUScoreAdapter(WGPUAdapter adapter)
{
	uint64_t score = 0;
	WGPUAdapterInfo infos;
@@ -172,7 +172,7 @@ PulseDevice WebGPUCreateDevice(PulseBackend backend, PulseDevice* forbiden_devic
	{
		if(WebGPUIsDeviceForbidden(adapters[i], forbiden_devices, forbiden_devices_count))
			continue;
		uint64_t current_device_score = WebGPUScoreAdapter(instance, adapters[i]);
		uint64_t current_device_score = WebGPUScoreAdapter(adapters[i]);
		if(current_device_score > best_device_score)
		{
			best_device_score = current_device_score;
@@ -196,7 +196,7 @@ PulseDevice WebGPUCreateDevice(PulseBackend backend, PulseDevice* forbiden_devic

	WGPUDeviceLostCallbackInfo lost_callback = { 0 };
	lost_callback.callback = WebGPUDeviceLostCallback;
	lost_callback.mode = WGPUCallbackMode_AllowProcessEvents;
	lost_callback.mode = WGPUCallbackMode_AllowSpontaneous;
	lost_callback.userdata1 = device;
	lost_callback.userdata2 = backend;
	WGPUUncapturedErrorCallbackInfo uncaptured_callback = { 0 };
@@ -209,7 +209,7 @@ PulseDevice WebGPUCreateDevice(PulseBackend backend, PulseDevice* forbiden_devic
	descriptor.uncapturedErrorCallbackInfo = uncaptured_callback;
	WGPURequestDeviceCallbackInfo device_callback = { 0 };
	device_callback.callback = WebGPURequestDeviceCallback;
	device_callback.mode = WGPUCallbackMode_AllowProcessEvents;
	device_callback.mode = WGPUCallbackMode_AllowSpontaneous;
	device_callback.userdata1 = device;
	device_callback.userdata2 = backend;
	wgpuAdapterRequestDevice(device->adapter, &descriptor, device_callback);
@@ -35,11 +35,9 @@ bool WebGPUIsFenceReady(PulseDevice device, PulseFence fence)
{
	PULSE_UNUSED(device);
	WebGPUFence* webgpu_fence = WEBGPU_RETRIEVE_DRIVER_DATA_AS(fence, WebGPUFence*);
	return atomic_load(&webgpu_fence->signal) == true;
	return atomic_load(&webgpu_fence->signal);
}

#include <stdio.h>

bool WebGPUWaitForFences(PulseDevice device, const PulseFence* fences, uint32_t fences_count, bool wait_for_all)
{
	PULSE_UNUSED(device);
@@ -19,6 +19,7 @@ PULSE_API PulseBuffer PulseCreateBuffer(PulseDevice device, const PulseBufferCre
			return PULSE_NULL_HANDLE;
		}
	}

	PulseBuffer buffer = device->PFN_CreateBuffer(device, create_infos);
	if(buffer == PULSE_NULL_HANDLE)
		return PULSE_NULL_HANDLE;
@@ -28,11 +29,53 @@ PULSE_API PulseBuffer PulseCreateBuffer(PulseDevice device, const PulseBufferCre
	return buffer;
}

PULSE_API bool PulseMapBuffer(PulseBuffer buffer, void** data)
PULSE_API bool PulseMapBuffer(PulseBuffer buffer, PulseMapMode mode, void** data)
{
	PULSE_CHECK_HANDLE_RETVAL(buffer, false);
	PULSE_CHECK_PTR_RETVAL(data, false);
	bool res = buffer->device->PFN_MapBuffer(buffer, data);

	if(buffer->is_mapped)
	{
		if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(buffer->device->backend))
			PulseLogError(buffer->device->backend, "buffer is already mapped");
		PulseSetInternalError(PULSE_ERROR_MAP_FAILED);
		return false;
	}

	PulseFlags storage_flags = PULSE_BUFFER_USAGE_STORAGE_READ | PULSE_BUFFER_USAGE_STORAGE_WRITE;
	if((buffer->usage & storage_flags) != 0)
	{
		if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(buffer->device->backend))
			PulseLogError(buffer->device->backend, "cannot map a buffer that has been created with storage flags");
		PulseSetInternalError(PULSE_ERROR_MAP_FAILED);
		return false;
	}

	PulseFlags transfer_flags = PULSE_BUFFER_USAGE_TRANSFER_UPLOAD | PULSE_BUFFER_USAGE_TRANSFER_DOWNLOAD;
	if((buffer->usage & transfer_flags) == 0)
	{
		if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(buffer->device->backend))
			PulseLogError(buffer->device->backend, "cannot map a buffer that has not been created with upload or download flags");
		PulseSetInternalError(PULSE_ERROR_MAP_FAILED);
		return false;
	}

	if((buffer->usage & PULSE_BUFFER_USAGE_TRANSFER_UPLOAD) == 0 && mode == PULSE_MAP_WRITE)
	{
		if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(buffer->device->backend))
			PulseLogError(buffer->device->backend, "cannot map a buffer that has not been created with upload flags for writting");
		PulseSetInternalError(PULSE_ERROR_MAP_FAILED);
		return false;
	}
	if((buffer->usage & PULSE_BUFFER_USAGE_TRANSFER_DOWNLOAD) == 0 && mode == PULSE_MAP_READ)
	{
		if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(buffer->device->backend))
			PulseLogError(buffer->device->backend, "cannot map a buffer that has not been created with download flags for reading");
		PulseSetInternalError(PULSE_ERROR_MAP_FAILED);
		return false;
	}

	bool res = buffer->device->PFN_MapBuffer(buffer, mode, data);
	if(res)
		buffer->is_mapped = true;
	return res;
@@ -41,6 +84,14 @@ PULSE_API bool PulseMapBuffer(PulseBuffer buffer, void** data)
PULSE_API void PulseUnmapBuffer(PulseBuffer buffer)
{
	PULSE_CHECK_HANDLE(buffer);

	if(!buffer->is_mapped)
	{
		if(PULSE_IS_BACKEND_HIGH_LEVEL_DEBUG(buffer->device->backend))
			PulseLogError(buffer->device->backend, "buffer is not mapped");
		return;
	}

	buffer->device->PFN_UnmapBuffer(buffer);
	buffer->is_mapped = false;
}
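Taken together, the new PulseMapBuffer/PulseUnmapBuffer contract reads like the following caller-side sketch. The helper is hypothetical; only the signatures, flags and struct fields visible in this diff are assumed, any other PulseBufferCreateInfo fields are left zeroed, and error handling plus buffer destruction are omitted.

#include <string.h>
#include <Pulse.h>

// Hypothetical helper: upload a blob through a write-mapped transfer buffer.
static bool upload_bytes(PulseDevice device, const void* bytes, size_t size)
{
	PulseBufferCreateInfo create_infos = { 0 };
	create_infos.size = size;
	create_infos.usage = PULSE_BUFFER_USAGE_TRANSFER_UPLOAD; // required for PULSE_MAP_WRITE

	PulseBuffer buffer = PulseCreateBuffer(device, &create_infos);
	if(buffer == PULSE_NULL_HANDLE)
		return false;

	void* ptr = PULSE_NULLPTR;
	if(!PulseMapBuffer(buffer, PULSE_MAP_WRITE, &ptr)) // PULSE_MAP_READ needs TRANSFER_DOWNLOAD instead
		return false;
	memcpy(ptr, bytes, size);
	PulseUnmapBuffer(buffer); // the WebGPU backend flushes its shadow copy here via wgpuQueueWriteBuffer
	return true;              // (destruction of the buffer is omitted for brevity)
}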
@@ -61,35 +61,37 @@
		array[defrag_i] = array[defrag_i + 1]; \

#define PULSE_CHECK_COMMAND_LIST_STATE_RETVAL(cmd, retval) \
	if(cmd->state != PULSE_COMMAND_LIST_STATE_RECORDING) \
	{ \
		switch(cmd->state) \
	do { \
		if(cmd->state != PULSE_COMMAND_LIST_STATE_RECORDING) \
		{ \
			case PULSE_COMMAND_LIST_STATE_INVALID: \
				if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(cmd->device->backend)) \
					PulseLogError(cmd->device->backend, "command list is in invalid state"); \
				return retval; \
			case PULSE_COMMAND_LIST_STATE_READY: \
				if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(cmd->device->backend)) \
					PulseLogError(cmd->device->backend, "command list is not recording"); \
				return retval; \
			case PULSE_COMMAND_LIST_STATE_SENT: \
				if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(cmd->device->backend)) \
					PulseLogWarning(cmd->device->backend, "command list has already been submitted"); \
				return retval; \
			default: break; \
			switch(cmd->state) \
			{ \
				case PULSE_COMMAND_LIST_STATE_INVALID: \
					if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(cmd->device->backend)) \
						PulseLogError(cmd->device->backend, "command list is in invalid state"); \
					return retval; \
				case PULSE_COMMAND_LIST_STATE_READY: \
					if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(cmd->device->backend)) \
						PulseLogError(cmd->device->backend, "command list is not recording"); \
					return retval; \
				case PULSE_COMMAND_LIST_STATE_SENT: \
					if(PULSE_IS_BACKEND_LOW_LEVEL_DEBUG(cmd->device->backend)) \
						PulseLogWarning(cmd->device->backend, "command list has already been submitted"); \
					return retval; \
				default: break; \
			} \
		} \
	} \
	} while(0); \

#define PULSE_CHECK_COMMAND_LIST_STATE(cmd) PULSE_CHECK_COMMAND_LIST_STATE_RETVAL(cmd, )

#ifndef PULSE_STATIC_ASSERT
#ifdef __cplusplus
#if __cplusplus >= 201103L
	#define PULSE_STATIC_ASSERT(name, x) static_assert(x, #x)
	#define PULSE_STATIC_ASSERT(name, x) static_assert(x, #x)
#endif
#elif PULSE_C_VERSION >= 2023
	#define PULSE_STATIC_ASSERT(name, x) static_assert(x, #x)
	#define PULSE_STATIC_ASSERT(name, x) static_assert(x, #x)
#elif PULSE_C_VERSION >= 2011
	#define PULSE_STATIC_ASSERT(name, x) _Static_assert(x, #x)
#else
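The hunk above wraps PULSE_CHECK_COMMAND_LIST_STATE_RETVAL in do { ... } while(0), the usual fix for multi-statement macros that need to behave as a single statement. A self-contained illustration of the problem being avoided (hypothetical macros, not taken from Pulse):

#include <stdio.h>

// Without the do/while wrapper, the expansion is a brace block followed by the
// caller's ';', and an if/else around the macro no longer parses as intended.
#define LOG_AND_BAIL_BAD()  { puts("bad"); return; }             // would break the else below
#define LOG_AND_BAIL_GOOD() do { puts("bailing out"); return; } while(0)

void check(int ok)
{
	if(!ok)
		LOG_AND_BAIL_GOOD();   // expands to exactly one statement, so the ';' is consumed cleanly
	else
		puts("ok");            // with LOG_AND_BAIL_BAD() this 'else' would be orphaned and fail to compile
}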
@@ -28,5 +28,11 @@ PULSE_API bool PulseWaitForFences(PulseDevice device, const PulseFence* fences,
{
	PULSE_CHECK_HANDLE_RETVAL(device, false);
	PULSE_CHECK_PTR_RETVAL(fences, false);
	return device->PFN_WaitForFences(device, fences, fences_count, wait_for_all);
	bool res = device->PFN_WaitForFences(device, fences, fences_count, wait_for_all);
	if(res)
	{
		for(uint32_t i = 0; i < fences_count; i++)
			fences[i]->cmd->state = PULSE_COMMAND_LIST_STATE_READY;
	}
	return res;
}
@@ -25,7 +25,7 @@ typedef PulseCommandList (*PulseRequestCommandListPFN)(PulseDevice, PulseCommand
typedef bool (*PulseSubmitCommandListPFN)(PulseDevice, PulseCommandList, PulseFence);
typedef void (*PulseReleaseCommandListPFN)(PulseDevice, PulseCommandList);
typedef PulseBuffer (*PulseCreateBufferPFN)(PulseDevice, const PulseBufferCreateInfo*);
typedef bool (*PulseMapBufferPFN)(PulseBuffer, void**);
typedef bool (*PulseMapBufferPFN)(PulseBuffer, PulseMapMode, void**);
typedef void (*PulseUnmapBufferPFN)(PulseBuffer);
typedef void (*PulseDestroyBufferPFN)(PulseDevice, PulseBuffer);
typedef PulseImage (*PulseCreateImagePFN)(PulseDevice, const PulseImageCreateInfo*);