vkd3d: Do not allow synchronization primitives to fail.

In practice they never fail. If they fail, it means that there
is some underlying platform problem and there is little we can do
anyway. Under pthreads, the function prototypes allow returning
failure, but that is only used for "error checking" mutexes, which
we don't use.

On the other hand, error handling in vkd3d is rather inconsistent:
sometimes the errors are ignored, sometimes logged, sometimes
passed to the caller. It's hard to handle failures appropriately
if you can't even keep your state consistent, so I think it's
better to avoid trying: assume that synchronization primitives do
not fail, and at least log consistently if something goes wrong.
Author: Giovanni Mascellani, 2023-01-27 16:45:05 +01:00
Committed by: Alexandre Julliard
parent a66fe31fe5
commit 552926cfca
Notes (Alexandre Julliard, 2023-02-02 22:14:51 +01:00):
    Approved-by: Conor McCarthy (@cmccarthy)
    Approved-by: Henri Verbeet (@hverbeet)
    Approved-by: Alexandre Julliard (@julliard)
    Merge-Request: https://gitlab.winehq.org/wine/vkd3d/-/merge_requests/75
6 changed files with 129 additions and 355 deletions
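
As a rough sketch of the pattern this change adopts, assuming the pthread-based build: the wrapper bodies below paraphrase the pthread side of the header diff, while example_locked_update and the ERR stand-in are purely illustrative and not part of the commit.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for vkd3d's ERR() logging macro, used here only so the sketch compiles. */
#define ERR(...) fprintf(stderr, __VA_ARGS__)

struct vkd3d_mutex
{
    pthread_mutex_t lock;
};

/* The wrappers now return void: a failure indicates a broken platform,
 * so it is logged once and otherwise ignored. */
static inline void vkd3d_mutex_lock(struct vkd3d_mutex *lock)
{
    int ret;

    if ((ret = pthread_mutex_lock(&lock->lock)))
        ERR("Could not lock the mutex, error %d.\n", ret);
}

static inline void vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
{
    int ret;

    if ((ret = pthread_mutex_unlock(&lock->lock)))
        ERR("Could not unlock the mutex, error %d.\n", ret);
}

/* Hypothetical caller: no "int rc;", no error path, no state left half-updated. */
static void example_locked_update(struct vkd3d_mutex *mutex, unsigned int *counter)
{
    vkd3d_mutex_lock(mutex);
    ++*counter;
    vkd3d_mutex_unlock(mutex);
}

The Win32 wrappers in the header diff follow the same shape, logging GetLastError() instead of the pthread return value.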

@@ -33,17 +33,11 @@ HRESULT vkd3d_queue_create(struct d3d12_device *device,
 {
     const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
     struct vkd3d_queue *object;
-    int rc;
 
     if (!(object = vkd3d_malloc(sizeof(*object))))
         return E_OUTOFMEMORY;
 
-    if ((rc = vkd3d_mutex_init(&object->mutex)))
-    {
-        ERR("Failed to initialize mutex, error %d.\n", rc);
-        vkd3d_free(object);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_init(&object->mutex);
 
     object->completed_sequence_number = 0;
     object->submitted_sequence_number = 0;
@@ -71,10 +65,8 @@ void vkd3d_queue_destroy(struct vkd3d_queue *queue, struct d3d12_device *device)
 {
     const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
     unsigned int i;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&queue->mutex)))
-        ERR("Failed to lock mutex, error %d.\n", rc);
+    vkd3d_mutex_lock(&queue->mutex);
 
     for (i = 0; i < queue->semaphore_count; ++i)
         VK_CALL(vkDestroySemaphore(device->vk_device, queue->semaphores[i].vk_semaphore, NULL));
@@ -87,7 +79,6 @@ void vkd3d_queue_destroy(struct vkd3d_queue *queue, struct d3d12_device *device)
         VK_CALL(vkDestroySemaphore(device->vk_device, queue->old_vk_semaphores[i], NULL));
     }
 
-    if (!rc)
-        vkd3d_mutex_unlock(&queue->mutex);
+    vkd3d_mutex_unlock(&queue->mutex);
 
     vkd3d_mutex_destroy(&queue->mutex);
@@ -96,15 +87,9 @@ void vkd3d_queue_destroy(struct vkd3d_queue *queue, struct d3d12_device *device)
 
 VkQueue vkd3d_queue_acquire(struct vkd3d_queue *queue)
 {
-    int rc;
-
     TRACE("queue %p.\n", queue);
 
-    if ((rc = vkd3d_mutex_lock(&queue->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return VK_NULL_HANDLE;
-    }
+    vkd3d_mutex_lock(&queue->mutex);
 
     assert(queue->vk_queue);
     return queue->vk_queue;
@@ -148,13 +133,8 @@ static void vkd3d_queue_update_sequence_number(struct vkd3d_queue *queue,
     uint64_t completed_sequence_number;
     VkSemaphore vk_semaphore;
     unsigned int i, j;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&queue->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&queue->mutex);
 
     completed_sequence_number = queue->completed_sequence_number;
     queue->completed_sequence_number = max(sequence_number, queue->completed_sequence_number);
@@ -255,15 +235,10 @@ static HRESULT vkd3d_enqueue_gpu_fence(struct vkd3d_fence_worker *worker,
         struct vkd3d_queue *queue, uint64_t queue_sequence_number)
 {
     struct vkd3d_waiting_fence *waiting_fence;
-    int rc;
 
     TRACE("worker %p, fence %p, value %#"PRIx64".\n", worker, fence, value);
 
-    if ((rc = vkd3d_mutex_lock(&worker->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&worker->mutex);
 
     if (!vkd3d_array_reserve((void **)&worker->fences, &worker->fences_size,
             worker->fence_count + 1, sizeof(*worker->fences)))
@@ -349,24 +324,15 @@ static void *vkd3d_fence_worker_main(void *arg)
     struct vkd3d_waiting_fence *old_fences, *cur_fences = NULL;
     struct vkd3d_fence_worker *worker = arg;
     unsigned int i;
-    int rc;
 
     vkd3d_set_thread_name("vkd3d_fence");
 
     for (;;)
     {
-        if ((rc = vkd3d_mutex_lock(&worker->mutex)))
-        {
-            ERR("Failed to lock mutex, error %d.\n", rc);
-            break;
-        }
+        vkd3d_mutex_lock(&worker->mutex);
 
-        if (!worker->fence_count && !worker->should_exit && (rc = vkd3d_cond_wait(&worker->cond, &worker->mutex)))
-        {
-            ERR("Failed to wait on condition variable, error %d.\n", rc);
-            vkd3d_mutex_unlock(&worker->mutex);
-            break;
-        }
+        if (!worker->fence_count && !worker->should_exit)
+            vkd3d_cond_wait(&worker->cond, &worker->mutex);
 
         if (worker->should_exit)
         {
@@ -399,7 +365,6 @@ static HRESULT vkd3d_fence_worker_start(struct vkd3d_fence_worker *worker,
         struct vkd3d_queue *queue, struct d3d12_device *device)
 {
     HRESULT hr;
-    int rc;
 
     TRACE("worker %p.\n", worker);
 
@@ -414,18 +379,9 @@ static HRESULT vkd3d_fence_worker_start(struct vkd3d_fence_worker *worker,
     worker->wait_for_gpu_fence = device->vk_info.KHR_timeline_semaphore
             ? vkd3d_wait_for_gpu_timeline_semaphore : vkd3d_wait_for_gpu_fence;
 
-    if ((rc = vkd3d_mutex_init(&worker->mutex)))
-    {
-        ERR("Failed to initialize mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_init(&worker->mutex);
 
-    if ((rc = vkd3d_cond_init(&worker->cond)))
-    {
-        ERR("Failed to initialize condition variable, error %d.\n", rc);
-        vkd3d_mutex_destroy(&worker->mutex);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_cond_init(&worker->cond);
 
     if (FAILED(hr = vkd3d_create_thread(device->vkd3d_instance,
             vkd3d_fence_worker_main, worker, &worker->thread)))
@@ -441,15 +397,10 @@ static HRESULT vkd3d_fence_worker_stop(struct vkd3d_fence_worker *worker,
         struct d3d12_device *device)
 {
     HRESULT hr;
-    int rc;
 
     TRACE("worker %p.\n", worker);
 
-    if ((rc = vkd3d_mutex_lock(&worker->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&worker->mutex);
 
     worker->should_exit = true;
     vkd3d_cond_signal(&worker->cond);
@@ -513,15 +464,10 @@ static VkResult d3d12_fence_create_vk_fence(struct d3d12_fence *fence, VkFence *
     VkFenceCreateInfo fence_info;
     unsigned int i;
     VkResult vr;
-    int rc;
 
     *vk_fence = VK_NULL_HANDLE;
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        goto create_fence;
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     for (i = 0; i < ARRAY_SIZE(fence->old_vk_fences); ++i)
     {
@@ -537,7 +483,6 @@ static VkResult d3d12_fence_create_vk_fence(struct d3d12_fence *fence, VkFence *
     if (*vk_fence)
         return VK_SUCCESS;
 
-create_fence:
     vk_procs = &device->vk_procs;
 
     fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
@@ -596,13 +541,8 @@ static void d3d12_fence_destroy_vk_objects(struct d3d12_fence *fence)
     const struct vkd3d_vk_device_procs *vk_procs;
     struct d3d12_device *device = fence->device;
     unsigned int i;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     vk_procs = &device->vk_procs;
 
@@ -655,13 +595,7 @@ static struct vkd3d_signaled_semaphore *d3d12_fence_acquire_vk_semaphore_locked(
 
 static void d3d12_fence_remove_vk_semaphore(struct d3d12_fence *fence, struct vkd3d_signaled_semaphore *semaphore)
 {
-    int rc;
-
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     assert(semaphore->u.binary.is_acquired);
 
@@ -672,13 +606,7 @@ static void d3d12_fence_remove_vk_semaphore(struct d3d12_fence *fence, struct vk
 
 static void d3d12_fence_release_vk_semaphore(struct d3d12_fence *fence, struct vkd3d_signaled_semaphore *semaphore)
 {
-    int rc;
-
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     assert(semaphore->u.binary.is_acquired);
     semaphore->u.binary.is_acquired = false;
@@ -699,13 +627,7 @@ static void d3d12_fence_update_pending_value_locked(struct d3d12_fence *fence)
 
 static HRESULT d3d12_fence_update_pending_value(struct d3d12_fence *fence)
 {
-    int rc;
-
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     d3d12_fence_update_pending_value_locked(fence);
 
@@ -719,13 +641,8 @@ static HRESULT d3d12_device_add_blocked_command_queues(struct d3d12_device *devi
 {
     HRESULT hr = S_OK;
     unsigned int i;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&device->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&device->mutex);
 
     if ((i = ARRAY_SIZE(device->blocked_queues) - device->blocked_queue_count) < count)
     {
@@ -745,15 +662,10 @@ static HRESULT d3d12_device_flush_blocked_queues_once(struct d3d12_device *devic
 {
     struct d3d12_command_queue *blocked_queues[VKD3D_MAX_DEVICE_BLOCKED_QUEUES];
     unsigned int i, blocked_queue_count;
-    int rc;
 
     *flushed_any = false;
 
-    if ((rc = vkd3d_mutex_lock(&device->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&device->mutex);
 
     /* Flush any ops unblocked by a new pending value. These cannot be flushed
      * with the device locked, so move the queue pointers to a local array. */
@@ -799,15 +711,10 @@ static HRESULT d3d12_fence_add_vk_semaphore(struct d3d12_fence *fence, VkSemapho
         VkFence vk_fence, uint64_t value, const struct vkd3d_queue *signalling_queue)
 {
     struct vkd3d_signaled_semaphore *semaphore;
-    int rc;
 
     TRACE("fence %p, value %#"PRIx64".\n", fence, value);
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     d3d12_fence_garbage_collect_vk_semaphores_locked(fence, false);
 
@@ -874,13 +781,8 @@ static HRESULT d3d12_fence_signal(struct d3d12_fence *fence, uint64_t value, VkF
     struct d3d12_device *device = fence->device;
     struct vkd3d_signaled_semaphore *current;
    unsigned int i;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     fence->value = value;
 
@@ -922,13 +824,8 @@ static uint64_t d3d12_fence_add_pending_timeline_signal(struct d3d12_fenc
         const struct vkd3d_queue *signalling_queue)
 {
     struct vkd3d_signaled_semaphore *semaphore;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     if (!vkd3d_array_reserve((void **)&fence->semaphores, &fence->semaphores_size,
             fence->semaphore_count + 1, sizeof(*fence->semaphores)))
@@ -967,13 +864,8 @@ static void d3d12_fence_signal_timeline_semaphore(struct d3d12_fence *fence, uin
 {
     bool did_signal;
     unsigned int i;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     /* With multiple fence workers, it is possible that signal calls are out of
      * order. The physical value itself is monotonic, but we need to make sure
@@ -1061,7 +953,6 @@ static ULONG STDMETHODCALLTYPE d3d12_fence_Release(ID3D12Fence *iface)
 static void d3d12_fence_decref(struct d3d12_fence *fence)
 {
     ULONG internal_refcount = InterlockedDecrement(&fence->internal_refcount);
-    int rc;
 
     if (!internal_refcount)
     {
@@ -1073,8 +964,7 @@ static void d3d12_fence_decref(struct d3d12_fence *fence)
 
         vkd3d_free(fence->events);
         vkd3d_free(fence->semaphores);
-        if ((rc = vkd3d_mutex_destroy(&fence->mutex)))
-            ERR("Failed to destroy mutex, error %d.\n", rc);
+        vkd3d_mutex_destroy(&fence->mutex);
         vkd3d_cond_destroy(&fence->null_event_cond);
 
         vkd3d_free(fence);
@@ -1136,15 +1026,10 @@ static UINT64 STDMETHODCALLTYPE d3d12_fence_GetCompletedValue(ID3D12Fence *iface
 {
     struct d3d12_fence *fence = impl_from_ID3D12Fence(iface);
     uint64_t completed_value;
-    int rc;
 
     TRACE("iface %p.\n", iface);
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return 0;
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     completed_value = fence->value;
     vkd3d_mutex_unlock(&fence->mutex);
 
     return completed_value;
@@ -1156,15 +1041,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i
     struct d3d12_fence *fence = impl_from_ID3D12Fence(iface);
     unsigned int i;
     bool latch = false;
-    int rc;
 
     TRACE("iface %p, value %#"PRIx64", event %p.\n", iface, value, event);
 
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     if (value <= fence->value)
     {
@@ -1215,13 +1095,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i
 
 static HRESULT d3d12_fence_signal_cpu_timeline_semaphore(struct d3d12_fence *fence, uint64_t value)
 {
-    int rc;
-
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&fence->mutex);
 
     fence->value = value;
     d3d12_fence_signal_external_events_locked(fence);
@@ -1276,7 +1150,6 @@ static HRESULT d3d12_fence_init(struct d3d12_fence *fence, struct d3d12_device *
     const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
     VkResult vr;
     HRESULT hr;
-    int rc;
 
     fence->ID3D12Fence_iface.lpVtbl = &d3d12_fence_vtbl;
     fence->internal_refcount = 1;
@@ -1285,18 +1158,9 @@ static HRESULT d3d12_fence_init(struct d3d12_fence *fence, struct d3d12_device *
     fence->value = initial_value;
     fence->max_pending_value = initial_value;
 
-    if ((rc = vkd3d_mutex_init(&fence->mutex)))
-    {
-        ERR("Failed to initialize mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_init(&fence->mutex);
 
-    if ((rc = vkd3d_cond_init(&fence->null_event_cond)))
-    {
-        ERR("Failed to initialize cond variable, error %d.\n", rc);
-        hr = hresult_from_errno(rc);
-        goto fail_destroy_mutex;
-    }
+    vkd3d_cond_init(&fence->null_event_cond);
 
     if (flags)
         FIXME("Ignoring flags %#x.\n", flags);
@@ -1335,7 +1199,6 @@ fail_destroy_timeline_semaphore:
     VK_CALL(vkDestroySemaphore(device->vk_device, fence->timeline_semaphore, NULL));
 fail_destroy_null_cond:
     vkd3d_cond_destroy(&fence->null_event_cond);
-fail_destroy_mutex:
     vkd3d_mutex_destroy(&fence->mutex);
 
     return hr;
@@ -6294,7 +6157,6 @@ static void STDMETHODCALLTYPE d3d12_command_queue_ExecuteCommandLists(ID3D12Comm
     struct vkd3d_cs_op_data *op;
     VkCommandBuffer *buffers;
     unsigned int i;
-    int rc;
 
     TRACE("iface %p, command_list_count %u, command_lists %p.\n",
             iface, command_list_count, command_lists);
@@ -6323,11 +6185,7 @@ static void STDMETHODCALLTYPE d3d12_command_queue_ExecuteCommandLists(ID3D12Comm
         buffers[i] = cmd_list->vk_command_buffer;
     }
 
-    if ((rc = vkd3d_mutex_lock(&command_queue->op_mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&command_queue->op_mutex);
 
     if (!command_queue->ops_count)
     {
@@ -6372,15 +6230,10 @@ static HRESULT vkd3d_enqueue_timeline_semaphore(struct vkd3d_fence_worker *worke
         struct d3d12_fence *fence, uint64_t value, struct vkd3d_queue *queue)
 {
     struct vkd3d_waiting_fence *waiting_fence;
-    int rc;
 
     TRACE("worker %p, fence %p, value %#"PRIx64".\n", worker, fence, value);
 
-    if ((rc = vkd3d_mutex_lock(&worker->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&worker->mutex);
 
     if (!vkd3d_array_reserve((void **)&worker->fences, &worker->fences_size,
             worker->fence_count + 1, sizeof(*worker->fences)))
@@ -6410,15 +6263,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_command_queue_Signal(ID3D12CommandQueue *
     struct d3d12_fence *fence = unsafe_impl_from_ID3D12Fence(fence_iface);
     struct vkd3d_cs_op_data *op;
     HRESULT hr = S_OK;
-    int rc;
 
     TRACE("iface %p, fence %p, value %#"PRIx64".\n", iface, fence_iface, value);
 
-    if ((rc = vkd3d_mutex_lock(&command_queue->op_mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&command_queue->op_mutex);
 
     if (!command_queue->ops_count)
     {
@@ -6757,21 +6605,11 @@ static HRESULT STDMETHODCALLTYPE d3d12_command_queue_Wait(ID3D12CommandQueue *if
     struct d3d12_fence *fence = unsafe_impl_from_ID3D12Fence(fence_iface);
     struct vkd3d_cs_op_data *op;
     HRESULT hr = S_OK;
-    int rc;
 
     TRACE("iface %p, fence %p, value %#"PRIx64".\n", iface, fence_iface, value);
 
-    if ((rc = vkd3d_mutex_lock(&command_queue->op_mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
-    if ((rc = vkd3d_mutex_lock(&fence->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        hr = hresult_from_errno(rc);
-        goto done;
-    }
+    vkd3d_mutex_lock(&command_queue->op_mutex);
+    vkd3d_mutex_lock(&fence->mutex);
 
     if (!command_queue->ops_count && value <= fence->max_pending_value)
     {
@@ -6940,7 +6778,6 @@ static bool d3d12_command_queue_flush_ops(struct d3d12_command_queue *queue, boo
     struct d3d12_fence *fence;
     bool flushed_all = false;
     unsigned int i;
-    int rc;
 
     if (!queue->ops_count)
         return true;
@@ -6951,11 +6788,7 @@ static bool d3d12_command_queue_flush_ops(struct d3d12_command_queue *queue, boo
     if (queue->is_flushing)
         return true;
 
-    if ((rc = vkd3d_mutex_lock(&queue->op_mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return true;
-    }
+    vkd3d_mutex_lock(&queue->op_mutex);
 
     /* Currently only required for d3d12_command_queue_signal(), but set it here anyway. */
     queue->is_flushing = true;
@@ -7008,7 +6841,6 @@ static HRESULT d3d12_command_queue_init(struct d3d12_command_queue *queue,
         struct d3d12_device *device, const D3D12_COMMAND_QUEUE_DESC *desc)
 {
     HRESULT hr;
-    int rc;
 
     queue->ID3D12CommandQueue_iface.lpVtbl = &d3d12_command_queue_vtbl;
     queue->refcount = 1;
@@ -7042,11 +6874,7 @@ static HRESULT d3d12_command_queue_init(struct d3d12_command_queue *queue,
     if (FAILED(hr = vkd3d_private_store_init(&queue->private_store)))
         return hr;
 
-    if ((rc = vkd3d_mutex_init(&queue->op_mutex)) < 0)
-    {
-        hr = hresult_from_errno(rc);
-        goto fail_destroy_private_store;
-    }
+    vkd3d_mutex_init(&queue->op_mutex);
 
     if (FAILED(hr = vkd3d_fence_worker_start(&queue->fence_worker, queue->vkd3d_queue, device)))
         goto fail_destroy_op_mutex;
@@ -7057,7 +6885,6 @@ static HRESULT d3d12_command_queue_init(struct d3d12_command_queue *queue,
 
 fail_destroy_op_mutex:
     vkd3d_mutex_destroy(&queue->op_mutex);
-fail_destroy_private_store:
     vkd3d_private_store_destroy(&queue->private_store);
     return hr;
 }

@@ -2052,13 +2052,8 @@ static HRESULT d3d12_device_init_pipeline_cache(struct d3d12_device *device)
     const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
     VkPipelineCacheCreateInfo cache_info;
     VkResult vr;
-    int rc;
 
-    if ((rc = vkd3d_mutex_init(&device->mutex)))
-    {
-        ERR("Failed to initialize mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_init(&device->mutex);
 
     cache_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
     cache_info.pNext = NULL;
@@ -2149,17 +2144,12 @@ D3D12_GPU_VIRTUAL_ADDRESS vkd3d_gpu_va_allocator_allocate(struct vkd3d_gpu_va_al
         size_t alignment, size_t size, void *ptr)
 {
     D3D12_GPU_VIRTUAL_ADDRESS address;
-    int rc;
 
     if (size > ~(size_t)0 - (alignment - 1))
         return 0;
     size = align(size, alignment);
 
-    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return 0;
-    }
+    vkd3d_mutex_lock(&allocator->mutex);
 
     if (size <= VKD3D_VA_SLAB_SIZE && allocator->free_slab)
         address = vkd3d_gpu_va_allocator_allocate_slab(allocator, size, ptr);
@@ -2225,7 +2215,6 @@ void *vkd3d_gpu_va_allocator_dereference(struct vkd3d_gpu_va_allocato
         D3D12_GPU_VIRTUAL_ADDRESS address)
 {
     void *ret;
-    int rc;
 
     /* If we land in the non-fallback region, dereferencing VA is lock-less.
      * The base pointer is immutable, and the only way we can have a data race
@@ -2237,11 +2226,7 @@ void *vkd3d_gpu_va_allocator_dereference(struct vkd3d_gpu_va_allocato
         return vkd3d_gpu_va_allocator_dereference_slab(allocator, address);
 
     /* Slow fallback. */
-    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return NULL;
-    }
+    vkd3d_mutex_lock(&allocator->mutex);
 
     ret = vkd3d_gpu_va_allocator_dereference_fallback(allocator, address);
 
@@ -2298,13 +2283,7 @@ static void vkd3d_gpu_va_allocator_free_fallback(struct vkd3d_gpu_va_allocator *
 
 void vkd3d_gpu_va_allocator_free(struct vkd3d_gpu_va_allocator *allocator, D3D12_GPU_VIRTUAL_ADDRESS address)
 {
-    int rc;
-
-    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&allocator->mutex);
 
     if (address < VKD3D_VA_FALLBACK_BASE)
     {
@@ -2321,7 +2300,6 @@ void vkd3d_gpu_va_allocator_free(struct vkd3d_gpu_va_allocator *allocator, D3D12
 static bool vkd3d_gpu_va_allocator_init(struct vkd3d_gpu_va_allocator *allocator)
 {
     unsigned int i;
-    int rc;
 
     memset(allocator, 0, sizeof(*allocator));
     allocator->fallback_floor = VKD3D_VA_FALLBACK_BASE;
@@ -2341,25 +2319,14 @@ static bool vkd3d_gpu_va_allocator_init(struct vkd3d_gpu_va_allocator
         allocator->slabs[i].ptr = &allocator->slabs[i + 1];
     }
 
-    if ((rc = vkd3d_mutex_init(&allocator->mutex)))
-    {
-        ERR("Failed to initialize mutex, error %d.\n", rc);
-        vkd3d_free(allocator->slabs);
-        return false;
-    }
+    vkd3d_mutex_init(&allocator->mutex);
 
     return true;
 }
 
 static void vkd3d_gpu_va_allocator_cleanup(struct vkd3d_gpu_va_allocator *allocator)
 {
-    int rc;
-
-    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&allocator->mutex);
 
     vkd3d_free(allocator->slabs);
     vkd3d_free(allocator->fallback_allocations);
     vkd3d_mutex_unlock(&allocator->mutex);

@@ -443,15 +443,8 @@ static HRESULT d3d12_heap_map(struct d3d12_heap *heap, uint64_t offset,
     struct d3d12_device *device = heap->device;
     HRESULT hr = S_OK;
     VkResult vr;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&heap->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        if (data)
-            *data = NULL;
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&heap->mutex);
 
     assert(!resource->map_count || heap->map_ptr);
 
@@ -501,13 +494,8 @@ static HRESULT d3d12_heap_map(struct d3d12_heap *heap, uint64_t offset,
 static void d3d12_heap_unmap(struct d3d12_heap *heap, struct d3d12_resource *resource)
 {
     struct d3d12_device *device = heap->device;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&heap->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return;
-    }
+    vkd3d_mutex_lock(&heap->mutex);
 
     if (!resource->map_count)
     {
@@ -570,7 +558,6 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
     VkMemoryRequirements memory_requirements;
     VkDeviceSize vk_memory_size;
     HRESULT hr;
-    int rc;
 
     heap->ID3D12Heap_iface.lpVtbl = &d3d12_heap_vtbl;
     heap->refcount = 1;
@@ -596,11 +583,7 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
     if (FAILED(hr = validate_heap_desc(&heap->desc, resource)))
         return hr;
 
-    if ((rc = vkd3d_mutex_init(&heap->mutex)))
-    {
-        ERR("Failed to initialize mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_init(&heap->mutex);
 
     if (FAILED(hr = vkd3d_private_store_init(&heap->private_store)))
     {

@@ -1689,14 +1689,8 @@ HRESULT vkd3d_render_pass_cache_find(struct vkd3d_render_pass_cache *cache,
     bool found = false;
     HRESULT hr = S_OK;
     unsigned int i;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&device->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        *vk_render_pass = VK_NULL_HANDLE;
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&device->mutex);
 
     for (i = 0; i < cache->render_pass_count; ++i)
     {
@@ -3371,12 +3365,11 @@ static VkPipeline d3d12_pipeline_state_find_compiled_pipeline(const struct d3d12
     struct d3d12_device *device = state->device;
     VkPipeline vk_pipeline = VK_NULL_HANDLE;
     struct vkd3d_compiled_pipeline *current;
-    int rc;
 
     *vk_render_pass = VK_NULL_HANDLE;
 
-    if (!(rc = vkd3d_mutex_lock(&device->mutex)))
-    {
-        LIST_FOR_EACH_ENTRY(current, &graphics->compiled_pipelines, struct vkd3d_compiled_pipeline, entry)
-        {
-            if (!memcmp(&current->key, key, sizeof(*key)))
+    vkd3d_mutex_lock(&device->mutex);
+
+    LIST_FOR_EACH_ENTRY(current, &graphics->compiled_pipelines, struct vkd3d_compiled_pipeline, entry)
+    {
+        if (!memcmp(&current->key, key, sizeof(*key)))
@@ -3386,12 +3379,8 @@ static VkPipeline d3d12_pipeline_state_find_compiled_pipeline(const struct d3d12
-                break;
-            }
-        }
-        vkd3d_mutex_unlock(&device->mutex);
-    }
-    else
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-    }
+            break;
+        }
+    }
+
+    vkd3d_mutex_unlock(&device->mutex);
 
     return vk_pipeline;
 }
@@ -3402,7 +3391,6 @@ static bool d3d12_pipeline_state_put_pipeline_to_cache(struct d3d12_pipeline_sta
     struct d3d12_graphics_pipeline_state *graphics = &state->u.graphics;
     struct vkd3d_compiled_pipeline *compiled_pipeline, *current;
     struct d3d12_device *device = state->device;
-    int rc;
 
     if (!(compiled_pipeline = vkd3d_malloc(sizeof(*compiled_pipeline))))
         return false;
@@ -3411,12 +3399,7 @@ static bool d3d12_pipeline_state_put_pipeline_to_cache(struct d3d12_pipeline_sta
     compiled_pipeline->vk_pipeline = vk_pipeline;
     compiled_pipeline->vk_render_pass = vk_render_pass;
 
-    if ((rc = vkd3d_mutex_lock(&device->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        vkd3d_free(compiled_pipeline);
-        return false;
-    }
+    vkd3d_mutex_lock(&device->mutex);
 
     LIST_FOR_EACH_ENTRY(current, &graphics->compiled_pipelines, struct vkd3d_compiled_pipeline, entry)
     {

@@ -948,16 +948,11 @@ HRESULT vkd3d_get_private_data(struct vkd3d_private_store *store,
     const struct vkd3d_private_data *data;
     HRESULT hr = S_OK;
     unsigned int size;
-    int rc;
 
     if (!out_size)
         return E_INVALIDARG;
 
-    if ((rc = vkd3d_mutex_lock(&store->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&store->mutex);
 
     if (!(data = vkd3d_private_store_get_private_data(store, tag)))
     {
@@ -990,13 +985,8 @@ HRESULT vkd3d_set_private_data(struct vkd3d_private_store *store,
         const GUID *tag, unsigned int data_size, const void *data)
 {
     HRESULT hr;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&store->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&store->mutex);
 
     hr = vkd3d_private_store_set_private_data(store, tag, data, data_size, false);
 
@@ -1009,13 +999,8 @@ HRESULT vkd3d_set_private_data_interface(struct vkd3d_private_store *store,
 {
     const void *data = object ? object : (void *)&object;
     HRESULT hr;
-    int rc;
 
-    if ((rc = vkd3d_mutex_lock(&store->mutex)))
-    {
-        ERR("Failed to lock mutex, error %d.\n", rc);
-        return hresult_from_errno(rc);
-    }
+    vkd3d_mutex_lock(&store->mutex);
 
     hr = vkd3d_private_store_set_private_data(store, tag, data, sizeof(object), !!object);
 

@@ -207,56 +207,52 @@ struct vkd3d_cond
     CONDITION_VARIABLE cond;
 };
 
-static inline int vkd3d_mutex_init(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_init(struct vkd3d_mutex *lock)
 {
     InitializeCriticalSection(&lock->lock);
-    return 0;
 }
 
-static inline int vkd3d_mutex_lock(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_lock(struct vkd3d_mutex *lock)
 {
     EnterCriticalSection(&lock->lock);
-    return 0;
 }
 
-static inline int vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
 {
     LeaveCriticalSection(&lock->lock);
-    return 0;
 }
 
-static inline int vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
 {
     DeleteCriticalSection(&lock->lock);
-    return 0;
 }
 
-static inline int vkd3d_cond_init(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_init(struct vkd3d_cond *cond)
 {
     InitializeConditionVariable(&cond->cond);
-    return 0;
 }
 
-static inline int vkd3d_cond_signal(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_signal(struct vkd3d_cond *cond)
 {
     WakeConditionVariable(&cond->cond);
-    return 0;
 }
 
-static inline int vkd3d_cond_broadcast(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_broadcast(struct vkd3d_cond *cond)
 {
     WakeAllConditionVariable(&cond->cond);
-    return 0;
 }
 
-static inline int vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
+static inline void vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
 {
-    return !SleepConditionVariableCS(&cond->cond, &lock->lock, INFINITE);
+    BOOL ret;
+
+    ret = SleepConditionVariableCS(&cond->cond, &lock->lock, INFINITE);
+    if (!ret)
+        ERR("Could not sleep on the condition variable, error %u.\n", GetLastError());
 }
 
-static inline int vkd3d_cond_destroy(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_destroy(struct vkd3d_cond *cond)
 {
-    return 0;
 }
 
 #else /* _WIN32 */
@@ -280,49 +276,85 @@ struct vkd3d_cond
 };
 
-static inline int vkd3d_mutex_init(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_init(struct vkd3d_mutex *lock)
 {
-    return pthread_mutex_init(&lock->lock, NULL);
+    int ret;
+
+    ret = pthread_mutex_init(&lock->lock, NULL);
+    if (ret)
+        ERR("Could not initialize the mutex, error %d.\n", ret);
 }
 
-static inline int vkd3d_mutex_lock(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_lock(struct vkd3d_mutex *lock)
 {
-    return pthread_mutex_lock(&lock->lock);
+    int ret;
+
+    ret = pthread_mutex_lock(&lock->lock);
+    if (ret)
+        ERR("Could not lock the mutex, error %d.\n", ret);
 }
 
-static inline int vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
 {
-    return pthread_mutex_unlock(&lock->lock);
+    int ret;
+
+    ret = pthread_mutex_unlock(&lock->lock);
+    if (ret)
+        ERR("Could not unlock the mutex, error %d.\n", ret);
 }
 
-static inline int vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
+static inline void vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
 {
-    return pthread_mutex_destroy(&lock->lock);
+    int ret;
+
+    ret = pthread_mutex_destroy(&lock->lock);
+    if (ret)
+        ERR("Could not destroy the mutex, error %d.\n", ret);
 }
 
-static inline int vkd3d_cond_init(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_init(struct vkd3d_cond *cond)
 {
-    return pthread_cond_init(&cond->cond, NULL);
+    int ret;
+
+    ret = pthread_cond_init(&cond->cond, NULL);
+    if (ret)
+        ERR("Could not initialize the condition variable, error %d.\n", ret);
 }
 
-static inline int vkd3d_cond_signal(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_signal(struct vkd3d_cond *cond)
 {
-    return pthread_cond_signal(&cond->cond);
+    int ret;
+
+    ret = pthread_cond_signal(&cond->cond);
+    if (ret)
+        ERR("Could not signal the condition variable, error %d.\n", ret);
 }
 
-static inline int vkd3d_cond_broadcast(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_broadcast(struct vkd3d_cond *cond)
 {
-    return pthread_cond_broadcast(&cond->cond);
+    int ret;
+
+    ret = pthread_cond_broadcast(&cond->cond);
+    if (ret)
+        ERR("Could not broadcast the condition variable, error %d.\n", ret);
 }
 
-static inline int vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
+static inline void vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
 {
-    return pthread_cond_wait(&cond->cond, &lock->lock);
+    int ret;
+
+    ret = pthread_cond_wait(&cond->cond, &lock->lock);
    if (ret)
+        ERR("Could not wait on the condition variable, error %d.\n", ret);
 }
 
-static inline int vkd3d_cond_destroy(struct vkd3d_cond *cond)
+static inline void vkd3d_cond_destroy(struct vkd3d_cond *cond)
 {
-    return pthread_cond_destroy(&cond->cond);
+    int ret;
+
+    ret = pthread_cond_destroy(&cond->cond);
    if (ret)
+        ERR("Could not destroy the condition variable, error %d.\n", ret);
 }
 
 #endif /* _WIN32 */
@@ -447,14 +479,11 @@ static inline void vkd3d_private_data_destroy(struct vkd3d_private_data *data)
 
 static inline HRESULT vkd3d_private_store_init(struct vkd3d_private_store *store)
 {
-    int rc;
-
     list_init(&store->content);
 
-    if ((rc = vkd3d_mutex_init(&store->mutex)))
-        ERR("Failed to initialize mutex, error %d.\n", rc);
+    vkd3d_mutex_init(&store->mutex);
 
-    return hresult_from_errno(rc);
+    return S_OK;
 }
 
 static inline void vkd3d_private_store_destroy(struct vkd3d_private_store *store)