vkd3d: Do not allow synchronization primitives to fail.

In practice they never fail. If they fail, it means that there
is some underlying platform problem and there is little we can do
anyway. Under pthreads, the function prototypes allow returning a
failure code, but that is only relevant for "error checking"
mutexes, which we don't use.

On the other hand, error handling in vkd3d is rather inconsistent:
sometimes the errors are ignored, sometimes logged, sometimes
passed to the caller. It's hard to handle failures appropriately
if you can't even keep your state consistent, so I think it's
better to avoid trying, assume that synchronization primitives do
not fail and at least have consistent logging if something goes
wrong.
This commit is contained in:
Giovanni Mascellani
2023-01-27 16:45:05 +01:00
committed by Alexandre Julliard
parent a66fe31fe5
commit 552926cfca
Notes: Alexandre Julliard 2023-02-02 22:14:51 +01:00
Approved-by: Conor McCarthy (@cmccarthy)
Approved-by: Henri Verbeet (@hverbeet)
Approved-by: Alexandre Julliard (@julliard)
Merge-Request: https://gitlab.winehq.org/wine/vkd3d/-/merge_requests/75
6 changed files with 129 additions and 355 deletions

File diff suppressed because it is too large Load Diff

View File

@ -2052,13 +2052,8 @@ static HRESULT d3d12_device_init_pipeline_cache(struct d3d12_device *device)
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
VkPipelineCacheCreateInfo cache_info;
VkResult vr;
int rc;
if ((rc = vkd3d_mutex_init(&device->mutex)))
{
ERR("Failed to initialize mutex, error %d.\n", rc);
return hresult_from_errno(rc);
}
vkd3d_mutex_init(&device->mutex);
cache_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
cache_info.pNext = NULL;
@ -2149,17 +2144,12 @@ D3D12_GPU_VIRTUAL_ADDRESS vkd3d_gpu_va_allocator_allocate(struct vkd3d_gpu_va_al
size_t alignment, size_t size, void *ptr)
{
D3D12_GPU_VIRTUAL_ADDRESS address;
int rc;
if (size > ~(size_t)0 - (alignment - 1))
return 0;
size = align(size, alignment);
if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return 0;
}
vkd3d_mutex_lock(&allocator->mutex);
if (size <= VKD3D_VA_SLAB_SIZE && allocator->free_slab)
address = vkd3d_gpu_va_allocator_allocate_slab(allocator, size, ptr);
@ -2225,7 +2215,6 @@ void *vkd3d_gpu_va_allocator_dereference(struct vkd3d_gpu_va_allocator *allocato
D3D12_GPU_VIRTUAL_ADDRESS address)
{
void *ret;
int rc;
/* If we land in the non-fallback region, dereferencing VA is lock-less.
* The base pointer is immutable, and the only way we can have a data race
@ -2237,11 +2226,7 @@ void *vkd3d_gpu_va_allocator_dereference(struct vkd3d_gpu_va_allocator *allocato
return vkd3d_gpu_va_allocator_dereference_slab(allocator, address);
/* Slow fallback. */
if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return NULL;
}
vkd3d_mutex_lock(&allocator->mutex);
ret = vkd3d_gpu_va_allocator_dereference_fallback(allocator, address);
@ -2298,13 +2283,7 @@ static void vkd3d_gpu_va_allocator_free_fallback(struct vkd3d_gpu_va_allocator *
void vkd3d_gpu_va_allocator_free(struct vkd3d_gpu_va_allocator *allocator, D3D12_GPU_VIRTUAL_ADDRESS address)
{
int rc;
if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return;
}
vkd3d_mutex_lock(&allocator->mutex);
if (address < VKD3D_VA_FALLBACK_BASE)
{
@ -2321,7 +2300,6 @@ void vkd3d_gpu_va_allocator_free(struct vkd3d_gpu_va_allocator *allocator, D3D12
static bool vkd3d_gpu_va_allocator_init(struct vkd3d_gpu_va_allocator *allocator)
{
unsigned int i;
int rc;
memset(allocator, 0, sizeof(*allocator));
allocator->fallback_floor = VKD3D_VA_FALLBACK_BASE;
@ -2341,25 +2319,14 @@ static bool vkd3d_gpu_va_allocator_init(struct vkd3d_gpu_va_allocator *allocator
allocator->slabs[i].ptr = &allocator->slabs[i + 1];
}
if ((rc = vkd3d_mutex_init(&allocator->mutex)))
{
ERR("Failed to initialize mutex, error %d.\n", rc);
vkd3d_free(allocator->slabs);
return false;
}
vkd3d_mutex_init(&allocator->mutex);
return true;
}
static void vkd3d_gpu_va_allocator_cleanup(struct vkd3d_gpu_va_allocator *allocator)
{
int rc;
if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return;
}
vkd3d_mutex_lock(&allocator->mutex);
vkd3d_free(allocator->slabs);
vkd3d_free(allocator->fallback_allocations);
vkd3d_mutex_unlock(&allocator->mutex);

View File

@ -443,15 +443,8 @@ static HRESULT d3d12_heap_map(struct d3d12_heap *heap, uint64_t offset,
struct d3d12_device *device = heap->device;
HRESULT hr = S_OK;
VkResult vr;
int rc;
if ((rc = vkd3d_mutex_lock(&heap->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
if (data)
*data = NULL;
return hresult_from_errno(rc);
}
vkd3d_mutex_lock(&heap->mutex);
assert(!resource->map_count || heap->map_ptr);
@ -501,13 +494,8 @@ static HRESULT d3d12_heap_map(struct d3d12_heap *heap, uint64_t offset,
static void d3d12_heap_unmap(struct d3d12_heap *heap, struct d3d12_resource *resource)
{
struct d3d12_device *device = heap->device;
int rc;
if ((rc = vkd3d_mutex_lock(&heap->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return;
}
vkd3d_mutex_lock(&heap->mutex);
if (!resource->map_count)
{
@ -570,7 +558,6 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
VkMemoryRequirements memory_requirements;
VkDeviceSize vk_memory_size;
HRESULT hr;
int rc;
heap->ID3D12Heap_iface.lpVtbl = &d3d12_heap_vtbl;
heap->refcount = 1;
@ -596,11 +583,7 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
if (FAILED(hr = validate_heap_desc(&heap->desc, resource)))
return hr;
if ((rc = vkd3d_mutex_init(&heap->mutex)))
{
ERR("Failed to initialize mutex, error %d.\n", rc);
return hresult_from_errno(rc);
}
vkd3d_mutex_init(&heap->mutex);
if (FAILED(hr = vkd3d_private_store_init(&heap->private_store)))
{

View File

@ -1689,14 +1689,8 @@ HRESULT vkd3d_render_pass_cache_find(struct vkd3d_render_pass_cache *cache,
bool found = false;
HRESULT hr = S_OK;
unsigned int i;
int rc;
if ((rc = vkd3d_mutex_lock(&device->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
*vk_render_pass = VK_NULL_HANDLE;
return hresult_from_errno(rc);
}
vkd3d_mutex_lock(&device->mutex);
for (i = 0; i < cache->render_pass_count; ++i)
{
@ -3371,12 +3365,11 @@ static VkPipeline d3d12_pipeline_state_find_compiled_pipeline(const struct d3d12
struct d3d12_device *device = state->device;
VkPipeline vk_pipeline = VK_NULL_HANDLE;
struct vkd3d_compiled_pipeline *current;
int rc;
*vk_render_pass = VK_NULL_HANDLE;
if (!(rc = vkd3d_mutex_lock(&device->mutex)))
{
vkd3d_mutex_lock(&device->mutex);
LIST_FOR_EACH_ENTRY(current, &graphics->compiled_pipelines, struct vkd3d_compiled_pipeline, entry)
{
if (!memcmp(&current->key, key, sizeof(*key)))
@ -3386,12 +3379,8 @@ static VkPipeline d3d12_pipeline_state_find_compiled_pipeline(const struct d3d12
break;
}
}
vkd3d_mutex_unlock(&device->mutex);
}
else
{
ERR("Failed to lock mutex, error %d.\n", rc);
}
return vk_pipeline;
}
@ -3402,7 +3391,6 @@ static bool d3d12_pipeline_state_put_pipeline_to_cache(struct d3d12_pipeline_sta
struct d3d12_graphics_pipeline_state *graphics = &state->u.graphics;
struct vkd3d_compiled_pipeline *compiled_pipeline, *current;
struct d3d12_device *device = state->device;
int rc;
if (!(compiled_pipeline = vkd3d_malloc(sizeof(*compiled_pipeline))))
return false;
@ -3411,12 +3399,7 @@ static bool d3d12_pipeline_state_put_pipeline_to_cache(struct d3d12_pipeline_sta
compiled_pipeline->vk_pipeline = vk_pipeline;
compiled_pipeline->vk_render_pass = vk_render_pass;
if ((rc = vkd3d_mutex_lock(&device->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
vkd3d_free(compiled_pipeline);
return false;
}
vkd3d_mutex_lock(&device->mutex);
LIST_FOR_EACH_ENTRY(current, &graphics->compiled_pipelines, struct vkd3d_compiled_pipeline, entry)
{

View File

@ -948,16 +948,11 @@ HRESULT vkd3d_get_private_data(struct vkd3d_private_store *store,
const struct vkd3d_private_data *data;
HRESULT hr = S_OK;
unsigned int size;
int rc;
if (!out_size)
return E_INVALIDARG;
if ((rc = vkd3d_mutex_lock(&store->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return hresult_from_errno(rc);
}
vkd3d_mutex_lock(&store->mutex);
if (!(data = vkd3d_private_store_get_private_data(store, tag)))
{
@ -990,13 +985,8 @@ HRESULT vkd3d_set_private_data(struct vkd3d_private_store *store,
const GUID *tag, unsigned int data_size, const void *data)
{
HRESULT hr;
int rc;
if ((rc = vkd3d_mutex_lock(&store->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return hresult_from_errno(rc);
}
vkd3d_mutex_lock(&store->mutex);
hr = vkd3d_private_store_set_private_data(store, tag, data, data_size, false);
@ -1009,13 +999,8 @@ HRESULT vkd3d_set_private_data_interface(struct vkd3d_private_store *store,
{
const void *data = object ? object : (void *)&object;
HRESULT hr;
int rc;
if ((rc = vkd3d_mutex_lock(&store->mutex)))
{
ERR("Failed to lock mutex, error %d.\n", rc);
return hresult_from_errno(rc);
}
vkd3d_mutex_lock(&store->mutex);
hr = vkd3d_private_store_set_private_data(store, tag, data, sizeof(object), !!object);

View File

@ -207,56 +207,52 @@ struct vkd3d_cond
CONDITION_VARIABLE cond;
};
static inline int vkd3d_mutex_init(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_init(struct vkd3d_mutex *lock)
{
InitializeCriticalSection(&lock->lock);
return 0;
}
static inline int vkd3d_mutex_lock(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_lock(struct vkd3d_mutex *lock)
{
EnterCriticalSection(&lock->lock);
return 0;
}
static inline int vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
{
LeaveCriticalSection(&lock->lock);
return 0;
}
static inline int vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
{
DeleteCriticalSection(&lock->lock);
return 0;
}
static inline int vkd3d_cond_init(struct vkd3d_cond *cond)
static inline void vkd3d_cond_init(struct vkd3d_cond *cond)
{
InitializeConditionVariable(&cond->cond);
return 0;
}
static inline int vkd3d_cond_signal(struct vkd3d_cond *cond)
static inline void vkd3d_cond_signal(struct vkd3d_cond *cond)
{
WakeConditionVariable(&cond->cond);
return 0;
}
static inline int vkd3d_cond_broadcast(struct vkd3d_cond *cond)
static inline void vkd3d_cond_broadcast(struct vkd3d_cond *cond)
{
WakeAllConditionVariable(&cond->cond);
return 0;
}
static inline int vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
static inline void vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
{
return !SleepConditionVariableCS(&cond->cond, &lock->lock, INFINITE);
BOOL ret;
ret = SleepConditionVariableCS(&cond->cond, &lock->lock, INFINITE);
if (ret)
ERR("Could not sleep on the condition variable, error %u.\n", GetLastError());
}
/* Destroy a condition variable. Win32 condition variables require no explicit
 * destruction, so this is a no-op kept for interface symmetry with pthreads. */
static inline void vkd3d_cond_destroy(struct vkd3d_cond *cond)
{
}
#else /* _WIN32 */
@ -280,49 +276,85 @@ struct vkd3d_cond
};
static inline int vkd3d_mutex_init(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_init(struct vkd3d_mutex *lock)
{
return pthread_mutex_init(&lock->lock, NULL);
int ret;
ret = pthread_mutex_init(&lock->lock, NULL);
if (ret)
ERR("Could not initialize the mutex, error %d.\n", ret);
}
static inline int vkd3d_mutex_lock(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_lock(struct vkd3d_mutex *lock)
{
return pthread_mutex_lock(&lock->lock);
int ret;
ret = pthread_mutex_lock(&lock->lock);
if (ret)
ERR("Could not lock the mutex, error %d.\n", ret);
}
static inline int vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
{
return pthread_mutex_unlock(&lock->lock);
int ret;
ret = pthread_mutex_unlock(&lock->lock);
if (ret)
ERR("Could not unlock the mutex, error %d.\n", ret);
}
static inline int vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
static inline void vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
{
return pthread_mutex_destroy(&lock->lock);
int ret;
ret = pthread_mutex_destroy(&lock->lock);
if (ret)
ERR("Could not destroy the mutex, error %d.\n", ret);
}
static inline int vkd3d_cond_init(struct vkd3d_cond *cond)
static inline void vkd3d_cond_init(struct vkd3d_cond *cond)
{
return pthread_cond_init(&cond->cond, NULL);
int ret;
ret = pthread_cond_init(&cond->cond, NULL);
if (ret)
ERR("Could not initialize the condition variable, error %d.\n", ret);
}
static inline int vkd3d_cond_signal(struct vkd3d_cond *cond)
static inline void vkd3d_cond_signal(struct vkd3d_cond *cond)
{
return pthread_cond_signal(&cond->cond);
int ret;
ret = pthread_cond_signal(&cond->cond);
if (ret)
ERR("Could not signal the condition variable, error %d.\n", ret);
}
static inline int vkd3d_cond_broadcast(struct vkd3d_cond *cond)
static inline void vkd3d_cond_broadcast(struct vkd3d_cond *cond)
{
return pthread_cond_broadcast(&cond->cond);
int ret;
ret = pthread_cond_broadcast(&cond->cond);
if (ret)
ERR("Could not broadcast the condition variable, error %d.\n", ret);
}
static inline int vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
static inline void vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
{
return pthread_cond_wait(&cond->cond, &lock->lock);
int ret;
ret = pthread_cond_wait(&cond->cond, &lock->lock);
if (ret)
ERR("Could not wait on the condition variable, error %d.\n", ret);
}
static inline int vkd3d_cond_destroy(struct vkd3d_cond *cond)
static inline void vkd3d_cond_destroy(struct vkd3d_cond *cond)
{
return pthread_cond_destroy(&cond->cond);
int ret;
ret = pthread_cond_destroy(&cond->cond);
if (ret)
ERR("Could not destroy the condition variable, error %d.\n", ret);
}
#endif /* _WIN32 */
@ -447,14 +479,11 @@ static inline void vkd3d_private_data_destroy(struct vkd3d_private_data *data)
/* Initialise a private data store. Always succeeds now that
 * vkd3d_mutex_init() cannot fail; the HRESULT return is kept for
 * interface compatibility with existing callers. */
static inline HRESULT vkd3d_private_store_init(struct vkd3d_private_store *store)
{
    list_init(&store->content);

    vkd3d_mutex_init(&store->mutex);

    return S_OK;
}
static inline void vkd3d_private_store_destroy(struct vkd3d_private_store *store)