Mirror of https://gitlab.winehq.org/wine/vkd3d.git, synced 2025-09-12 18:50:22 -07:00

vkd3d: Implement simple pipeline cache.

Ideally, we would like to introduce a Vulkan extension to make vertex buffer
strides and primitive topology dynamic in Vulkan.

Signed-off-by: Józef Kucia <jkucia@codeweavers.com>
Signed-off-by: Henri Verbeet <hverbeet@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
committed by Alexandre Julliard
parent e7eb9ca936
commit 11c00a157c

@@ -1169,11 +1169,116 @@ static HRESULT d3d12_device_create_dummy_sampler(struct d3d12_device *device)
     return vkd3d_create_static_sampler(device, &sampler_desc, &device->vk_dummy_sampler);
 }
 
-static void d3d12_device_init_pipeline_cache(struct d3d12_device *device)
+static void destroy_compiled_pipeline(struct vkd3d_compiled_pipeline *pipeline,
+        struct d3d12_device *device)
+{
+    const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
+
+    VK_CALL(vkDestroyPipeline(device->vk_device, pipeline->vk_pipeline, NULL));
+    vkd3d_free(pipeline);
+}
+
+static int compare_pipeline_cache_entry(const void *key, const struct rb_entry *entry)
+{
+    const struct vkd3d_compiled_pipeline *compiled_pipeline;
+    const struct vkd3d_pipeline_key *pipeline_key;
+
+    pipeline_key = key;
+    compiled_pipeline = RB_ENTRY_VALUE(entry, const struct vkd3d_compiled_pipeline, entry);
+    return memcmp(&compiled_pipeline->key, pipeline_key, sizeof(*pipeline_key));
+}
+
+static void destroy_pipeline_cache_entry(struct rb_entry *entry, void *context)
+{
+    struct vkd3d_compiled_pipeline *pipeline;
+    struct d3d12_device *device = context;
+
+    pipeline = RB_ENTRY_VALUE(entry, struct vkd3d_compiled_pipeline, entry);
+    destroy_compiled_pipeline(pipeline, device);
+}
+
+VkPipeline d3d12_device_find_cached_pipeline(struct d3d12_device *device,
+        const struct vkd3d_pipeline_key *key)
+{
+    VkPipeline vk_pipeline = VK_NULL_HANDLE;
+    struct rb_entry *entry;
+    int rc;
+
+    if (!(rc = pthread_mutex_lock(&device->pipeline_cache_mutex)))
+    {
+        if ((entry = rb_get(&device->pipeline_cache, key)))
+            vk_pipeline = RB_ENTRY_VALUE(entry, struct vkd3d_compiled_pipeline, entry)->vk_pipeline;
+        pthread_mutex_unlock(&device->pipeline_cache_mutex);
+    }
+    else
+    {
+        ERR("Failed to lock mutex, error %d.\n", rc);
+    }
+
+    return vk_pipeline;
+}
+
+bool d3d12_device_put_pipeline_to_cache(struct d3d12_device *device,
+        const struct vkd3d_pipeline_key *key, VkPipeline vk_pipeline, struct list *list)
+{
+    struct vkd3d_compiled_pipeline *compiled_pipeline;
+    bool ret = true;
+    int rc;
+
+    if (!(compiled_pipeline = vkd3d_malloc(sizeof(*compiled_pipeline))))
+        return false;
+
+    compiled_pipeline->key = *key;
+    compiled_pipeline->vk_pipeline = vk_pipeline;
+
+    if ((rc = pthread_mutex_lock(&device->pipeline_cache_mutex)))
+    {
+        ERR("Failed to lock mutex, error %d.\n", rc);
+        vkd3d_free(compiled_pipeline);
+        return false;
+    }
+
+    if (rb_put(&device->pipeline_cache, key, &compiled_pipeline->entry) >= 0)
+    {
+        list_add_tail(list, &compiled_pipeline->list);
+    }
+    else
+    {
+        WARN("Failed to put pipeline to cache.\n");
+        vkd3d_free(compiled_pipeline);
+        ret = false;
+    }
+
+    pthread_mutex_unlock(&device->pipeline_cache_mutex);
+    return ret;
+}
+
+void d3d12_device_destroy_compiled_pipelines(struct d3d12_device *device, struct list *list)
+{
+    struct vkd3d_compiled_pipeline *pipeline, *cursor;
+    int rc;
+
+    if ((rc = pthread_mutex_lock(&device->pipeline_cache_mutex)))
+    {
+        ERR("Failed to lock mutex, error %d.\n", rc);
+        return;
+    }
+
+    LIST_FOR_EACH_ENTRY_SAFE(pipeline, cursor, list, struct vkd3d_compiled_pipeline, list)
+    {
+        rb_remove(&device->pipeline_cache, &pipeline->entry);
+        destroy_compiled_pipeline(pipeline, device);
+    }
+
+    pthread_mutex_unlock(&device->pipeline_cache_mutex);
+}
+
+static HRESULT d3d12_device_init_pipeline_cache(struct d3d12_device *device)
 {
     const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
     VkPipelineCacheCreateInfo cache_info;
     VkResult vr;
+    int rc;
 
     cache_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
     cache_info.pNext = NULL;
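
Note (not part of the commit): the cache helpers above operate on struct vkd3d_compiled_pipeline and struct vkd3d_pipeline_key, whose definitions live in vkd3d_private.h and are not shown in this diff. The sketch below is inferred purely from the field accesses in this hunk and from the commit message; the key fields in particular are hypothetical.

/* Sketch only, not the vkd3d_private.h definitions. Because the key is
 * compared with memcmp(), it has to be a flat structure with no
 * uninitialised padding. Field names for the key are guesses based on the
 * commit message (primitive topology and vertex buffer strides). */
struct vkd3d_pipeline_key
{
    D3D12_PRIMITIVE_TOPOLOGY topology;                            /* hypothetical */
    uint32_t strides[D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT];  /* hypothetical */
};

struct vkd3d_compiled_pipeline
{
    struct rb_entry entry;          /* node in device->pipeline_cache */
    struct vkd3d_pipeline_key key;  /* lookup key, compared bytewise */
    VkPipeline vk_pipeline;         /* compiled Vulkan pipeline for this key */
    struct list list;               /* link in the owning pipeline state's list */
};
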
@@ -1183,9 +1288,39 @@ static void d3d12_device_init_pipeline_cache(struct d3d12_device *device)
     if ((vr = VK_CALL(vkCreatePipelineCache(device->vk_device, &cache_info, NULL,
             &device->vk_pipeline_cache))) < 0)
     {
-        ERR("Failed to create pipeline cache, vr %d.\n", vr);
+        ERR("Failed to create Vulkan pipeline cache, vr %d.\n", vr);
         device->vk_pipeline_cache = VK_NULL_HANDLE;
     }
+
+    rb_init(&device->pipeline_cache, compare_pipeline_cache_entry);
+
+    if ((rc = pthread_mutex_init(&device->pipeline_cache_mutex, NULL)))
+    {
+        ERR("Failed to initialize mutex, error %d.\n", rc);
+        return E_FAIL;
+    }
+
+    return S_OK;
 }
 
+static void d3d12_device_destroy_pipeline_cache(struct d3d12_device *device)
+{
+    const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
+    int rc;
+
+    if (device->vk_pipeline_cache)
+        VK_CALL(vkDestroyPipelineCache(device->vk_device, device->vk_pipeline_cache, NULL));
+
+    if ((rc = pthread_mutex_lock(&device->pipeline_cache_mutex)))
+    {
+        ERR("Failed to lock mutex, error %d.\n", rc);
+        return;
+    }
+
+    rb_destroy(&device->pipeline_cache, destroy_pipeline_cache_entry, device);
+
+    pthread_mutex_unlock(&device->pipeline_cache_mutex);
+    pthread_mutex_destroy(&device->pipeline_cache_mutex);
+}
+
 D3D12_GPU_VIRTUAL_ADDRESS vkd3d_gpu_va_allocator_allocate(struct vkd3d_gpu_va_allocator *allocator,
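
Note (not part of the commit): a rough sketch of how these entry points fit together at a call site, e.g. when a pipeline variant is needed for a particular topology/stride combination. The helper name and control flow below are illustrative; the real call site in the pipeline state code is not shown in this diff. vkCreateGraphicsPipelines() takes device->vk_pipeline_cache, so the driver-level VkPipelineCache is consulted as well.

/* Illustrative only: look the variant up first, compile and insert it on a miss. */
static VkPipeline get_or_create_pipeline(struct d3d12_device *device,
        const struct vkd3d_pipeline_key *key,
        const VkGraphicsPipelineCreateInfo *pipeline_desc, struct list *list)
{
    const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
    VkPipeline vk_pipeline;
    VkResult vr;

    if ((vk_pipeline = d3d12_device_find_cached_pipeline(device, key)))
        return vk_pipeline;

    if ((vr = VK_CALL(vkCreateGraphicsPipelines(device->vk_device, device->vk_pipeline_cache,
            1, pipeline_desc, NULL, &vk_pipeline))) < 0)
    {
        WARN("Failed to create Vulkan graphics pipeline, vr %d.\n", vr);
        return VK_NULL_HANDLE;
    }

    if (!d3d12_device_put_pipeline_to_cache(device, key, vk_pipeline, list))
    {
        /* Insertion can fail, e.g. if another thread won the race; drop the
         * redundant pipeline and fall back to whatever is in the cache now. */
        VK_CALL(vkDestroyPipeline(device->vk_device, vk_pipeline, NULL));
        return d3d12_device_find_cached_pipeline(device, key);
    }

    return vk_pipeline;
}
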
@@ -1357,8 +1492,7 @@ static ULONG STDMETHODCALLTYPE d3d12_device_Release(ID3D12Device *iface)
         vkd3d_gpu_va_allocator_cleanup(&device->gpu_va_allocator);
         vkd3d_fence_worker_stop(&device->fence_worker, device);
         VK_CALL(vkDestroySampler(device->vk_device, device->vk_dummy_sampler, NULL));
-        if (device->vk_pipeline_cache)
-            VK_CALL(vkDestroyPipelineCache(device->vk_device, device->vk_pipeline_cache, NULL));
+        d3d12_device_destroy_pipeline_cache(device);
         d3d12_device_destroy_vkd3d_queues(device);
         VK_CALL(vkDestroyDevice(device->vk_device, NULL));
         if (device->parent)
@@ -2310,18 +2444,21 @@ static HRESULT d3d12_device_init(struct d3d12_device *device,
         goto out_free_vk_resources;
     }
 
-    if (FAILED(hr = vkd3d_fence_worker_start(&device->fence_worker, device)))
+    if (FAILED(hr = d3d12_device_init_pipeline_cache(device)))
         goto out_free_vk_resources;
 
-    vkd3d_gpu_va_allocator_init(&device->gpu_va_allocator);
+    if (FAILED(hr = vkd3d_fence_worker_start(&device->fence_worker, device)))
+        goto out_free_pipeline_cache;
 
-    d3d12_device_init_pipeline_cache(device);
+    vkd3d_gpu_va_allocator_init(&device->gpu_va_allocator);
 
     if ((device->parent = create_info->parent))
         IUnknown_AddRef(device->parent);
 
     return S_OK;
 
+out_free_pipeline_cache:
+    d3d12_device_destroy_pipeline_cache(device);
 out_free_vk_resources:
     vk_procs = &device->vk_procs;
     VK_CALL(vkDestroySampler(device->vk_device, device->vk_dummy_sampler, NULL));
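
Note (not part of the commit): the struct list * parameter threaded through d3d12_device_put_pipeline_to_cache() and d3d12_device_destroy_compiled_pipelines() suggests that each pipeline state object keeps a list of the variants it inserted, so that releasing the state removes exactly those entries from the device-wide cache. A sketch of that pairing, with illustrative names:

/* Illustrative only; the owning object and its lifecycle are not part of this diff. */
struct pipeline_variant_owner
{
    struct d3d12_device *device;
    struct list compiled_pipelines;   /* initialised with list_init(), filled via
                                       * d3d12_device_put_pipeline_to_cache() */
};

static void pipeline_variant_owner_cleanup(struct pipeline_variant_owner *owner)
{
    /* Unlinks each listed entry from device->pipeline_cache and destroys its
     * VkPipeline through destroy_compiled_pipeline(). */
    d3d12_device_destroy_compiled_pipelines(owner->device, &owner->compiled_pipelines);
}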