vkd3d: Store a heap array index in each CBV/SRV/UAV descriptor.

A pointer to the containing descriptor heap can be derived from this
information.

PE build of vkd3d uses Windows critical sections for synchronisation,
and these slow down on the very high lock/unlock rate during multithreaded
descriptor copying in Shadow of the Tomb Raider. This patch speeds up the
demo by about 8%. By comparison, using SRW locks in the allocators and
locking them for read only where applicable is about 4% faster.
This commit is contained in:
Conor McCarthy 2023-01-25 15:25:48 +10:00 committed by Alexandre Julliard
parent d14f42be9d
commit 3db509383b
Notes: Alexandre Julliard 2023-01-25 22:43:21 +01:00
Approved-by: Henri Verbeet (@hverbeet)
Approved-by: Alexandre Julliard (@julliard)
Merge-Request: https://gitlab.winehq.org/wine/vkd3d/-/merge_requests/67
4 changed files with 149 additions and 334 deletions

View File

@ -2802,7 +2802,6 @@ static void d3d12_command_list_prepare_descriptors(struct d3d12_command_list *li
unsigned int variable_binding_size, unbounded_offset, table_index, heap_size, i;
const struct d3d12_root_signature *root_signature = bindings->root_signature;
const struct d3d12_descriptor_set_layout *layout;
struct d3d12_device *device = list->device;
const struct d3d12_desc *base_descriptor;
VkDescriptorSet vk_descriptor_set;
@ -2833,8 +2832,7 @@ static void d3d12_command_list_prepare_descriptors(struct d3d12_command_list *li
/* Descriptors may not be set, eg. WoW. */
&& (base_descriptor = bindings->descriptor_tables[table_index]))
{
heap_size = vkd3d_gpu_descriptor_allocator_range_size_from_descriptor(
&device->gpu_descriptor_allocator, base_descriptor);
heap_size = d3d12_desc_heap_range_size(base_descriptor);
if (heap_size < unbounded_offset)
WARN("Descriptor heap size %u is less than the offset %u of an unbounded range in table %u, "
@ -2860,11 +2858,11 @@ static bool vk_write_descriptor_set_from_d3d12_desc(VkWriteDescriptorSet *vk_des
unsigned int index, bool use_array)
{
uint32_t descriptor_range_magic = range->descriptor_magic;
const struct vkd3d_view *view = descriptor->u.view_info.view;
const struct vkd3d_view *view = descriptor->s.u.view_info.view;
uint32_t vk_binding = range->binding;
uint32_t set = range->set;
if (descriptor->magic != descriptor_range_magic)
if (descriptor->s.magic != descriptor_range_magic)
return false;
vk_descriptor_write->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
@ -2873,15 +2871,15 @@ static bool vk_write_descriptor_set_from_d3d12_desc(VkWriteDescriptorSet *vk_des
vk_descriptor_write->dstBinding = use_array ? vk_binding : vk_binding + index;
vk_descriptor_write->dstArrayElement = use_array ? index : 0;
vk_descriptor_write->descriptorCount = 1;
vk_descriptor_write->descriptorType = descriptor->vk_descriptor_type;
vk_descriptor_write->descriptorType = descriptor->s.vk_descriptor_type;
vk_descriptor_write->pImageInfo = NULL;
vk_descriptor_write->pBufferInfo = NULL;
vk_descriptor_write->pTexelBufferView = NULL;
switch (descriptor->magic)
switch (descriptor->s.magic)
{
case VKD3D_DESCRIPTOR_MAGIC_CBV:
vk_descriptor_write->pBufferInfo = &descriptor->u.vk_cbv_info;
vk_descriptor_write->pBufferInfo = &descriptor->s.u.vk_cbv_info;
break;
case VKD3D_DESCRIPTOR_MAGIC_SRV:
@ -2892,8 +2890,8 @@ static bool vk_write_descriptor_set_from_d3d12_desc(VkWriteDescriptorSet *vk_des
* in pairs in one set. */
if (range->descriptor_count == UINT_MAX)
{
if (descriptor->vk_descriptor_type != VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
&& descriptor->vk_descriptor_type != VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
if (descriptor->s.vk_descriptor_type != VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
&& descriptor->s.vk_descriptor_type != VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
{
vk_descriptor_write->dstSet = vk_descriptor_sets[set + 1];
vk_descriptor_write->dstBinding = 0;
@ -2903,13 +2901,13 @@ static bool vk_write_descriptor_set_from_d3d12_desc(VkWriteDescriptorSet *vk_des
{
if (!use_array)
vk_descriptor_write->dstBinding = vk_binding + 2 * index;
if (descriptor->vk_descriptor_type != VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
&& descriptor->vk_descriptor_type != VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
if (descriptor->s.vk_descriptor_type != VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
&& descriptor->s.vk_descriptor_type != VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
++vk_descriptor_write->dstBinding;
}
if (descriptor->vk_descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
|| descriptor->vk_descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
if (descriptor->s.vk_descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
|| descriptor->s.vk_descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
{
vk_descriptor_write->pTexelBufferView = &view->u.vk_buffer_view;
}
@ -2917,7 +2915,7 @@ static bool vk_write_descriptor_set_from_d3d12_desc(VkWriteDescriptorSet *vk_des
{
vk_image_info->sampler = VK_NULL_HANDLE;
vk_image_info->imageView = view->u.vk_image_view;
vk_image_info->imageLayout = descriptor->magic == VKD3D_DESCRIPTOR_MAGIC_SRV
vk_image_info->imageLayout = descriptor->s.magic == VKD3D_DESCRIPTOR_MAGIC_SRV
? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : VK_IMAGE_LAYOUT_GENERAL;
vk_descriptor_write->pImageInfo = vk_image_info;
@ -2933,7 +2931,7 @@ static bool vk_write_descriptor_set_from_d3d12_desc(VkWriteDescriptorSet *vk_des
break;
default:
ERR("Invalid descriptor %#x.\n", descriptor->magic);
ERR("Invalid descriptor %#x.\n", descriptor->s.magic);
return false;
}
@ -2974,8 +2972,7 @@ static void d3d12_command_list_update_descriptor_table(struct d3d12_command_list
descriptor_count = range->descriptor_count;
if ((unbounded = descriptor_count == UINT_MAX))
{
descriptor_count = vkd3d_gpu_descriptor_allocator_range_size_from_descriptor(
&list->device->gpu_descriptor_allocator, descriptor);
descriptor_count = d3d12_desc_heap_range_size(descriptor);
if (descriptor_count > range->vk_binding_count)
{
@ -2997,8 +2994,8 @@ static void d3d12_command_list_update_descriptor_table(struct d3d12_command_list
if (state->uav_counters.bindings[k].register_space == range->register_space
&& state->uav_counters.bindings[k].register_index == register_idx)
{
VkBufferView vk_counter_view = descriptor->magic == VKD3D_DESCRIPTOR_MAGIC_UAV
? descriptor->u.view_info.view->vk_counter_view : VK_NULL_HANDLE;
VkBufferView vk_counter_view = descriptor->s.magic == VKD3D_DESCRIPTOR_MAGIC_UAV
? descriptor->s.u.view_info.view->vk_counter_view : VK_NULL_HANDLE;
if (bindings->vk_uav_counter_views[k] != vk_counter_view)
bindings->uav_counters_dirty = true;
bindings->vk_uav_counter_views[k] = vk_counter_view;
@ -3008,7 +3005,7 @@ static void d3d12_command_list_update_descriptor_table(struct d3d12_command_list
}
/* Not all descriptors are necessarily populated if the range is unbounded. */
if (descriptor->magic == VKD3D_DESCRIPTOR_MAGIC_FREE)
if (descriptor->s.magic == VKD3D_DESCRIPTOR_MAGIC_FREE)
continue;
if (!vk_write_descriptor_set_from_d3d12_desc(current_descriptor_write, current_image_info,
@ -3242,8 +3239,8 @@ static unsigned int d3d12_command_list_bind_descriptor_table(struct d3d12_comman
/* AMD, Nvidia and Intel drivers on Windows work if SetDescriptorHeaps()
* is not called, so we bind heaps from the tables instead. No NULL check is
* needed here because it's checked when descriptor tables are set. */
heap = vkd3d_gpu_descriptor_allocator_heap_from_descriptor(&list->device->gpu_descriptor_allocator, desc);
offset = desc - (const struct d3d12_desc *)heap->descriptors;
heap = d3d12_desc_get_descriptor_heap(desc);
offset = desc->index;
if (heap->desc.Type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV)
{
@ -4517,16 +4514,6 @@ static void d3d12_command_list_set_descriptor_table(struct d3d12_command_list *l
if (bindings->descriptor_tables[index] == desc)
return;
if (desc && !vkd3d_gpu_descriptor_allocator_heap_from_descriptor(&list->device->gpu_descriptor_allocator,
desc))
{
/* Failure to find a heap means the descriptor handle is from
* the wrong heap type or not a handle at all. */
ERR("Invalid heap for base descriptor %"PRIx64".\n", base_descriptor.ptr);
/* TODO: Mark list as invalid? */
return;
}
bindings->descriptor_tables[index] = desc;
bindings->descriptor_table_dirty_mask |= (uint64_t)1 << index;
bindings->descriptor_table_active_mask |= (uint64_t)1 << index;
@ -5457,7 +5444,7 @@ static void STDMETHODCALLTYPE d3d12_command_list_ClearUnorderedAccessViewUint(ID
iface, gpu_handle.ptr, cpu_handle.ptr, resource, values, rect_count, rects);
resource_impl = unsafe_impl_from_ID3D12Resource(resource);
view = d3d12_desc_from_cpu_handle(cpu_handle)->u.view_info.view;
view = d3d12_desc_from_cpu_handle(cpu_handle)->s.u.view_info.view;
memcpy(colour.uint32, values, sizeof(colour.uint32));
if (view->format->type != VKD3D_FORMAT_TYPE_UINT)
@ -5516,7 +5503,7 @@ static void STDMETHODCALLTYPE d3d12_command_list_ClearUnorderedAccessViewFloat(I
iface, gpu_handle.ptr, cpu_handle.ptr, resource, values, rect_count, rects);
resource_impl = unsafe_impl_from_ID3D12Resource(resource);
view = d3d12_desc_from_cpu_handle(cpu_handle)->u.view_info.view;
view = d3d12_desc_from_cpu_handle(cpu_handle)->s.u.view_info.view;
memcpy(colour.float32, values, sizeof(colour.float32));
d3d12_command_list_clear_uav(list, resource_impl, view, &colour, rect_count, rects);

View File

@ -2366,175 +2366,6 @@ static void vkd3d_gpu_va_allocator_cleanup(struct vkd3d_gpu_va_allocator *alloca
vkd3d_mutex_destroy(&allocator->mutex);
}
/* We could use bsearch() or recursion here, but it probably helps to omit
 * all the extra function calls. */
/* Find the allocation with the greatest base address not greater than "desc".
 * The allocations array is sorted by base address. */
static struct vkd3d_gpu_descriptor_allocation *vkd3d_gpu_descriptor_allocator_binary_search(
        const struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *desc)
{
    struct vkd3d_gpu_descriptor_allocation *allocations = allocator->allocations;
    const struct d3d12_desc *base;
    size_t centre, count;

    /* Narrow the window [allocations, allocations + count) until a single
     * candidate remains; the candidate with base <= desc always stays inside
     * the window. */
    for (count = allocator->allocation_count; count > 1; )
    {
        centre = count >> 1;
        base = allocations[centre].base;
        if (base <= desc)
        {
            /* Keep the upper half, midpoint included. */
            allocations += centre;
            count -= centre;
        }
        else
        {
            /* Keep the lower half, midpoint excluded. */
            count = centre;
        }
    }

    /* NOTE(review): if allocation_count is 0 this returns the (possibly empty)
     * array pointer unchecked; callers appear to guarantee at least one
     * registered allocation before searching — confirm at call sites. */
    return allocations;
}
/* Register [base, base + count) as a live GPU-visible descriptor range so a
 * containing heap can later be located from any descriptor pointer within it.
 * The allocations array is kept sorted by base address for binary searching.
 * Returns false if the mutex cannot be locked or memory is exhausted. */
bool vkd3d_gpu_descriptor_allocator_register_range(struct vkd3d_gpu_descriptor_allocator *allocator,
        const struct d3d12_desc *base, size_t count)
{
    struct vkd3d_gpu_descriptor_allocation *allocation;
    int rc;

    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
    {
        ERR("Failed to lock mutex, error %d.\n", rc);
        return false;
    }

    /* Grow the array for one more entry before searching, so the insertion
     * point stays valid. */
    if (!vkd3d_array_reserve((void **)&allocator->allocations, &allocator->allocations_size,
            allocator->allocation_count + 1, sizeof(*allocator->allocations)))
    {
        vkd3d_mutex_unlock(&allocator->mutex);
        return false;
    }

    if (allocator->allocation_count > 1)
        allocation = vkd3d_gpu_descriptor_allocator_binary_search(allocator, base);
    else
        allocation = allocator->allocations;
    /* The search yields the entry with the greatest base <= the new base;
     * step past it so the array stays sorted after insertion. */
    allocation += allocator->allocation_count && base > allocation->base;
    /* Shift the tail up one slot; the post-increment also accounts for the new
     * entry in allocation_count. */
    memmove(&allocation[1], allocation, (allocator->allocation_count++ - (allocation - allocator->allocations))
            * sizeof(*allocation));

    allocation->base = base;
    allocation->count = count;

    vkd3d_mutex_unlock(&allocator->mutex);

    return true;
}
/* Remove the previously registered range starting at "base". Returns true if
 * a matching range was found and removed, false otherwise (including lock
 * failure). */
bool vkd3d_gpu_descriptor_allocator_unregister_range(
        struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *base)
{
    bool found = false;
    size_t i;
    int rc;

    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
    {
        ERR("Failed to lock mutex, error %d.\n", rc);
        return false;
    }

    /* Linear scan for the entry whose base matches, then close the gap. */
    for (i = 0; i < allocator->allocation_count; ++i)
    {
        if (allocator->allocations[i].base == base)
        {
            --allocator->allocation_count;
            memmove(&allocator->allocations[i], &allocator->allocations[i + 1],
                    (allocator->allocation_count - i) * sizeof(allocator->allocations[0]));
            found = true;
            break;
        }
    }

    vkd3d_mutex_unlock(&allocator->mutex);

    return found;
}
/* Map a descriptor pointer to its containing allocation, or NULL if "desc"
 * lies outside every registered range. */
static inline const struct vkd3d_gpu_descriptor_allocation *vkd3d_gpu_descriptor_allocator_allocation_from_descriptor(
        const struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *desc)
{
    const struct vkd3d_gpu_descriptor_allocation *allocation;

    allocation = vkd3d_gpu_descriptor_allocator_binary_search(allocator, desc);
    /* The search returns the nearest candidate; reject it if the descriptor
     * falls before its base or past its end. */
    if (desc < allocation->base || desc - allocation->base >= allocation->count)
        return NULL;

    return allocation;
}
/* Return the available size from the specified descriptor to the heap end. */
/* Returns 0 if the descriptor is not inside any registered range, or if the
 * mutex cannot be locked. */
size_t vkd3d_gpu_descriptor_allocator_range_size_from_descriptor(
        struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *desc)
{
    const struct vkd3d_gpu_descriptor_allocation *allocation;
    size_t remaining;
    int rc;

    /* Callers must only query while at least one range is registered. */
    assert(allocator->allocation_count);

    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
    {
        ERR("Failed to lock mutex, error %d.\n", rc);
        return 0;
    }

    remaining = 0;
    if ((allocation = vkd3d_gpu_descriptor_allocator_allocation_from_descriptor(allocator, desc)))
        /* Count from "desc" up to the end of the containing allocation. */
        remaining = allocation->count - (desc - allocation->base);

    vkd3d_mutex_unlock(&allocator->mutex);

    return remaining;
}
/* Derive the descriptor heap containing "desc", or NULL if the descriptor is
 * not part of any registered range (e.g. it comes from a heap of the wrong
 * type, or is not a descriptor at all). */
struct d3d12_descriptor_heap *vkd3d_gpu_descriptor_allocator_heap_from_descriptor(
        struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *desc)
{
    const struct vkd3d_gpu_descriptor_allocation *allocation;
    struct d3d12_descriptor_heap *heap = NULL;
    int rc;

    /* Nothing registered, so no heap can match. */
    if (!allocator->allocation_count)
        return NULL;

    if ((rc = vkd3d_mutex_lock(&allocator->mutex)))
    {
        ERR("Failed to lock mutex, error %d.\n", rc);
        return NULL;
    }

    if ((allocation = vkd3d_gpu_descriptor_allocator_allocation_from_descriptor(allocator, desc)))
        /* The allocation base is the heap's descriptors member; recover the
         * enclosing heap object from it. */
        heap = CONTAINING_RECORD(allocation->base, struct d3d12_descriptor_heap, descriptors);

    vkd3d_mutex_unlock(&allocator->mutex);

    return heap;
}
/* Initialise the allocator to an empty state and create its mutex. Returns
 * false if the mutex cannot be initialised. */
static bool vkd3d_gpu_descriptor_allocator_init(struct vkd3d_gpu_descriptor_allocator *allocator)
{
    int rc;

    memset(allocator, 0, sizeof(*allocator));
    if (!(rc = vkd3d_mutex_init(&allocator->mutex)))
        return true;

    ERR("Failed to initialise mutex, error %d.\n", rc);
    return false;
}
/* Release the allocation array and destroy the allocator's mutex. The
 * allocator must not be used afterwards without re-initialisation. */
static void vkd3d_gpu_descriptor_allocator_cleanup(struct vkd3d_gpu_descriptor_allocator *allocator)
{
    vkd3d_free(allocator->allocations);
    vkd3d_mutex_destroy(&allocator->mutex);
}
static bool have_vk_time_domain(VkTimeDomainEXT *domains, unsigned int count, VkTimeDomainEXT domain)
{
unsigned int i;
@ -2671,7 +2502,6 @@ static ULONG STDMETHODCALLTYPE d3d12_device_Release(ID3D12Device *iface)
vkd3d_uav_clear_state_cleanup(&device->uav_clear_state, device);
vkd3d_destroy_null_resources(&device->null_resources, device);
vkd3d_gpu_va_allocator_cleanup(&device->gpu_va_allocator);
vkd3d_gpu_descriptor_allocator_cleanup(&device->gpu_descriptor_allocator);
vkd3d_render_pass_cache_cleanup(&device->render_pass_cache, device);
d3d12_device_destroy_pipeline_cache(device);
d3d12_device_destroy_vkd3d_queues(device);
@ -3594,7 +3424,7 @@ static void d3d12_desc_buffered_copy_atomic(struct d3d12_desc *dst, const struct
mutex = d3d12_device_get_descriptor_mutex(device, src);
vkd3d_mutex_lock(mutex);
if (src->magic == VKD3D_DESCRIPTOR_MAGIC_FREE)
if (src->s.magic == VKD3D_DESCRIPTOR_MAGIC_FREE)
{
/* Source must be unlocked first, and therefore can't be used as a null source. */
static const struct d3d12_desc null = {0};
@ -3603,18 +3433,18 @@ static void d3d12_desc_buffered_copy_atomic(struct d3d12_desc *dst, const struct
return;
}
set = vkd3d_vk_descriptor_set_index_from_vk_descriptor_type(src->vk_descriptor_type);
set = vkd3d_vk_descriptor_set_index_from_vk_descriptor_type(src->s.vk_descriptor_type);
location = &locations[set][infos[set].count++];
location->src = *src;
location->src.s = src->s;
if (location->src.magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
vkd3d_view_incref(location->src.u.view_info.view);
if (location->src.s.magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
vkd3d_view_incref(location->src.s.u.view_info.view);
vkd3d_mutex_unlock(mutex);
infos[set].uav_counter |= (location->src.magic == VKD3D_DESCRIPTOR_MAGIC_UAV)
&& !!location->src.u.view_info.view->vk_counter_view;
infos[set].uav_counter |= (location->src.s.magic == VKD3D_DESCRIPTOR_MAGIC_UAV)
&& !!location->src.s.u.view_info.view->vk_counter_view;
location->dst = dst;
if (infos[set].count == ARRAY_SIZE(locations[0]))
@ -3643,8 +3473,7 @@ static void d3d12_device_vk_heaps_copy_descriptors(struct d3d12_device *device,
unsigned int dst_range_size, src_range_size;
struct d3d12_desc *dst;
descriptor_heap = vkd3d_gpu_descriptor_allocator_heap_from_descriptor(&device->gpu_descriptor_allocator,
d3d12_desc_from_cpu_handle(dst_descriptor_range_offsets[0]));
descriptor_heap = d3d12_desc_get_descriptor_heap(d3d12_desc_from_cpu_handle(dst_descriptor_range_offsets[0]));
heap_base = (const struct d3d12_desc *)descriptor_heap->descriptors;
heap_end = heap_base + descriptor_heap->desc.NumDescriptors;
@ -3662,8 +3491,7 @@ static void d3d12_device_vk_heaps_copy_descriptors(struct d3d12_device *device,
if (dst < heap_base || dst >= heap_end)
{
flush_desc_writes(locations, infos, descriptor_heap, device);
descriptor_heap = vkd3d_gpu_descriptor_allocator_heap_from_descriptor(&device->gpu_descriptor_allocator,
dst);
descriptor_heap = d3d12_desc_get_descriptor_heap(dst);
heap_base = (const struct d3d12_desc *)descriptor_heap->descriptors;
heap_end = heap_base + descriptor_heap->desc.NumDescriptors;
}
@ -3674,8 +3502,8 @@ static void d3d12_device_vk_heaps_copy_descriptors(struct d3d12_device *device,
* mutex is only intended to prevent use-after-free of the vkd3d_view caused by a
* race condition in the calling app. It is unnecessary to protect this test as it's
* the app's race condition, not ours. */
if (dst[dst_idx].magic == src[src_idx].magic && (dst[dst_idx].magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
&& dst[dst_idx].u.view_info.written_serial_id == src[src_idx].u.view_info.view->serial_id)
if (dst[dst_idx].s.magic == src[src_idx].s.magic && (dst[dst_idx].s.magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
&& dst[dst_idx].s.u.view_info.written_serial_id == src[src_idx].s.u.view_info.view->serial_id)
continue;
d3d12_desc_buffered_copy_atomic(&dst[dst_idx], &src[src_idx], locations, infos, descriptor_heap, device);
}
@ -4320,7 +4148,6 @@ static HRESULT d3d12_device_init(struct d3d12_device *device,
goto out_cleanup_uav_clear_state;
vkd3d_render_pass_cache_init(&device->render_pass_cache);
vkd3d_gpu_descriptor_allocator_init(&device->gpu_descriptor_allocator);
vkd3d_gpu_va_allocator_init(&device->gpu_va_allocator);
vkd3d_time_domains_init(device);

View File

@ -2137,14 +2137,14 @@ static void d3d12_descriptor_heap_write_vk_descriptor_range(struct d3d12_descrip
{
unsigned int i, info_index = 0, write_index = 0;
switch (locations[0].src.vk_descriptor_type)
switch (locations[0].src.s.vk_descriptor_type)
{
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
for (; write_index < write_count; ++write_index)
{
descriptor_set->vk_descriptor_writes[write_index].pBufferInfo = &descriptor_set->vk_buffer_infos[info_index];
for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index)
descriptor_set->vk_buffer_infos[info_index] = locations[info_index].src.u.vk_cbv_info;
descriptor_set->vk_buffer_infos[info_index] = locations[info_index].src.s.u.vk_cbv_info;
}
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
@ -2153,7 +2153,7 @@ static void d3d12_descriptor_heap_write_vk_descriptor_range(struct d3d12_descrip
{
descriptor_set->vk_descriptor_writes[write_index].pImageInfo = &descriptor_set->vk_image_infos[info_index];
for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index)
descriptor_set->vk_image_infos[info_index].imageView = locations[info_index].src.u.view_info.view->u.vk_image_view;
descriptor_set->vk_image_infos[info_index].imageView = locations[info_index].src.s.u.view_info.view->u.vk_image_view;
}
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
@ -2162,7 +2162,7 @@ static void d3d12_descriptor_heap_write_vk_descriptor_range(struct d3d12_descrip
{
descriptor_set->vk_descriptor_writes[write_index].pTexelBufferView = &descriptor_set->vk_buffer_views[info_index];
for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index)
descriptor_set->vk_buffer_views[info_index] = locations[info_index].src.u.view_info.view->u.vk_buffer_view;
descriptor_set->vk_buffer_views[info_index] = locations[info_index].src.s.u.view_info.view->u.vk_buffer_view;
}
break;
case VK_DESCRIPTOR_TYPE_SAMPLER:
@ -2170,11 +2170,11 @@ static void d3d12_descriptor_heap_write_vk_descriptor_range(struct d3d12_descrip
{
descriptor_set->vk_descriptor_writes[write_index].pImageInfo = &descriptor_set->vk_image_infos[info_index];
for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index)
descriptor_set->vk_image_infos[info_index].sampler = locations[info_index].src.u.view_info.view->u.vk_sampler;
descriptor_set->vk_image_infos[info_index].sampler = locations[info_index].src.s.u.view_info.view->u.vk_sampler;
}
break;
default:
ERR("Unhandled descriptor type %#x.\n", locations[0].src.vk_descriptor_type);
ERR("Unhandled descriptor type %#x.\n", locations[0].src.s.vk_descriptor_type);
break;
}
}
@ -2230,36 +2230,35 @@ static void d3d12_desc_write_vk_heap(const struct d3d12_desc *dst, const struct
const struct vkd3d_vk_device_procs *vk_procs;
bool is_null = false;
descriptor_heap = vkd3d_gpu_descriptor_allocator_heap_from_descriptor(&device->gpu_descriptor_allocator, dst);
descriptor_heap = d3d12_desc_get_descriptor_heap(dst);
descriptor_set = &descriptor_heap->vk_descriptor_sets[vkd3d_vk_descriptor_set_index_from_vk_descriptor_type(
src->vk_descriptor_type)];
src->s.vk_descriptor_type)];
vk_procs = &device->vk_procs;
vkd3d_mutex_lock(&descriptor_heap->vk_sets_mutex);
descriptor_set->vk_descriptor_writes[0].dstArrayElement = dst
- (const struct d3d12_desc *)descriptor_heap->descriptors;
descriptor_set->vk_descriptor_writes[0].dstArrayElement = dst->index;
descriptor_set->vk_descriptor_writes[0].descriptorCount = 1;
switch (src->vk_descriptor_type)
switch (src->s.vk_descriptor_type)
{
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
descriptor_set->vk_descriptor_writes[0].pBufferInfo = &src->u.vk_cbv_info;
is_null = !src->u.vk_cbv_info.buffer;
descriptor_set->vk_descriptor_writes[0].pBufferInfo = &src->s.u.vk_cbv_info;
is_null = !src->s.u.vk_cbv_info.buffer;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
is_null = !(descriptor_set->vk_image_infos[0].imageView = src->u.view_info.view->u.vk_image_view);
is_null = !(descriptor_set->vk_image_infos[0].imageView = src->s.u.view_info.view->u.vk_image_view);
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
descriptor_set->vk_descriptor_writes[0].pTexelBufferView = &src->u.view_info.view->u.vk_buffer_view;
is_null = !src->u.view_info.view->u.vk_buffer_view;
descriptor_set->vk_descriptor_writes[0].pTexelBufferView = &src->s.u.view_info.view->u.vk_buffer_view;
is_null = !src->s.u.view_info.view->u.vk_buffer_view;
break;
case VK_DESCRIPTOR_TYPE_SAMPLER:
descriptor_set->vk_image_infos[0].sampler = src->u.view_info.view->u.vk_sampler;
descriptor_set->vk_image_infos[0].sampler = src->s.u.view_info.view->u.vk_sampler;
break;
default:
ERR("Unhandled descriptor type %#x.\n", src->vk_descriptor_type);
ERR("Unhandled descriptor type %#x.\n", src->s.vk_descriptor_type);
break;
}
if (is_null && device->vk_info.EXT_robustness2)
@ -2272,13 +2271,12 @@ static void d3d12_desc_write_vk_heap(const struct d3d12_desc *dst, const struct
VK_CALL(vkUpdateDescriptorSets(device->vk_device, 1, descriptor_set->vk_descriptor_writes, 0, NULL));
if (src->magic == VKD3D_DESCRIPTOR_MAGIC_UAV && src->u.view_info.view->vk_counter_view)
if (src->s.magic == VKD3D_DESCRIPTOR_MAGIC_UAV && src->s.u.view_info.view->vk_counter_view)
{
descriptor_set = &descriptor_heap->vk_descriptor_sets[VKD3D_SET_INDEX_UAV_COUNTER];
descriptor_set->vk_descriptor_writes[0].dstArrayElement = dst
- (const struct d3d12_desc *)descriptor_heap->descriptors;
descriptor_set->vk_descriptor_writes[0].dstArrayElement = dst->index;
descriptor_set->vk_descriptor_writes[0].descriptorCount = 1;
descriptor_set->vk_descriptor_writes[0].pTexelBufferView = &src->u.view_info.view->vk_counter_view;
descriptor_set->vk_descriptor_writes[0].pTexelBufferView = &src->s.u.view_info.view->vk_counter_view;
VK_CALL(vkUpdateDescriptorSets(device->vk_device, 1, descriptor_set->vk_descriptor_writes, 0, NULL));
}
@ -2293,15 +2291,15 @@ static void d3d12_desc_write_atomic_d3d12_only(struct d3d12_desc *dst, const str
mutex = d3d12_device_get_descriptor_mutex(device, dst);
vkd3d_mutex_lock(mutex);
if (!(dst->magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW) || InterlockedDecrement(&dst->u.view_info.view->refcount))
if (!(dst->s.magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW) || InterlockedDecrement(&dst->s.u.view_info.view->refcount))
{
*dst = *src;
d3d12_desc_copy_raw(dst, src);
vkd3d_mutex_unlock(mutex);
return;
}
defunct_view = dst->u.view_info.view;
*dst = *src;
defunct_view = dst->s.u.view_info.view;
d3d12_desc_copy_raw(dst, src);
vkd3d_mutex_unlock(mutex);
/* Destroy the view after unlocking to reduce wait time. */
@ -2318,11 +2316,11 @@ void d3d12_desc_write_atomic(struct d3d12_desc *dst, const struct d3d12_desc *sr
vkd3d_mutex_lock(mutex);
/* Nothing to do for VKD3D_DESCRIPTOR_MAGIC_CBV. */
if ((dst->magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
&& !InterlockedDecrement(&dst->u.view_info.view->refcount))
defunct_view = dst->u.view_info.view;
if ((dst->s.magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
&& !InterlockedDecrement(&dst->s.u.view_info.view->refcount))
defunct_view = dst->s.u.view_info.view;
*dst = *src;
d3d12_desc_copy_raw(dst, src);
vkd3d_mutex_unlock(mutex);
@ -2330,7 +2328,7 @@ void d3d12_desc_write_atomic(struct d3d12_desc *dst, const struct d3d12_desc *sr
if (defunct_view)
vkd3d_view_destroy(defunct_view, device);
if (device->use_vk_heaps && dst->magic)
if (device->use_vk_heaps && dst->s.magic)
d3d12_desc_write_vk_heap(dst, src, device);
}
@ -2360,6 +2358,7 @@ void d3d12_desc_copy_vk_heap_range(struct d3d12_desc_copy_location *locations, c
++descriptor_set->vk_descriptor_writes[write_count - 1].descriptorCount;
continue;
}
/* Accessing dst->index will be slow if a cache miss occurs, so calculate instead. */
descriptor_set->vk_descriptor_writes[write_count].dstArrayElement = locations[i].dst
- (const struct d3d12_desc *)descriptor_heap->descriptors;
descriptor_set->vk_descriptor_writes[write_count++].descriptorCount = 1;
@ -2377,10 +2376,11 @@ void d3d12_desc_copy_vk_heap_range(struct d3d12_desc_copy_location *locations, c
for (i = 0, write_count = 0; i < info->count; ++i)
{
if (!locations[i].src.u.view_info.view->vk_counter_view)
if (!locations[i].src.s.u.view_info.view->vk_counter_view)
continue;
descriptor_set->vk_buffer_views[write_count] = locations[i].src.u.view_info.view->vk_counter_view;
descriptor_set->vk_buffer_views[write_count] = locations[i].src.s.u.view_info.view->vk_counter_view;
descriptor_set->vk_descriptor_writes[write_count].pTexelBufferView = &descriptor_set->vk_buffer_views[write_count];
/* Accessing dst->index will be slow if a cache miss occurs, so calculate instead. */
descriptor_set->vk_descriptor_writes[write_count].dstArrayElement = locations[i].dst
- (const struct d3d12_desc *)descriptor_heap->descriptors;
descriptor_set->vk_descriptor_writes[write_count++].descriptorCount = 1;
@ -2404,10 +2404,10 @@ void d3d12_desc_copy(struct d3d12_desc *dst, const struct d3d12_desc *src,
mutex = d3d12_device_get_descriptor_mutex(device, src);
vkd3d_mutex_lock(mutex);
if (src->magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
vkd3d_view_incref(src->u.view_info.view);
if (src->s.magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW)
vkd3d_view_incref(src->s.u.view_info.view);
tmp = *src;
d3d12_desc_copy_raw(&tmp, src);
vkd3d_mutex_unlock(mutex);
@ -2825,7 +2825,7 @@ void d3d12_desc_create_cbv(struct d3d12_desc *descriptor,
return;
}
buffer_info = &descriptor->u.vk_cbv_info;
buffer_info = &descriptor->s.u.vk_cbv_info;
if (desc->BufferLocation)
{
resource = vkd3d_gpu_va_allocator_dereference(&device->gpu_va_allocator, desc->BufferLocation);
@ -2841,8 +2841,8 @@ void d3d12_desc_create_cbv(struct d3d12_desc *descriptor,
buffer_info->range = VK_WHOLE_SIZE;
}
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_CBV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_CBV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
}
static unsigned int vkd3d_view_flags_from_d3d12_buffer_srv_flags(D3D12_BUFFER_SRV_FLAGS flags)
@ -2878,10 +2878,10 @@ static void vkd3d_create_null_srv(struct d3d12_desc *descriptor,
vkd3d_get_format(device, DXGI_FORMAT_R32_UINT, false),
0, VKD3D_NULL_BUFFER_SIZE, &view))
{
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
}
return;
@ -2923,10 +2923,10 @@ static void vkd3d_create_null_srv(struct d3d12_desc *descriptor,
if (!vkd3d_create_texture_view(device, vk_image, &vkd3d_desc, &view))
return;
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
}
static void vkd3d_create_buffer_srv(struct d3d12_desc *descriptor,
@ -2954,10 +2954,10 @@ static void vkd3d_create_buffer_srv(struct d3d12_desc *descriptor,
desc->u.Buffer.StructureByteStride, flags, &view))
return;
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
}
static VkImageAspectFlags vk_image_aspect_flags_from_d3d12_plane_slice(const struct vkd3d_format *format,
@ -3085,10 +3085,10 @@ void d3d12_desc_create_srv(struct d3d12_desc *descriptor,
if (!vkd3d_create_texture_view(device, resource->u.vk_image, &vkd3d_desc, &view))
return;
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_SRV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
}
static unsigned int vkd3d_view_flags_from_d3d12_buffer_uav_flags(D3D12_BUFFER_UAV_FLAGS flags)
@ -3124,10 +3124,10 @@ static void vkd3d_create_null_uav(struct d3d12_desc *descriptor,
vkd3d_get_format(device, DXGI_FORMAT_R32_UINT, false),
0, VKD3D_NULL_BUFFER_SIZE, &view))
{
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
}
return;
@ -3169,10 +3169,10 @@ static void vkd3d_create_null_uav(struct d3d12_desc *descriptor,
if (!vkd3d_create_texture_view(device, vk_image, &vkd3d_desc, &view))
return;
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
}
static void vkd3d_create_buffer_uav(struct d3d12_desc *descriptor, struct d3d12_device *device,
@ -3200,10 +3200,10 @@ static void vkd3d_create_buffer_uav(struct d3d12_desc *descriptor, struct d3d12_
desc->u.Buffer.StructureByteStride, flags, &view))
return;
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
if (counter_resource)
{
@ -3276,10 +3276,10 @@ static void vkd3d_create_texture_uav(struct d3d12_desc *descriptor,
if (!vkd3d_create_texture_view(device, resource->u.vk_image, &vkd3d_desc, &view))
return;
descriptor->magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
descriptor->u.view_info.view = view;
descriptor->u.view_info.written_serial_id = view->serial_id;
descriptor->s.magic = VKD3D_DESCRIPTOR_MAGIC_UAV;
descriptor->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
descriptor->s.u.view_info.view = view;
descriptor->s.u.view_info.written_serial_id = view->serial_id;
}
void d3d12_desc_create_uav(struct d3d12_desc *descriptor, struct d3d12_device *device,
@ -3439,10 +3439,10 @@ void d3d12_desc_create_sampler(struct d3d12_desc *sampler,
return;
}
sampler->magic = VKD3D_DESCRIPTOR_MAGIC_SAMPLER;
sampler->vk_descriptor_type = VK_DESCRIPTOR_TYPE_SAMPLER;
sampler->u.view_info.view = view;
sampler->u.view_info.written_serial_id = view->serial_id;
sampler->s.magic = VKD3D_DESCRIPTOR_MAGIC_SAMPLER;
sampler->s.vk_descriptor_type = VK_DESCRIPTOR_TYPE_SAMPLER;
sampler->s.u.view_info.view = view;
sampler->s.u.view_info.written_serial_id = view->serial_id;
}
HRESULT vkd3d_create_static_sampler(struct d3d12_device *device,
@ -3708,9 +3708,6 @@ static ULONG STDMETHODCALLTYPE d3d12_descriptor_heap_Release(ID3D12DescriptorHea
d3d12_desc_destroy(&descriptors[i], device);
}
if (device->vk_info.EXT_descriptor_indexing && !vkd3d_gpu_descriptor_allocator_unregister_range(
&device->gpu_descriptor_allocator, descriptors))
ERR("Failed to unregister descriptor range.\n");
break;
}
@ -4025,6 +4022,8 @@ HRESULT d3d12_descriptor_heap_create(struct d3d12_device *device,
{
size_t max_descriptor_count, descriptor_size;
struct d3d12_descriptor_heap *object;
struct d3d12_desc *dst;
unsigned int i;
HRESULT hr;
if (!(descriptor_size = d3d12_device_get_descriptor_handle_increment_size(device, desc->Type)))
@ -4057,12 +4056,19 @@ HRESULT d3d12_descriptor_heap_create(struct d3d12_device *device,
return hr;
}
memset(object->descriptors, 0, descriptor_size * desc->NumDescriptors);
if ((desc->Type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV || desc->Type == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER)
&& device->vk_info.EXT_descriptor_indexing && !vkd3d_gpu_descriptor_allocator_register_range(
&device->gpu_descriptor_allocator, (struct d3d12_desc *)object->descriptors, desc->NumDescriptors))
ERR("Failed to register descriptor range.\n");
if (desc->Type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV || desc->Type == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER)
{
dst = (struct d3d12_desc *)object->descriptors;
for (i = 0; i < desc->NumDescriptors; ++i)
{
memset(&dst[i].s, 0, sizeof(dst[i].s));
dst[i].index = i;
}
}
else
{
memset(object->descriptors, 0, descriptor_size * desc->NumDescriptors);
}
TRACE("Created descriptor heap %p.\n", object);

View File

@ -391,30 +391,6 @@ D3D12_GPU_VIRTUAL_ADDRESS vkd3d_gpu_va_allocator_allocate(struct vkd3d_gpu_va_al
void *vkd3d_gpu_va_allocator_dereference(struct vkd3d_gpu_va_allocator *allocator, D3D12_GPU_VIRTUAL_ADDRESS address);
void vkd3d_gpu_va_allocator_free(struct vkd3d_gpu_va_allocator *allocator, D3D12_GPU_VIRTUAL_ADDRESS address);
struct vkd3d_gpu_descriptor_allocation
{
const struct d3d12_desc *base;
size_t count;
};
struct vkd3d_gpu_descriptor_allocator
{
struct vkd3d_mutex mutex;
struct vkd3d_gpu_descriptor_allocation *allocations;
size_t allocations_size;
size_t allocation_count;
};
size_t vkd3d_gpu_descriptor_allocator_range_size_from_descriptor(
struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *desc);
bool vkd3d_gpu_descriptor_allocator_register_range(struct vkd3d_gpu_descriptor_allocator *allocator,
const struct d3d12_desc *base, size_t count);
bool vkd3d_gpu_descriptor_allocator_unregister_range(
struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *base);
struct d3d12_descriptor_heap *vkd3d_gpu_descriptor_allocator_heap_from_descriptor(
struct vkd3d_gpu_descriptor_allocator *allocator, const struct d3d12_desc *desc);
struct vkd3d_render_pass_key
{
unsigned int attachment_count;
@ -718,13 +694,17 @@ struct vkd3d_view_info
struct d3d12_desc
{
uint32_t magic;
VkDescriptorType vk_descriptor_type;
union
struct
{
VkDescriptorBufferInfo vk_cbv_info;
struct vkd3d_view_info view_info;
} u;
uint32_t magic;
VkDescriptorType vk_descriptor_type;
union
{
VkDescriptorBufferInfo vk_cbv_info;
struct vkd3d_view_info view_info;
} u;
} s;
unsigned int index;
};
static inline struct d3d12_desc *d3d12_desc_from_cpu_handle(D3D12_CPU_DESCRIPTOR_HANDLE cpu_handle)
@ -737,6 +717,11 @@ static inline struct d3d12_desc *d3d12_desc_from_gpu_handle(D3D12_GPU_DESCRIPTOR
return (struct d3d12_desc *)(intptr_t)gpu_handle.ptr;
}
/* Copy only the descriptor payload ('s'). The 'index' member records the
 * descriptor's position in its containing heap's array and must never be
 * overwritten by a copy, as it is what d3d12_desc_get_descriptor_heap()
 * uses to recover the heap pointer. */
static inline void d3d12_desc_copy_raw(struct d3d12_desc *dst, const struct d3d12_desc *src)
{
dst->s = src->s;
}
void d3d12_desc_copy(struct d3d12_desc *dst, const struct d3d12_desc *src, struct d3d12_device *device);
void d3d12_desc_create_cbv(struct d3d12_desc *descriptor,
struct d3d12_device *device, const D3D12_CONSTANT_BUFFER_VIEW_DESC *desc);
@ -857,6 +842,17 @@ struct d3d12_descriptor_heap
BYTE descriptors[];
};
/* Derive the containing descriptor heap from a descriptor pointer.
 * Each descriptor stores its own array index, so stepping back by that
 * index yields element 0 of the heap's trailing 'descriptors' array,
 * from which CONTAINING_RECORD() recovers the heap object itself. */
static inline struct d3d12_descriptor_heap *d3d12_desc_get_descriptor_heap(const struct d3d12_desc *descriptor)
{
const struct d3d12_desc *heap_base = descriptor - descriptor->index;

return CONTAINING_RECORD(heap_base, struct d3d12_descriptor_heap, descriptors);
}
static inline unsigned int d3d12_desc_heap_range_size(const struct d3d12_desc *descriptor)
{
const struct d3d12_descriptor_heap *heap = d3d12_desc_get_descriptor_heap(descriptor);
return heap->desc.NumDescriptors - descriptor->index;
}
HRESULT d3d12_descriptor_heap_create(struct d3d12_device *device,
const D3D12_DESCRIPTOR_HEAP_DESC *desc, struct d3d12_descriptor_heap **descriptor_heap);
@ -1465,7 +1461,6 @@ struct d3d12_device
PFN_vkd3d_signal_event signal_event;
size_t wchar_size;
struct vkd3d_gpu_descriptor_allocator gpu_descriptor_allocator;
struct vkd3d_gpu_va_allocator gpu_va_allocator;
struct vkd3d_mutex mutex;