From 2d21093fb4c3716f6bb02e4021d23e3ead9d381a Mon Sep 17 00:00:00 2001
From: Alistair Leslie-Hughes <leslie_alistair@hotmail.com>
Date: Wed, 24 Jan 2024 08:40:28 +1100
Subject: [PATCH] Updated vkd3d to 72e2eeaf146e856497890241213a98327c098c6d.

---
 libs/vkd3d/include/private/vkd3d_common.h | 34 +-
 libs/vkd3d/include/vkd3d_shader.h | 2 +
 libs/vkd3d/libs/vkd3d-common/blob.c | 6 +-
 libs/vkd3d/libs/vkd3d-common/debug.c | 6 +-
 libs/vkd3d/libs/vkd3d-shader/d3d_asm.c | 28 +-
 libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 44 +-
 libs/vkd3d/libs/vkd3d-shader/dxil.c | 1064 ++++++++++++++++-
 libs/vkd3d/libs/vkd3d-shader/glsl.c | 6 +-
 libs/vkd3d/libs/vkd3d-shader/hlsl.y | 40 +
 libs/vkd3d/libs/vkd3d-shader/ir.c | 372 +++++-
 libs/vkd3d/libs/vkd3d-shader/spirv.c | 93 +-
 libs/vkd3d/libs/vkd3d-shader/tpf.c | 22 +-
 .../libs/vkd3d-shader/vkd3d_shader_main.c | 19 +-
 .../libs/vkd3d-shader/vkd3d_shader_private.h | 51 +-
 libs/vkd3d/libs/vkd3d/command.c | 6 +-
 libs/vkd3d/libs/vkd3d/device.c | 9 +-
 libs/vkd3d/libs/vkd3d/vkd3d_private.h | 3 +-
 17 files changed, 1571 insertions(+), 234 deletions(-)

diff --git a/libs/vkd3d/include/private/vkd3d_common.h b/libs/vkd3d/include/private/vkd3d_common.h
index 63e21c22067..979676c4d5a 100644
--- a/libs/vkd3d/include/private/vkd3d_common.h
+++ b/libs/vkd3d/include/private/vkd3d_common.h
@@ -278,30 +278,42 @@ static inline uint64_t vkd3d_atomic_add_fetch_u64(uint64_t volatile *x, uint64_t
 #endif
 }

+static inline uint32_t vkd3d_atomic_add_fetch_u32(uint32_t volatile *x, uint32_t val)
+{
+#if HAVE_SYNC_ADD_AND_FETCH
+ return __sync_add_and_fetch(x, val);
+#elif defined(_WIN32)
+ return InterlockedAdd((LONG *)x, val);
+#else
+# error "vkd3d_atomic_add_fetch_u32() not implemented for this platform"
+#endif
+}
+
 static inline uint64_t vkd3d_atomic_increment_u64(uint64_t volatile *x)
 {
 return vkd3d_atomic_add_fetch_u64(x, 1);
 }

+static inline uint32_t vkd3d_atomic_decrement_u32(uint32_t volatile *x)
+{
+ return vkd3d_atomic_add_fetch_u32(x, ~0u);
+}
+
+static inline uint32_t vkd3d_atomic_increment_u32(uint32_t volatile *x)
+{
+ return vkd3d_atomic_add_fetch_u32(x, 1);
+}
+
 #ifndef _WIN32
-# if HAVE_SYNC_ADD_AND_FETCH
 static inline LONG InterlockedIncrement(LONG volatile *x)
 {
- return __sync_add_and_fetch(x, 1);
+ return vkd3d_atomic_increment_u32((uint32_t *)x);
 }
-# else
-# error "InterlockedIncrement() not implemented for this platform"
-# endif /* HAVE_SYNC_ADD_AND_FETCH */

-# if HAVE_SYNC_SUB_AND_FETCH
 static inline LONG InterlockedDecrement(LONG volatile *x)
 {
- return __sync_sub_and_fetch(x, 1);
+ return vkd3d_atomic_decrement_u32((uint32_t *)x);
 }
-# else
-# error "InterlockedDecrement() not implemented for this platform"
-# endif
-
 #endif /* _WIN32 */

 static inline void vkd3d_parse_version(const char *version, int *major, int *minor)
diff --git a/libs/vkd3d/include/vkd3d_shader.h b/libs/vkd3d/include/vkd3d_shader.h
index a9c9ccc4a52..449b3684a10 100644
--- a/libs/vkd3d/include/vkd3d_shader.h
+++ b/libs/vkd3d/include/vkd3d_shader.h
@@ -876,6 +876,8 @@ enum vkd3d_shader_spirv_extension
 VKD3D_SHADER_SPIRV_EXTENSION_EXT_DESCRIPTOR_INDEXING,
 /** \since 1.3 */
 VKD3D_SHADER_SPIRV_EXTENSION_EXT_STENCIL_EXPORT,
+ /** \since 1.11 */
+ VKD3D_SHADER_SPIRV_EXTENSION_EXT_VIEWPORT_INDEX_LAYER,

 VKD3D_FORCE_32_BIT_ENUM(VKD3D_SHADER_SPIRV_EXTENSION),
 };
diff --git a/libs/vkd3d/libs/vkd3d-common/blob.c b/libs/vkd3d/libs/vkd3d-common/blob.c
index fa2812619ac..4d347acef05 100644
--- a/libs/vkd3d/libs/vkd3d-common/blob.c
+++ b/libs/vkd3d/libs/vkd3d-common/blob.c
@@ -27,7 +27,7 @@
 struct vkd3d_blob
 {
 ID3D10Blob ID3DBlob_iface;
- LONG refcount;
+ unsigned int refcount;

 void *buffer;
 SIZE_T size;
@@ -59,7 +59,7 @@ static HRESULT STDMETHODCALLTYPE vkd3d_blob_QueryInterface(ID3DBlob *iface, REFI
 static ULONG STDMETHODCALLTYPE vkd3d_blob_AddRef(ID3DBlob *iface)
 {
 struct vkd3d_blob *blob = impl_from_ID3DBlob(iface);
- ULONG refcount = InterlockedIncrement(&blob->refcount);
+ unsigned int refcount = vkd3d_atomic_increment_u32(&blob->refcount);

 TRACE("%p increasing refcount to %u.\n", blob, refcount);

@@ -69,7 +69,7 @@ static ULONG STDMETHODCALLTYPE vkd3d_blob_AddRef(ID3DBlob *iface)
 static ULONG STDMETHODCALLTYPE vkd3d_blob_Release(ID3DBlob *iface)
 {
 struct vkd3d_blob *blob = impl_from_ID3DBlob(iface);
- ULONG refcount = InterlockedDecrement(&blob->refcount);
+ unsigned int refcount = vkd3d_atomic_decrement_u32(&blob->refcount);

 TRACE("%p decreasing refcount to %u.\n", blob, refcount);

diff --git a/libs/vkd3d/libs/vkd3d-common/debug.c b/libs/vkd3d/libs/vkd3d-common/debug.c
index aa7df5bd764..e12cd39450a 100644
--- a/libs/vkd3d/libs/vkd3d-common/debug.c
+++ b/libs/vkd3d/libs/vkd3d-common/debug.c
@@ -126,10 +126,10 @@ void vkd3d_dbg_set_log_callback(PFN_vkd3d_log callback)
 static char *get_buffer(void)
 {
 static char buffers[VKD3D_DEBUG_BUFFER_COUNT][VKD3D_DEBUG_BUFFER_SIZE];
- static LONG buffer_index;
- LONG current_index;
+ static unsigned int buffer_index;
+ unsigned int current_index;

- current_index = InterlockedIncrement(&buffer_index) % ARRAY_SIZE(buffers);
+ current_index = vkd3d_atomic_increment_u32(&buffer_index) % ARRAY_SIZE(buffers);
 return buffers[current_index];
 }

diff --git a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
index 5685fe4d4a4..d9751945d8a 100644
--- a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
+++ b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
@@ -242,6 +242,7 @@ static const char * const shader_opcode_names[] =
 [VKD3DSIH_NRM ] = "nrm",
 [VKD3DSIH_OR ] = "or",
 [VKD3DSIH_PHASE ] = "phase",
+ [VKD3DSIH_PHI ] = "phi",
 [VKD3DSIH_POW ] = "pow",
 [VKD3DSIH_RCP ] = "rcp",
 [VKD3DSIH_REP ] = "rep",
@@ -830,6 +831,13 @@ static void shader_print_uint_literal(struct vkd3d_d3d_asm_compiler *compiler,
 prefix, compiler->colours.literal, i, compiler->colours.reset, suffix);
 }

+static void shader_print_uint64_literal(struct vkd3d_d3d_asm_compiler *compiler,
+ const char *prefix, uint64_t i, const char *suffix)
+{
+ vkd3d_string_buffer_printf(&compiler->buffer, "%s%s%"PRIu64"%s%s",
+ prefix, compiler->colours.literal, i, compiler->colours.reset, suffix);
+}
+
 static void shader_print_hex_literal(struct vkd3d_d3d_asm_compiler *compiler,
 const char *prefix, unsigned int i, const char *suffix)
 {
@@ -1222,6 +1230,12 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const
 if (reg->dimension == VSIR_DIMENSION_VEC4)
 shader_print_double_literal(compiler, ", ", reg->u.immconst_f64[1], "");
 }
+ else if (reg->data_type == VKD3D_DATA_UINT64)
+ {
+ shader_print_uint64_literal(compiler, "", reg->u.immconst_u64[0], "");
+ if (reg->dimension == VSIR_DIMENSION_VEC4)
+ shader_print_uint64_literal(compiler, "", reg->u.immconst_u64[1], "");
+ }
 else
 {
 shader_addline(buffer, "<unhandled data type %#x>", reg->data_type);
@@ -2003,10 +2017,11 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
 shader_addline(buffer, "\n");
 }

-enum vkd3d_result vkd3d_dxbc_binary_to_text(const struct vkd3d_shader_instruction_array *instructions,
- const struct vkd3d_shader_version *shader_version, const struct vkd3d_shader_compile_info *compile_info,
+enum vkd3d_result vkd3d_dxbc_binary_to_text(const struct vsir_program *program,
+ const struct vkd3d_shader_compile_info *compile_info,
 struct vkd3d_shader_code *out, enum vsir_asm_dialect dialect)
 {
+ const struct vkd3d_shader_version *shader_version = &program->shader_version;
 enum vkd3d_shader_compile_option_formatting_flags formatting;
 struct vkd3d_d3d_asm_compiler compiler =
 {
@@ -2075,9 +2090,9 @@ enum vkd3d_result vkd3d_dxbc_binary_to_text(const struct vkd3d_shader_instructio
 shader_version->minor, compiler.colours.reset);

 indent = 0;
- for (i = 0; i < instructions->count; ++i)
+ for (i = 0; i < program->instructions.count; ++i)
 {
- struct vkd3d_shader_instruction *ins = &instructions->elements[i];
+ struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];

 switch (ins->handler_idx)
 {
@@ -2131,13 +2146,12 @@ enum vkd3d_result vkd3d_dxbc_binary_to_text(const struct vkd3d_shader_instructio
 return result;
 }

-void vkd3d_shader_trace(const struct vkd3d_shader_instruction_array *instructions,
- const struct vkd3d_shader_version *shader_version)
+void vkd3d_shader_trace(const struct vsir_program *program)
 {
 const char *p, *q, *end;
 struct vkd3d_shader_code code;

- if (vkd3d_dxbc_binary_to_text(instructions, shader_version, NULL, &code, VSIR_ASM_VSIR) != VKD3D_OK)
+ if (vkd3d_dxbc_binary_to_text(program, NULL, &code, VSIR_ASM_VSIR) != VKD3D_OK)
 return;

 end = (const char *)code.code + code.size;
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
index aa0dd8f4b0d..035095d5b48 100644
--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
+++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
@@ -414,6 +415,7 @@ static bool has_relative_address(uint32_t param)
 static const struct vkd3d_sm1_opcode_info *shader_sm1_get_opcode_info(
 const struct vkd3d_shader_sm1_parser *sm1, enum vkd3d_sm1_opcode opcode)
 {
+ const struct vkd3d_shader_version *version = &sm1->p.program.shader_version;
 const struct vkd3d_sm1_opcode_info *info;
 unsigned int i = 0;

@@ -424,8 +425,8 @@ static const struct vkd3d_sm1_opcode_info *shader_sm1_get_opcode_info(
 return NULL;

 if (opcode == info->sm1_opcode
- && vkd3d_shader_ver_ge(&sm1->p.shader_version, info->min_version.major, info->min_version.minor)
- && (vkd3d_shader_ver_le(&sm1->p.shader_version, info->max_version.major, info->max_version.minor)
+ && vkd3d_shader_ver_ge(version, info->min_version.major, info->min_version.minor)
+ && (vkd3d_shader_ver_le(version, info->max_version.major, info->max_version.minor)
 || !info->max_version.major))
 return info;
 }
@@ -444,7 +445,7 @@ static uint32_t swizzle_from_sm1(uint32_t swizzle)
 shader_sm1_get_swizzle_component(swizzle, 3));
 }

-static void shader_sm1_parse_src_param(uint32_t param, const struct vkd3d_shader_src_param *rel_addr,
+static void shader_sm1_parse_src_param(uint32_t param, struct vkd3d_shader_src_param *rel_addr,
 struct vkd3d_shader_src_param *src)
 {
 enum vkd3d_shader_register_type reg_type = ((param & VKD3D_SM1_REGISTER_TYPE_MASK) >> VKD3D_SM1_REGISTER_TYPE_SHIFT)
@@ -465,7 +466,7 @@ static void shader_sm1_parse_src_param(uint32_t param, const struct vkd3d_shader
 src->modifiers = (param & VKD3D_SM1_SRC_MODIFIER_MASK) >> VKD3D_SM1_SRC_MODIFIER_SHIFT;
 }

-static void shader_sm1_parse_dst_param(uint32_t param, const struct vkd3d_shader_src_param *rel_addr,
+static void shader_sm1_parse_dst_param(uint32_t param, struct vkd3d_shader_src_param *rel_addr,
 struct vkd3d_shader_dst_param *dst)
 {
 enum vkd3d_shader_register_type reg_type = ((param & VKD3D_SM1_REGISTER_TYPE_MASK) >> VKD3D_SM1_REGISTER_TYPE_SHIFT)
@@ -567,7 +568,7 @@ static bool add_signature_element(struct vkd3d_shader_sm1_parser *sm1, bool outp
 element->register_count = 1;
 element->mask = mask;
 element->used_mask = is_dcl ? 0 : mask;
- if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_PIXEL && !output)
+ if (sm1->p.program.shader_version.type == VKD3D_SHADER_TYPE_PIXEL && !output)
 element->interpolation_mode = VKD3DSIM_LINEAR;

 return true;
@@ -597,20 +598,20 @@ static void add_signature_mask(struct vkd3d_shader_sm1_parser *sm1, bool output,
 static bool add_signature_element_from_register(struct vkd3d_shader_sm1_parser *sm1,
 const struct vkd3d_shader_register *reg, bool is_dcl, unsigned int mask)
 {
+ const struct vkd3d_shader_version *version = &sm1->p.program.shader_version;
 unsigned int register_index = reg->idx[0].offset;

 switch (reg->type)
 {
 case VKD3DSPR_TEMP:
- if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_PIXEL
- && sm1->p.shader_version.major == 1 && !register_index)
+ if (version->type == VKD3D_SHADER_TYPE_PIXEL && version->major == 1 && !register_index)
 return add_signature_element(sm1, true, "COLOR", 0, VKD3D_SHADER_SV_TARGET, 0, is_dcl, mask);
 return true;

 case VKD3DSPR_INPUT:
 /* For vertex shaders or sm3 pixel shaders, we should have already
 * had a DCL instruction. Otherwise, this is a colour input. */
- if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_VERTEX || sm1->p.shader_version.major == 3)
+ if (version->type == VKD3D_SHADER_TYPE_VERTEX || version->major == 3)
 {
 add_signature_mask(sm1, false, register_index, mask);
 return true;
@@ -620,19 +621,19 @@ static bool add_signature_element_from_register(struct vkd3d_shader_sm1_parser *

 case VKD3DSPR_TEXTURE:
 /* For vertex shaders, this is ADDR. */
- if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_VERTEX)
+ if (version->type == VKD3D_SHADER_TYPE_VERTEX)
 return true;
 return add_signature_element(sm1, false, "TEXCOORD", register_index,
 VKD3D_SHADER_SV_NONE, register_index, is_dcl, mask);

 case VKD3DSPR_OUTPUT:
- if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_VERTEX)
+ if (version->type == VKD3D_SHADER_TYPE_VERTEX)
 {
 /* For sm < 2 vertex shaders, this is TEXCRDOUT.
 *
 * For sm3 vertex shaders, this is OUTPUT, but we already
 * should have had a DCL instruction. */
- if (sm1->p.shader_version.major == 3)
+ if (version->major == 3)
 {
 add_signature_mask(sm1, true, register_index, mask);
 return true;
@@ -700,6 +701,7 @@ static bool add_signature_element_from_register(struct vkd3d_shader_sm1_parser *
 static bool add_signature_element_from_semantic(struct vkd3d_shader_sm1_parser *sm1,
 const struct vkd3d_shader_semantic *semantic)
 {
+ const struct vkd3d_shader_version *version = &sm1->p.program.shader_version;
 const struct vkd3d_shader_register *reg = &semantic->resource.reg.reg;
 enum vkd3d_shader_sysval_semantic sysval = VKD3D_SHADER_SV_NONE;
 unsigned int mask = semantic->resource.reg.write_mask;
@@ -731,13 +733,13 @@ static bool add_signature_element_from_semantic(struct vkd3d_shader_sm1_parser *
 return add_signature_element_from_register(sm1, reg, true, mask);

 /* sm2 pixel shaders use DCL but don't provide a semantic. */
- if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_PIXEL && sm1->p.shader_version.major == 2)
+ if (version->type == VKD3D_SHADER_TYPE_PIXEL && version->major == 2)
 return add_signature_element_from_register(sm1, reg, true, mask);

 /* With the exception of vertex POSITION output, none of these are system
 * values. Pixel POSITION input is not equivalent to SV_Position; the closer
 * equivalent is VPOS, which is not declared as a semantic. */
- if (sm1->p.shader_version.type == VKD3D_SHADER_TYPE_VERTEX
+ if (version->type == VKD3D_SHADER_TYPE_VERTEX
 && output && semantic->usage == VKD3D_DECL_USAGE_POSITION)
 sysval = VKD3D_SHADER_SV_POSITION;

@@ -763,13 +765,13 @@ static void record_constant_register(struct vkd3d_shader_sm1_parser *sm1,
 static void shader_sm1_scan_register(struct vkd3d_shader_sm1_parser *sm1,
 const struct vkd3d_shader_register *reg, unsigned int mask, bool from_def)
 {
- struct vkd3d_shader_desc *desc = &sm1->p.shader_desc;
+ struct vsir_program *program = &sm1->p.program;
 uint32_t register_index = reg->idx[0].offset;

 switch (reg->type)
 {
 case VKD3DSPR_TEMP:
- desc->temp_count = max(desc->temp_count, register_index + 1);
+ program->temp_count = max(program->temp_count, register_index + 1);
 break;

 case VKD3DSPR_CONST:
@@ -824,7 +826,7 @@ static void shader_sm1_read_param(struct vkd3d_shader_sm1_parser *sm1,
 * VS >= 2.0 have relative addressing (with token)
 * VS >= 1.0 < 2.0 have relative addressing (without token)
 * The version check below should work in general. */
- if (sm1->p.shader_version.major < 2)
+ if (sm1->p.program.shader_version.major < 2)
 {
 *addr_token = (1u << 31)
 | ((VKD3DSPR_ADDR << VKD3D_SM1_REGISTER_TYPE_SHIFT2) & VKD3D_SM1_REGISTER_TYPE_MASK2)
@@ -853,7 +855,7 @@ static void shader_sm1_skip_opcode(const struct vkd3d_shader_sm1_parser *sm1, co
 /* Version 2.0+ shaders may contain address tokens, but fortunately they
 * have a useful length mask - use it here. Version 1.x shaders contain no
 * such tokens. */
- if (sm1->p.shader_version.major >= 2)
+ if (sm1->p.program.shader_version.major >= 2)
 {
 length = (opcode_token & VKD3D_SM1_INSTRUCTION_LENGTH_MASK) >> VKD3D_SM1_INSTRUCTION_LENGTH_SHIFT;
 *ptr += length;
@@ -883,7 +885,7 @@ static void shader_sm1_destroy(struct vkd3d_shader_parser *parser)
 {
 struct vkd3d_shader_sm1_parser *sm1 = vkd3d_shader_sm1_parser(parser);

- shader_instruction_array_destroy(&parser->instructions);
+ vsir_program_cleanup(&parser->program);
 free_shader_desc(&sm1->p.shader_desc);
 vkd3d_free(sm1);
 }
@@ -1109,7 +1111,7 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str
 vkd3d_shader_parser_error(&sm1->p, VKD3D_SHADER_ERROR_D3DBC_INVALID_OPCODE,
 "Invalid opcode %#x (token 0x%08x, shader version %u.%u).",
 opcode_token & VKD3D_SM1_OPCODE_MASK, opcode_token,
- sm1->p.shader_version.major, sm1->p.shader_version.minor);
+ sm1->p.program.shader_version.major, sm1->p.program.shader_version.minor);
 goto fail;
 }

@@ -1334,7 +1336,7 @@ int vkd3d_shader_sm1_parser_create(const struct vkd3d_shader_compile_info *compi
 return ret;
 }

- instructions = &sm1->p.instructions;
+ instructions = &sm1->p.program.instructions;
 while (!shader_sm1_is_end(sm1))
 {
 if (!shader_instruction_array_reserve(instructions, instructions->count + 1))
@@ -1389,7 +1391,7 @@ bool hlsl_sm1_register_from_semantic(struct hlsl_ctx *ctx, const struct hlsl_sem
 enum vkd3d_shader_type shader_type;
 unsigned int major_version;
 D3DSHADER_PARAM_REGISTER_TYPE type;
- DWORD offset;
+ unsigned int offset;
 }
 register_table[] =
 {
diff --git a/libs/vkd3d/libs/vkd3d-shader/dxil.c b/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
|
|
index 78c1a052539..15cc380f5f2 100644
|
|
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
|
|
@@ -171,6 +171,35 @@ enum bitcode_linkage
|
|
|
|
LINKAGE_INTERNAL = 3,
|
|
|
|
};
|
|
|
|
|
|
|
|
+enum dxil_resource_kind
|
|
|
|
+{
|
|
|
|
+ RESOURCE_KIND_INVALID = 0,
|
|
|
|
+ RESOURCE_KIND_TEXTURE1D = 1,
|
|
|
|
+ RESOURCE_KIND_TEXTURE2D = 2,
|
|
|
|
+ RESOURCE_KIND_TEXTURE2DMS = 3,
|
|
|
|
+ RESOURCE_KIND_TEXTURE3D = 4,
|
|
|
|
+ RESOURCE_KIND_TEXTURECUBE = 5,
|
|
|
|
+ RESOURCE_KIND_TEXTURE1DARRAY = 6,
|
|
|
|
+ RESOURCE_KIND_TEXTURE2DARRAY = 7,
|
|
|
|
+ RESOURCE_KIND_TEXTURE2DMSARRAY = 8,
|
|
|
|
+ RESOURCE_KIND_TEXTURECUBEARRAY = 9,
|
|
|
|
+ RESOURCE_KIND_TYPEDBUFFER = 10,
|
|
|
|
+ RESOURCE_KIND_RAWBUFFER = 11,
|
|
|
|
+ RESOURCE_KIND_STRUCTUREDBUFFER = 12,
|
|
|
|
+ RESOURCE_KIND_CBUFFER = 13,
|
|
|
|
+ RESOURCE_KIND_SAMPLER = 14,
|
|
|
|
+ RESOURCE_KIND_TBUFFER = 15,
|
|
|
|
+ RESOURCE_KIND_RTACCELERATIONSTRUCTURE = 16,
|
|
|
|
+ RESOURCE_KIND_FEEDBACKTEXTURE2D = 17,
|
|
|
|
+ RESOURCE_KIND_FEEDBACKTEXTURE2DARRAY = 18,
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+enum dxil_resource_type
|
|
|
|
+{
|
|
|
|
+ RESOURCE_TYPE_NON_RAW_STRUCTURED = 0,
|
|
|
|
+ RESOURCE_TYPE_RAW_STRUCTURED = 1,
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
enum dxil_component_type
|
|
|
|
{
|
|
|
|
COMPONENT_TYPE_INVALID = 0,
|
|
|
|
@@ -315,6 +344,7 @@ enum dx_intrinsic_opcode
|
|
|
|
DX_FIRST_BIT_SHI = 34,
|
|
|
|
DX_CREATE_HANDLE = 57,
|
|
|
|
DX_CBUFFER_LOAD_LEGACY = 59,
|
|
|
|
+ DX_BUFFER_LOAD = 68,
|
|
|
|
DX_DERIV_COARSEX = 83,
|
|
|
|
DX_DERIV_COARSEY = 84,
|
|
|
|
DX_DERIV_FINEX = 85,
|
|
|
|
@@ -472,18 +502,67 @@ struct sm6_symbol
|
|
|
|
const char *name;
|
|
|
|
};
|
|
|
|
|
|
|
|
+struct incoming_value
|
|
|
|
+{
|
|
|
|
+ const struct sm6_block *block;
|
|
|
|
+ struct vkd3d_shader_register reg;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct sm6_phi
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_register reg;
|
|
|
|
+ struct incoming_value *incoming;
|
|
|
|
+ size_t incoming_capacity;
|
|
|
|
+ size_t incoming_count;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+enum sm6_block_terminator_type
|
|
|
|
+{
|
|
|
|
+ TERMINATOR_UNCOND_BR,
|
|
|
|
+ TERMINATOR_COND_BR,
|
|
|
|
+ TERMINATOR_SWITCH,
|
|
|
|
+ TERMINATOR_RET,
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct terminator_case
|
|
|
|
+{
|
|
|
|
+ const struct sm6_block *block;
|
|
|
|
+ uint64_t value;
|
|
|
|
+ bool is_default;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct sm6_block_terminator
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_register conditional_reg;
|
|
|
|
+ enum sm6_block_terminator_type type;
|
|
|
|
+ const struct sm6_block *true_block;
|
|
|
|
+ const struct sm6_block *false_block;
|
|
|
|
+ struct terminator_case *cases;
|
|
|
|
+ unsigned int case_count;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
struct sm6_block
|
|
|
|
{
|
|
|
|
struct vkd3d_shader_instruction *instructions;
|
|
|
|
size_t instruction_capacity;
|
|
|
|
size_t instruction_count;
|
|
|
|
+
|
|
|
|
+ /* A nonzero id. */
|
|
|
|
+ unsigned int id;
|
|
|
|
+
|
|
|
|
+ struct sm6_phi *phi;
|
|
|
|
+ size_t phi_capacity;
|
|
|
|
+ size_t phi_count;
|
|
|
|
+
|
|
|
|
+ struct sm6_block_terminator terminator;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct sm6_function
|
|
|
|
{
|
|
|
|
const struct sm6_value *declaration;
|
|
|
|
|
|
|
|
- struct sm6_block *blocks[1];
|
|
|
|
+ struct sm6_block **blocks;
|
|
|
|
+ size_t block_capacity;
|
|
|
|
size_t block_count;
|
|
|
|
|
|
|
|
size_t value_count;
|
|
|
|
@@ -565,6 +644,11 @@ struct sm6_descriptor_info
|
|
|
|
enum vkd3d_shader_descriptor_type type;
|
|
|
|
unsigned int id;
|
|
|
|
struct vkd3d_shader_register_range range;
|
|
|
|
+ enum vkd3d_shader_resource_type resource_type;
|
|
|
|
+ enum dxil_resource_kind kind;
|
|
|
|
+ enum vkd3d_data_type resource_data_type;
|
|
|
|
+ enum vkd3d_shader_register_type reg_type;
|
|
|
|
+ enum vkd3d_data_type reg_data_type;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct sm6_parser
|
|
|
|
@@ -1937,6 +2021,12 @@ static inline bool sm6_value_is_constant(const struct sm6_value *value)
|
|
|
|
return sm6_value_is_register(value) && register_is_constant(&value->u.reg);
|
|
|
|
}
|
|
|
|
|
|
|
|
+static bool sm6_value_is_constant_zero(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ /* Constant vectors do not occur. */
|
|
|
|
+ return sm6_value_is_register(value) && register_is_scalar_constant_zero(&value->u.reg);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static inline bool sm6_value_is_undef(const struct sm6_value *value)
|
|
|
|
{
|
|
|
|
return sm6_value_is_register(value) && value->u.reg.type == VKD3DSPR_UNDEF;
|
|
|
|
@@ -1947,6 +2037,11 @@ static bool sm6_value_is_icb(const struct sm6_value *value)
|
|
|
|
return value->value_type == VALUE_TYPE_ICB;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static bool sm6_value_is_ssa(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ return sm6_value_is_register(value) && register_is_ssa(&value->u.reg);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static inline unsigned int sm6_value_get_constant_uint(const struct sm6_value *value)
|
|
|
|
{
|
|
|
|
if (!sm6_value_is_constant(value))
|
|
|
|
@@ -1954,11 +2049,25 @@ static inline unsigned int sm6_value_get_constant_uint(const struct sm6_value *v
|
|
|
|
return register_get_uint_value(&value->u.reg);
|
|
|
|
}
|
|
|
|
|
|
|
|
+static uint64_t sm6_value_get_constant_uint64(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ if (!sm6_value_is_constant(value))
|
|
|
|
+ return UINT64_MAX;
|
|
|
|
+ return register_get_uint64_value(&value->u.reg);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static unsigned int sm6_parser_alloc_ssa_id(struct sm6_parser *sm6)
|
|
|
|
{
|
|
|
|
return sm6->ssa_next_id++;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static void instruction_init_with_resource(struct vkd3d_shader_instruction *ins,
|
|
|
|
+ enum vkd3d_shader_opcode handler_idx, const struct sm6_value *resource, struct sm6_parser *sm6)
|
|
|
|
+{
|
|
|
|
+ vsir_instruction_init(ins, &sm6->p.location, handler_idx);
|
|
|
|
+ ins->resource_type = resource->u.handle.d->resource_type;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static struct vkd3d_shader_src_param *instruction_src_params_alloc(struct vkd3d_shader_instruction *ins,
|
|
|
|
unsigned int count, struct sm6_parser *sm6)
|
|
|
|
{
|
|
|
|
@@ -2463,7 +2572,7 @@ static enum vkd3d_result value_allocate_constant_array(struct sm6_value *dst, co
|
|
|
|
"Out of memory allocating an immediate constant buffer of count %u.", count);
|
|
|
|
return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
}
|
|
|
|
- if (!shader_instruction_array_add_icb(&sm6->p.instructions, icb))
|
|
|
|
+ if (!shader_instruction_array_add_icb(&sm6->p.program.instructions, icb))
|
|
|
|
{
|
|
|
|
ERR("Failed to store icb object.\n");
|
|
|
|
vkd3d_free(icb);
|
|
|
|
@@ -2659,12 +2768,12 @@ static bool bitcode_parse_alignment(uint64_t encoded_alignment, unsigned int *al
|
|
|
|
|
|
|
|
static struct vkd3d_shader_instruction *sm6_parser_require_space(struct sm6_parser *sm6, size_t extra)
|
|
|
|
{
|
|
|
|
- if (!shader_instruction_array_reserve(&sm6->p.instructions, sm6->p.instructions.count + extra))
|
|
|
|
+ if (!shader_instruction_array_reserve(&sm6->p.program.instructions, sm6->p.program.instructions.count + extra))
|
|
|
|
{
|
|
|
|
ERR("Failed to allocate instruction.\n");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
- return &sm6->p.instructions.elements[sm6->p.instructions.count];
|
|
|
|
+ return &sm6->p.program.instructions.elements[sm6->p.program.instructions.count];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Space should be reserved before calling this. It is intended to require no checking of the returned pointer. */
|
|
|
|
@@ -2674,7 +2783,7 @@ static struct vkd3d_shader_instruction *sm6_parser_add_instruction(struct sm6_pa
|
|
|
|
struct vkd3d_shader_instruction *ins = sm6_parser_require_space(sm6, 1);
|
|
|
|
assert(ins);
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, handler_idx);
|
|
|
|
- ++sm6->p.instructions.count;
|
|
|
|
+ ++sm6->p.program.instructions.count;
|
|
|
|
return ins;
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -2937,9 +3046,9 @@ static enum vkd3d_result sm6_parser_globals_init(struct sm6_parser *sm6)
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Resolve initialiser forward references. */
|
|
|
|
- for (i = 0; i < sm6->p.instructions.count; ++i)
|
|
|
|
+ for (i = 0; i < sm6->p.program.instructions.count; ++i)
|
|
|
|
{
|
|
|
|
- ins = &sm6->p.instructions.elements[i];
|
|
|
|
+ ins = &sm6->p.program.instructions.elements[i];
|
|
|
|
if (ins->handler_idx == VKD3DSIH_DCL_INDEXABLE_TEMP && ins->declaration.indexable_temp.initialiser)
|
|
|
|
{
|
|
|
|
ins->declaration.indexable_temp.initialiser = resolve_forward_initialiser(
|
|
|
|
@@ -3032,6 +3141,32 @@ static struct sm6_block *sm6_block_create()
|
|
|
|
return block;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static struct sm6_phi *sm6_block_phi_require_space(struct sm6_block *block, struct sm6_parser *sm6)
|
|
|
|
+{
|
|
|
|
+ struct sm6_phi *phi;
|
|
|
|
+
|
|
|
|
+ if (!vkd3d_array_reserve((void **)&block->phi, &block->phi_capacity, block->phi_count + 1, sizeof(*block->phi)))
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to allocate phi array.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
|
|
+ "Out of memory allocating a phi instruction.");
|
|
|
|
+ return NULL;
|
|
|
|
+ }
|
|
|
|
+ phi = &block->phi[block->phi_count++];
|
|
|
|
+
|
|
|
|
+ phi->incoming = NULL;
|
|
|
|
+ phi->incoming_capacity = 0;
|
|
|
|
+ phi->incoming_count = 0;
|
|
|
|
+
|
|
|
|
+ return phi;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+struct function_emission_state
|
|
|
|
+{
|
|
|
|
+ struct sm6_block *code_block;
|
|
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
static void sm6_parser_emit_alloca(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
|
|
{
|
|
|
|
@@ -3294,6 +3429,61 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
+static const struct sm6_block *sm6_function_get_block(const struct sm6_function *function, uint64_t index,
|
|
|
|
+ struct sm6_parser *sm6)
|
|
|
|
+{
|
|
|
|
+ if (index >= function->block_count)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid code block index %#"PRIx64".\n", index);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
|
|
+ "Invalid code block index %#"PRIx64" for a control flow instruction.", index);
|
|
|
|
+ return NULL;
|
|
|
|
+ }
|
|
|
|
+ return function->blocks[index];
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_emit_br(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
+ struct sm6_function *function, struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
|
|
|
|
+{
|
|
|
|
+ const struct sm6_value *value;
|
|
|
|
+ unsigned int i = 2;
|
|
|
|
+
|
|
|
|
+ if (record->operand_count != 1 && record->operand_count < 3)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid operand count %u.\n", record->operand_count);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
|
|
+ "Invalid operand count %u for a branch instruction.", record->operand_count);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (record->operand_count == 1)
|
|
|
|
+ {
|
|
|
|
+ code_block->terminator.type = TERMINATOR_UNCOND_BR;
|
|
|
|
+ code_block->terminator.true_block = sm6_function_get_block(function, record->operands[0], sm6);
|
|
|
|
+ }
|
|
|
|
+ else
|
|
|
|
+ {
|
|
|
|
+ if (!sm6->bool_type)
|
|
|
|
+ {
|
|
|
|
+ WARN("Bool type not found.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_MODULE,
|
|
|
|
+ "Module does not define a boolean type for conditions.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ if (!(value = sm6_parser_get_value_by_ref(sm6, record, sm6->bool_type, &i))
|
|
|
|
+ || !sm6_value_validate_is_bool(value, sm6))
|
|
|
|
+ return;
|
|
|
|
+ dxil_record_validate_operand_max_count(record, i, sm6);
|
|
|
|
+
|
|
|
|
+ code_block->terminator.type = TERMINATOR_COND_BR;
|
|
|
|
+ code_block->terminator.conditional_reg = value->u.reg;
|
|
|
|
+ code_block->terminator.true_block = sm6_function_get_block(function, record->operands[0], sm6);
|
|
|
|
+ code_block->terminator.false_block = sm6_function_get_block(function, record->operands[1], sm6);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ ins->handler_idx = VKD3DSIH_NOP;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static enum vkd3d_shader_opcode map_dx_unary_op(enum dx_intrinsic_opcode op)
|
|
|
|
{
|
|
|
|
switch (op)
|
|
|
|
@@ -3343,9 +3533,10 @@ static enum vkd3d_shader_opcode map_dx_unary_op(enum dx_intrinsic_opcode op)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
-static void sm6_parser_emit_dx_unary(struct sm6_parser *sm6, struct sm6_block *code_block,
|
|
|
|
- enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
|
|
|
|
+static void sm6_parser_emit_dx_unary(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
|
|
+ const struct sm6_value **operands, struct function_emission_state *state)
|
|
|
|
{
|
|
|
|
+ struct vkd3d_shader_instruction *ins = state->ins;
|
|
|
|
struct vkd3d_shader_src_param *src_param;
|
|
|
|
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, map_dx_unary_op(op));
|
|
|
|
@@ -3355,10 +3546,11 @@ static void sm6_parser_emit_dx_unary(struct sm6_parser *sm6, struct sm6_block *c
|
|
|
|
instruction_dst_param_init_ssa_scalar(ins, sm6);
|
|
|
|
}
|
|
|
|
|
|
|
|
-static void sm6_parser_emit_dx_cbuffer_load(struct sm6_parser *sm6, struct sm6_block *code_block,
|
|
|
|
- enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
|
|
|
|
+static void sm6_parser_emit_dx_cbuffer_load(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
|
|
+ const struct sm6_value **operands, struct function_emission_state *state)
|
|
|
|
{
|
|
|
|
struct sm6_value *dst = sm6_parser_get_current_value(sm6);
|
|
|
|
+ struct vkd3d_shader_instruction *ins = state->ins;
|
|
|
|
struct vkd3d_shader_src_param *src_param;
|
|
|
|
const struct sm6_value *buffer;
|
|
|
|
const struct sm6_type *type;
|
|
|
|
@@ -3406,9 +3598,10 @@ static const struct sm6_descriptor_info *sm6_parser_get_descriptor(struct sm6_pa
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
-static void sm6_parser_emit_dx_create_handle(struct sm6_parser *sm6, struct sm6_block *code_block,
|
|
|
|
- enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
|
|
|
|
+static void sm6_parser_emit_dx_create_handle(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
|
|
+ const struct sm6_value **operands, struct function_emission_state *state)
|
|
|
|
{
|
|
|
|
+ struct vkd3d_shader_instruction *ins = state->ins;
|
|
|
|
enum vkd3d_shader_descriptor_type type;
|
|
|
|
const struct sm6_descriptor_info *d;
|
|
|
|
struct vkd3d_shader_register *reg;
|
|
|
|
@@ -3430,9 +3623,8 @@ static void sm6_parser_emit_dx_create_handle(struct sm6_parser *sm6, struct sm6_
|
|
|
|
dst->u.handle.d = d;
|
|
|
|
|
|
|
|
reg = &dst->u.handle.reg;
|
|
|
|
- /* Set idx_count to 3 for use with load instructions.
|
|
|
|
- * TODO: set register type from resource type when other types are supported. */
|
|
|
|
- vsir_register_init(reg, VKD3DSPR_CONSTBUFFER, VKD3D_DATA_FLOAT, 3);
|
|
|
|
+ /* Set idx_count to 3 for use with load/store instructions. */
|
|
|
|
+ vsir_register_init(reg, d->reg_type, d->reg_data_type, 3);
|
|
|
|
reg->dimension = VSIR_DIMENSION_VEC4;
|
|
|
|
reg->idx[0].offset = id;
|
|
|
|
register_index_address_init(®->idx[1], operands[2], sm6);
|
|
|
|
@@ -3442,9 +3634,10 @@ static void sm6_parser_emit_dx_create_handle(struct sm6_parser *sm6, struct sm6_
|
|
|
|
ins->handler_idx = VKD3DSIH_NOP;
|
|
|
|
}
|
|
|
|
|
|
|
|
-static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, struct sm6_block *code_block,
|
|
|
|
- enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
|
|
|
|
+static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
|
|
+ const struct sm6_value **operands, struct function_emission_state *state)
|
|
|
|
{
|
|
|
|
+ struct vkd3d_shader_instruction *ins = state->ins;
|
|
|
|
struct vkd3d_shader_src_param *src_param;
|
|
|
|
const struct shader_signature *signature;
|
|
|
|
unsigned int row_index, column_index;
|
|
|
|
@@ -3474,9 +3667,45 @@ static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, struct sm6_blo
|
|
|
|
instruction_dst_param_init_ssa_scalar(ins, sm6);
|
|
|
|
}
|
|
|
|
|
|
|
|
-static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, struct sm6_block *code_block,
|
|
|
|
- enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
|
|
|
|
+static void sm6_parser_emit_dx_buffer_load(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
|
|
+ const struct sm6_value **operands, struct function_emission_state *state)
|
|
|
|
{
|
|
|
|
+ struct vkd3d_shader_instruction *ins = state->ins;
|
|
|
|
+ struct vkd3d_shader_src_param *src_params;
|
|
|
|
+ const struct sm6_value *resource;
|
|
|
|
+
|
|
|
|
+ resource = operands[0];
|
|
|
|
+ if (!sm6_value_validate_is_handle(resource, sm6))
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ if (resource->u.handle.d->kind != RESOURCE_KIND_TYPEDBUFFER)
|
|
|
|
+ {
|
|
|
|
+ WARN("Resource is not a typed buffer.\n");
|
|
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_INVALID_OPERATION,
|
|
|
|
+ "Resource for a typed buffer load is not a typed buffer.");
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ instruction_init_with_resource(ins, (resource->u.handle.d->type == VKD3D_SHADER_DESCRIPTOR_TYPE_UAV)
|
|
|
|
+ ? VKD3DSIH_LD_UAV_TYPED : VKD3DSIH_LD, resource, sm6);
|
|
|
|
+
|
|
|
|
+ src_params = instruction_src_params_alloc(ins, 2, sm6);
|
|
|
|
+ src_param_init_from_value(&src_params[0], operands[1]);
|
|
|
|
+ if (!sm6_value_is_undef(operands[2]))
|
|
|
|
+ {
|
|
|
|
+ /* Constant zero would be ok, but is not worth checking for unless it shows up. */
|
|
|
|
+ WARN("Ignoring structure offset.\n");
|
|
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
|
|
+ "Ignoring structure offset for a typed buffer load.");
|
|
|
|
+ }
|
|
|
|
+ src_param_init_vector_from_reg(&src_params[1], &resource->u.handle.reg);
|
|
|
|
+
|
|
|
|
+ instruction_dst_param_init_ssa_vector(ins, VKD3D_VEC4_SIZE, sm6);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
|
|
+ const struct sm6_value **operands, struct function_emission_state *state)
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_instruction *ins = state->ins;
|
|
|
|
struct vkd3d_shader_src_param *src_param;
|
|
|
|
struct vkd3d_shader_dst_param *dst_param;
|
|
|
|
const struct shader_signature *signature;
|
|
|
|
@@ -3531,8 +3760,8 @@ struct sm6_dx_opcode_info
|
|
|
|
{
|
|
|
|
const char *ret_type;
|
|
|
|
const char *operand_info;
|
|
|
|
- void (*handler)(struct sm6_parser *, struct sm6_block *, enum dx_intrinsic_opcode,
|
|
|
|
- const struct sm6_value **, struct vkd3d_shader_instruction *);
|
|
|
|
+ void (*handler)(struct sm6_parser *, enum dx_intrinsic_opcode, const struct sm6_value **,
|
|
|
|
+ struct function_emission_state *);
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -3551,6 +3780,7 @@ struct sm6_dx_opcode_info
|
|
|
|
static const struct sm6_dx_opcode_info sm6_dx_op_table[] =
|
|
|
|
{
|
|
|
|
[DX_BFREV ] = {"m0", "m", sm6_parser_emit_dx_unary},
|
|
|
|
+ [DX_BUFFER_LOAD ] = {"o", "Hii", sm6_parser_emit_dx_buffer_load},
|
|
|
|
[DX_CBUFFER_LOAD_LEGACY ] = {"o", "Hi", sm6_parser_emit_dx_cbuffer_load},
|
|
|
|
[DX_COUNT_BITS ] = {"i", "m", sm6_parser_emit_dx_unary},
|
|
|
|
[DX_CREATE_HANDLE ] = {"H", "ccib", sm6_parser_emit_dx_create_handle},
|
|
|
|
@@ -3684,27 +3914,27 @@ static void sm6_parser_emit_unhandled(struct sm6_parser *sm6, struct vkd3d_shade
|
|
|
|
/* dst->is_undefined is not set here because it flags only explicitly undefined values. */
|
|
|
|
}
|
|
|
|
|
|
|
|
-static void sm6_parser_decode_dx_op(struct sm6_parser *sm6, struct sm6_block *code_block, enum dx_intrinsic_opcode op,
|
|
|
|
+static void sm6_parser_decode_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
|
|
const char *name, const struct sm6_value **operands, unsigned int operand_count,
|
|
|
|
- struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
|
|
+ struct function_emission_state *state, struct sm6_value *dst)
|
|
|
|
{
|
|
|
|
if (op >= ARRAY_SIZE(sm6_dx_op_table) || !sm6_dx_op_table[op].operand_info)
|
|
|
|
{
|
|
|
|
FIXME("Unhandled dx intrinsic function id %u, '%s'.\n", op, name);
|
|
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_UNHANDLED_INTRINSIC,
|
|
|
|
"Call to intrinsic function %s is unhandled.", name);
|
|
|
|
- sm6_parser_emit_unhandled(sm6, ins, dst);
|
|
|
|
+ sm6_parser_emit_unhandled(sm6, state->ins, dst);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sm6_parser_validate_dx_op(sm6, op, name, operands, operand_count, dst))
|
|
|
|
- sm6_dx_op_table[op].handler(sm6, code_block, op, operands, ins);
|
|
|
|
+ sm6_dx_op_table[op].handler(sm6, op, operands, state);
|
|
|
|
else
|
|
|
|
- sm6_parser_emit_unhandled(sm6, ins, dst);
|
|
|
|
+ sm6_parser_emit_unhandled(sm6, state->ins, dst);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sm6_parser_emit_call(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
- struct sm6_block *code_block, struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
|
|
+ struct function_emission_state *state, struct sm6_value *dst)
|
|
|
|
{
|
|
|
|
const struct sm6_value *operands[DXIL_OP_MAX_OPERANDS];
|
|
|
|
const struct sm6_value *fn_value, *op_value;
|
|
|
|
@@ -3786,8 +4016,8 @@ static void sm6_parser_emit_call(struct sm6_parser *sm6, const struct dxil_recor
|
|
|
|
"Expected a constant integer dx intrinsic function id.");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
- sm6_parser_decode_dx_op(sm6, code_block, register_get_uint_value(&op_value->u.reg),
|
|
|
|
- fn_value->u.function.name, &operands[1], operand_count - 1, ins, dst);
|
|
|
|
+ sm6_parser_decode_dx_op(sm6, register_get_uint_value(&op_value->u.reg),
|
|
|
|
+ fn_value->u.function.name, &operands[1], operand_count - 1, state, dst);
|
|
|
|
}
|
|
|
|
|
|
|
|
static enum vkd3d_shader_opcode sm6_map_cast_op(uint64_t code, const struct sm6_type *from,
|
|
|
|
@@ -4288,6 +4518,102 @@ static void sm6_parser_emit_load(struct sm6_parser *sm6, const struct dxil_recor
|
|
|
|
instruction_dst_param_init_ssa_scalar(ins, sm6);
|
|
|
|
}
|
|
|
|
|
|
|
|
+static int phi_incoming_compare(const void *a, const void *b)
|
|
|
|
+{
|
|
|
|
+ const struct incoming_value *incoming_a = a, *incoming_b = b;
|
|
|
|
+
|
|
|
|
+ return (incoming_a->block > incoming_b->block) - (incoming_a->block < incoming_b->block);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_emit_phi(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
+ struct sm6_function *function, struct sm6_block *code_block, struct vkd3d_shader_instruction *ins,
|
|
|
|
+ struct sm6_value *dst)
|
|
|
|
+{
|
|
|
|
+ struct incoming_value *incoming;
|
|
|
|
+ const struct sm6_type *type;
|
|
|
|
+ struct sm6_phi *phi;
|
|
|
|
+ unsigned int i, j;
|
|
|
|
+ uint64_t src_idx;
|
|
|
|
+
|
|
|
|
+ if (!(record->operand_count & 1))
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid operand count %u.\n", record->operand_count);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
|
|
+ "Invalid operand count %u for phi instruction.", record->operand_count);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (!(type = sm6_parser_get_type(sm6, record->operands[0])))
|
|
|
|
+ return;
|
|
|
|
+ if (!sm6_type_is_numeric(type))
|
|
|
|
+ {
|
|
|
|
+ /* dxc doesn't seem to use buffer/resource read return types here. */
|
|
|
|
+ FIXME("Only scalar numeric types are supported.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
|
|
+ "Result type class %u of a phi instruction is not scalar numeric.", type->class);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ dst->type = type;
|
|
|
|
+ register_init_ssa_scalar(&dst->u.reg, type, sm6);
|
|
|
|
+
|
|
|
|
+ if (!(phi = sm6_block_phi_require_space(code_block, sm6)))
|
|
|
|
+ return;
|
|
|
|
+ phi->reg = dst->u.reg;
|
|
|
|
+ phi->incoming_count = record->operand_count / 2u;
|
|
|
|
+
|
|
|
|
+ if (!vkd3d_array_reserve((void **)&phi->incoming, &phi->incoming_capacity, phi->incoming_count,
|
|
|
|
+ sizeof(*phi->incoming)))
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to allocate phi incoming array.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
|
|
+ "Out of memory allocating a phi incoming array.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ incoming = phi->incoming;
|
|
|
|
+
|
|
|
|
+ for (i = 1; i < record->operand_count; i += 2)
|
|
|
|
+ {
|
|
|
|
+ src_idx = sm6->value_count - decode_rotated_signed_value(record->operands[i]);
|
|
|
|
+ /* May be a forward reference. */
|
|
|
|
+ if (src_idx >= sm6->cur_max_value)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid value index %"PRIu64".\n", src_idx);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Invalid value index %"PRIu64" for a phi incoming value.", src_idx);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ j = i / 2u;
|
|
|
|
+ /* Store the value index in the register for later resolution. */
|
|
|
|
+ incoming[j].reg.idx[0].offset = src_idx;
|
|
|
|
+ incoming[j].block = sm6_function_get_block(function, record->operands[i + 1], sm6);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ ins->handler_idx = VKD3DSIH_NOP;
|
|
|
|
+
|
|
|
|
+ qsort(incoming, phi->incoming_count, sizeof(*incoming), phi_incoming_compare);
|
|
|
|
+
|
|
|
|
+ for (i = 1, j = 1; i < phi->incoming_count; ++i)
|
|
|
|
+ {
|
|
|
|
+ if (incoming[i].block != incoming[i - 1].block)
|
|
|
|
+ {
|
|
|
|
+ incoming[j++] = incoming[i];
|
|
|
|
+ continue;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (incoming[i].reg.idx[0].offset != incoming[i - 1].reg.idx[0].offset)
|
|
|
|
+ {
|
|
|
|
+ WARN("PHI conflict.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Two phi incomings have the same block but different values.");
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ /* if (j == 1) we should be able to set dst->u.reg to incoming[0].reg, but structurisation
|
|
|
|
+ * may potentially add new incomings. */
|
|
|
|
+ phi->incoming_count = j;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static void sm6_parser_emit_ret(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
|
|
|
|
{
|
|
|
|
@@ -4297,6 +4623,8 @@ static void sm6_parser_emit_ret(struct sm6_parser *sm6, const struct dxil_record
|
|
|
|
if (record->operand_count)
|
|
|
|
FIXME("Non-void return is not implemented.\n");
|
|
|
|
|
|
|
|
+ code_block->terminator.type = TERMINATOR_RET;
|
|
|
|
+
|
|
|
|
ins->handler_idx = VKD3DSIH_NOP;
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -4351,6 +4679,89 @@ static void sm6_parser_emit_store(struct sm6_parser *sm6, const struct dxil_reco
|
|
|
|
dst_param->reg.alignment = alignment;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static void sm6_parser_emit_switch(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
+ struct sm6_function *function, struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
|
|
|
|
+{
|
|
|
|
+ struct sm6_block_terminator *terminator = &code_block->terminator;
|
|
|
|
+ const struct sm6_type *type;
|
|
|
|
+ const struct sm6_value *src;
|
|
|
|
+ unsigned int i = 1, j;
|
|
|
|
+
|
|
|
|
+ if (record->operand_count < 3 || !(record->operand_count & 1))
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid operand count %u.\n", record->operand_count);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
|
|
+ "Invalid operand count %u for a switch instruction.", record->operand_count);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (!(type = sm6_parser_get_type(sm6, record->operands[0])))
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ if (!(src = sm6_parser_get_value_by_ref(sm6, record, type, &i))
|
|
|
|
+ || !sm6_value_validate_is_register(src, sm6))
|
|
|
|
+ return;
|
|
|
|
+ assert(i == 2);
|
|
|
|
+
|
|
|
|
+ if (src->type != type)
|
|
|
|
+ {
|
|
|
|
+ WARN("Type mismatch.\n");
|
|
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
|
|
+ "The type of a switch selector value does not match the selector type.");
|
|
|
|
+ }
|
|
|
|
+ if (!sm6_type_is_integer(type))
|
|
|
|
+ {
|
|
|
|
+ WARN("Selector is not scalar integer.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Selector type class %u of a switch instruction is not scalar integer.", type->class);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ terminator->conditional_reg = src->u.reg;
|
|
|
|
+ terminator->type = TERMINATOR_SWITCH;
|
|
|
|
+
|
|
|
|
+ terminator->case_count = record->operand_count / 2u;
|
|
|
|
+ if (!(terminator->cases = vkd3d_calloc(terminator->case_count, sizeof(*terminator->cases))))
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to allocate case array.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
|
|
+ "Out of memory allocating a switch case array.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* Executes 'operand_count / 2' times because operand_count is uneven. */
|
|
|
|
+ for (; i < record->operand_count; i += 2)
|
|
|
|
+ {
|
|
|
|
+ j = i / 2u - 1;
|
|
|
|
+ terminator->cases[j].block = sm6_function_get_block(function, record->operands[i], sm6);
|
|
|
|
+ /* For structurisation it is convenient to store the default in the case array. */
|
|
|
|
+ terminator->cases[j].is_default = !j;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ for (i = 3; i < record->operand_count; i += 2)
|
|
|
|
+ {
|
|
|
|
+ if (!(src = sm6_parser_get_value_safe(sm6, record->operands[i])))
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ if (src->type != type)
|
|
|
|
+ {
|
|
|
|
+ WARN("Type mismatch.\n");
|
|
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
|
|
+ "The type of a switch case value does not match the selector type.");
|
|
|
|
+ }
|
|
|
|
+ if (!sm6_value_is_constant(src))
|
|
|
|
+ {
|
|
|
|
+ WARN("Case value is not a constant.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "A switch case value is not a constant.");
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ terminator->cases[i / 2u].value = sm6_value_get_constant_uint64(src);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ ins->handler_idx = VKD3DSIH_NOP;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static void sm6_parser_emit_vselect(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
|
|
{
|
|
|
|
@@ -4401,6 +4812,12 @@ static bool sm6_metadata_value_is_string(const struct sm6_metadata_value *m)
|
|
|
|
return m && m->type == VKD3D_METADATA_STRING;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static bool sm6_metadata_value_is_zero_or_undef(const struct sm6_metadata_value *m)
|
|
|
|
+{
|
|
|
|
+ return sm6_metadata_value_is_value(m)
|
|
|
|
+ && (sm6_value_is_undef(m->u.value) || sm6_value_is_constant_zero(m->u.value));
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static bool sm6_metadata_get_uint_value(const struct sm6_parser *sm6,
|
|
|
|
const struct sm6_metadata_value *m, unsigned int *u)
|
|
|
|
{
|
|
|
|
@@ -4641,6 +5058,70 @@ static void metadata_attachment_record_apply(const struct dxil_record *record, e
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
+static bool sm6_function_blocks_reserve(struct sm6_function *function, unsigned int reserve)
|
|
|
|
+{
|
|
|
|
+ if (!vkd3d_array_reserve((void **)&function->blocks, &function->block_capacity,
|
|
|
|
+ reserve, sizeof(*function->blocks)))
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to allocate code block array.\n");
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ return true;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static struct sm6_block *sm6_function_create_block(struct sm6_function *function)
|
|
|
|
+{
|
|
|
|
+ struct sm6_block *block;
|
|
|
|
+
|
|
|
|
+ if (!(block = sm6_block_create()))
|
|
|
|
+ return NULL;
|
|
|
|
+
|
|
|
|
+ function->blocks[function->block_count++] = block;
|
|
|
|
+ /* Set the id to the array index + 1. */
|
|
|
|
+ block->id = function->block_count;
|
|
|
|
+
|
|
|
|
+ return block;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static enum vkd3d_result sm6_function_resolve_phi_incomings(const struct sm6_function *function,
|
|
|
|
+ struct sm6_parser *sm6)
+{
+ const struct sm6_block *block;
+ size_t i, j, block_idx;
+
+ for (block_idx = 0; block_idx < function->block_count; ++block_idx)
+ {
+ block = function->blocks[block_idx];
+
+ for (i = 0; i < block->phi_count; ++i)
+ {
+ struct sm6_phi *phi = &block->phi[i];
+ const struct sm6_value *src;
+
+ for (j = 0; j < phi->incoming_count; ++j)
+ {
+ src = &sm6->values[phi->incoming[j].reg.idx[0].offset];
+ if (!sm6_value_is_constant(src) && !sm6_value_is_undef(src) && !sm6_value_is_ssa(src))
+ {
+ FIXME("PHI incoming value is not a constant or SSA register.\n");
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
+ "A PHI incoming value is not a constant or SSA register.");
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+ if (src->u.reg.data_type != phi->reg.data_type)
+ {
+ WARN("Type mismatch.\n");
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
+ "The type of a phi incoming value does not match the result type.");
+ }
+ phi->incoming[j].reg = src->u.reg;
+ }
+ }
+ }
+
+ return VKD3D_OK;
+}
+
static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const struct dxil_block *block,
struct sm6_function *function)
{
@@ -4680,16 +5161,18 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
WARN("Function contains no blocks.\n");
return VKD3D_ERROR_INVALID_SHADER;
}
- if (block_count > 1)
- {
- FIXME("Branched shaders are not supported yet.\n");
- return VKD3D_ERROR_INVALID_SHADER;
- }

- if (!(function->blocks[0] = sm6_block_create()))
- {
- ERR("Failed to allocate code block.\n");
+ if (!sm6_function_blocks_reserve(function, block_count))
return VKD3D_ERROR_OUT_OF_MEMORY;
+
+ /* Pre-allocate all blocks to simplify instruction parsing. */
+ for (i = 0; i < block_count; ++i)
+ {
+ if (!sm6_function_create_block(function))
+ {
+ ERR("Failed to allocate code block.\n");
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ }
}
function->block_count = block_count;
code_block = function->blocks[0];
@@ -4708,10 +5191,9 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
return VKD3D_ERROR_INVALID_SHADER;
}

- /* block->record_count - 1 is the instruction count, but some instructions
- * can emit >1 IR instruction, so extra may be used. */
+ /* Some instructions can emit >1 IR instruction, so extra may be used. */
if (!vkd3d_array_reserve((void **)&code_block->instructions, &code_block->instruction_capacity,
- max(code_block->instruction_count + 1, block->record_count), sizeof(*code_block->instructions)))
+ code_block->instruction_count + 1, sizeof(*code_block->instructions)))
{
ERR("Failed to allocate instructions.\n");
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -4734,9 +5216,16 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
case FUNC_CODE_INST_BINOP:
sm6_parser_emit_binop(sm6, record, ins, dst);
break;
+ case FUNC_CODE_INST_BR:
+ sm6_parser_emit_br(sm6, record, function, code_block, ins);
+ is_terminator = true;
+ break;
case FUNC_CODE_INST_CALL:
- sm6_parser_emit_call(sm6, record, code_block, ins, dst);
+ {
+ struct function_emission_state state = {code_block, ins};
+ sm6_parser_emit_call(sm6, record, &state, dst);
break;
+ }
case FUNC_CODE_INST_CAST:
sm6_parser_emit_cast(sm6, record, ins, dst);
break;
@@ -4752,6 +5241,9 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
case FUNC_CODE_INST_LOAD:
sm6_parser_emit_load(sm6, record, ins, dst);
break;
+ case FUNC_CODE_INST_PHI:
+ sm6_parser_emit_phi(sm6, record, function, code_block, ins, dst);
+ break;
case FUNC_CODE_INST_RET:
sm6_parser_emit_ret(sm6, record, code_block, ins);
is_terminator = true;
@@ -4760,6 +5252,10 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
case FUNC_CODE_INST_STORE:
sm6_parser_emit_store(sm6, record, ins, dst);
break;
+ case FUNC_CODE_INST_SWITCH:
+ sm6_parser_emit_switch(sm6, record, function, code_block, ins);
+ is_terminator = true;
+ break;
case FUNC_CODE_INST_VSELECT:
sm6_parser_emit_vselect(sm6, record, ins, dst);
break;
@@ -4794,22 +5290,130 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
return VKD3D_ERROR_INVALID_SHADER;
}

- return VKD3D_OK;
+ return sm6_function_resolve_phi_incomings(function, sm6);
}

-static bool sm6_block_emit_instructions(struct sm6_block *block, struct sm6_parser *sm6)
+static void sm6_block_emit_terminator(const struct sm6_block *block, struct sm6_parser *sm6)
{
- struct vkd3d_shader_instruction *ins = sm6_parser_require_space(sm6, block->instruction_count + 1);
+ struct vkd3d_shader_src_param *src_params;
+ struct vkd3d_shader_instruction *ins;
+ unsigned int i, count;

- if (!ins)
- return false;
+ switch (block->terminator.type)
+ {
+ case TERMINATOR_UNCOND_BR:
+ if (!block->terminator.true_block)
+ return;
+ ins = sm6_parser_add_instruction(sm6, VKD3DSIH_BRANCH);
+ if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
+ return;
+ vsir_src_param_init_label(&src_params[0], block->terminator.true_block->id);
+ break;

- memcpy(ins, block->instructions, block->instruction_count * sizeof(*block->instructions));
- sm6->p.instructions.count += block->instruction_count;
+ case TERMINATOR_COND_BR:
+ if (!block->terminator.true_block || !block->terminator.false_block)
+ return;
+ ins = sm6_parser_add_instruction(sm6, VKD3DSIH_BRANCH);
+ if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
+ return;
+ src_param_init(&src_params[0]);
+ src_params[0].reg = block->terminator.conditional_reg;
+ vsir_src_param_init_label(&src_params[1], block->terminator.true_block->id);
+ vsir_src_param_init_label(&src_params[2], block->terminator.false_block->id);
+ break;

- sm6_parser_add_instruction(sm6, VKD3DSIH_RET);
+ case TERMINATOR_SWITCH:
+ ins = sm6_parser_add_instruction(sm6, VKD3DSIH_SWITCH_MONOLITHIC);
+ if (!(src_params = instruction_src_params_alloc(ins, block->terminator.case_count * 2u + 1, sm6)))
+ return;
+ src_param_init(&src_params[0]);
+ src_params[0].reg = block->terminator.conditional_reg;
+ /* TODO: emit the merge block id. */
+ vsir_src_param_init_label(&src_params[2], 0);

- return true;
+ for (i = 0, count = 3; i < block->terminator.case_count; ++i)
+ {
+ const struct terminator_case *switch_case;
+ const struct sm6_block *case_block;
+
+ switch_case = &block->terminator.cases[i];
+ if (!(case_block = switch_case->block))
+ {
+ assert(sm6->p.failed);
+ continue;
+ }
+ if (switch_case->is_default)
+ {
+ vsir_src_param_init_label(&src_params[1], case_block->id);
+ continue;
+ }
+
+ if (src_params[0].reg.data_type == VKD3D_DATA_UINT64)
+ {
+ vsir_src_param_init(&src_params[count], VKD3DSPR_IMMCONST64, VKD3D_DATA_UINT64, 0);
+ src_params[count++].reg.u.immconst_u64[0] = switch_case->value;
+ }
+ else
+ {
+ if (switch_case->value > UINT_MAX)
+ {
+ WARN("Truncating 64-bit constant %"PRIx64".\n", switch_case->value);
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
+ "Truncating 64-bit switch case value %"PRIx64" to 32 bits.", switch_case->value);
+ }
+ vsir_src_param_init(&src_params[count], VKD3DSPR_IMMCONST, VKD3D_DATA_UINT, 0);
+ src_params[count++].reg.u.immconst_u32[0] = switch_case->value;
+ }
+ vsir_src_param_init_label(&src_params[count++], case_block->id);
+ }
+
+ break;
+
+ case TERMINATOR_RET:
+ sm6_parser_add_instruction(sm6, VKD3DSIH_RET);
+ break;
+
+ default:
+ vkd3d_unreachable();
+ }
+}
+
+static void sm6_block_emit_phi(const struct sm6_block *block, struct sm6_parser *sm6)
+{
+ struct vkd3d_shader_instruction *ins;
+ unsigned int i, j, incoming_count;
+ const struct sm6_phi *src_phi;
+
+ for (i = 0; i < block->phi_count; ++i)
+ {
+ struct vkd3d_shader_src_param *src_params;
+ struct vkd3d_shader_dst_param *dst_param;
+
+ src_phi = &block->phi[i];
+ incoming_count = src_phi->incoming_count;
+
+ ins = sm6_parser_add_instruction(sm6, VKD3DSIH_PHI);
+ if (!(src_params = instruction_src_params_alloc(ins, incoming_count * 2u, sm6)))
+ return;
+ if (!(dst_param = instruction_dst_params_alloc(ins, 1, sm6)))
+ return;
+
+ for (j = 0; j < incoming_count; ++j)
+ {
+ const struct sm6_block *incoming_block = src_phi->incoming[j].block;
+ unsigned int index = j * 2;
+
+ src_param_init(&src_params[index]);
+ src_params[index].reg = src_phi->incoming[j].reg;
+ if (incoming_block)
+ vsir_src_param_init_label(&src_params[index + 1], incoming_block->id);
+ else
+ assert(sm6->p.failed);
+ }
+
+ dst_param_init(dst_param);
+ dst_param->reg = src_phi->reg;
+ }
}

static enum vkd3d_result sm6_parser_module_init(struct sm6_parser *sm6, const struct dxil_block *block,
@@ -4881,6 +5485,36 @@ static void sm6_parser_emit_label(struct sm6_parser *sm6, unsigned int label_id)
vsir_src_param_init_label(src_param, label_id);
}

+static enum vkd3d_result sm6_function_emit_blocks(const struct sm6_function *function, struct sm6_parser *sm6)
+{
+ unsigned int i;
+
+ sm6->p.shader_desc.block_count = function->block_count;
+
+ for (i = 0; i < function->block_count; ++i)
+ {
+ const struct sm6_block *block = function->blocks[i];
+
+ /* Space for the label and terminator. */
+ if (!sm6_parser_require_space(sm6, block->instruction_count + block->phi_count + 2))
+ {
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
+ "Out of memory emitting shader instructions.");
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ }
+ sm6_parser_emit_label(sm6, block->id);
+ sm6_block_emit_phi(block, sm6);
+
+ memcpy(&sm6->p.program.instructions.elements[sm6->p.program.instructions.count], block->instructions,
+ block->instruction_count * sizeof(*block->instructions));
+ sm6->p.program.instructions.count += block->instruction_count;
+
+ sm6_block_emit_terminator(block, sm6);
+ }
+
+ return VKD3D_OK;
+}
+
static bool sm6_parser_allocate_named_metadata(struct sm6_parser *sm6)
{
struct dxil_block *block;
@@ -5155,7 +5789,9 @@ static enum vkd3d_shader_minimum_precision minimum_precision_from_dxil_component
static const enum vkd3d_shader_sysval_semantic sysval_semantic_table[] =
{
[SEMANTIC_KIND_ARBITRARY] = VKD3D_SHADER_SV_NONE,
+ [SEMANTIC_KIND_VERTEXID] = VKD3D_SHADER_SV_VERTEX_ID,
[SEMANTIC_KIND_POSITION] = VKD3D_SHADER_SV_POSITION,
+ [SEMANTIC_KIND_ISFRONTFACE] = VKD3D_SHADER_SV_IS_FRONT_FACE,
[SEMANTIC_KIND_TARGET] = VKD3D_SHADER_SV_TARGET,
};

@@ -5238,6 +5874,288 @@ static bool sm6_parser_resources_load_register_range(struct sm6_parser *sm6,
return true;
}

+static bool resource_kind_is_texture(enum dxil_resource_kind kind)
+{
+ return kind >= RESOURCE_KIND_TEXTURE1D && kind <= RESOURCE_KIND_TEXTURECUBEARRAY;
+}
+
+static bool resource_kind_is_multisampled(enum dxil_resource_kind kind)
+{
+ return kind == RESOURCE_KIND_TEXTURE2DMS || kind == RESOURCE_KIND_TEXTURE2DMSARRAY;
+}
+
+static enum vkd3d_shader_resource_type shader_resource_type_from_dxil_resource_kind(enum dxil_resource_kind kind)
+{
+ if (resource_kind_is_texture(kind))
+ return kind + 1;
+
+ switch (kind)
+ {
+ case RESOURCE_KIND_TYPEDBUFFER:
+ return VKD3D_SHADER_RESOURCE_BUFFER;
+ default:
+ return VKD3D_SHADER_RESOURCE_NONE;
+ }
+}
+
+static const enum vkd3d_data_type data_type_table[] =
+{
+ [COMPONENT_TYPE_INVALID] = VKD3D_DATA_UNUSED,
+ [COMPONENT_TYPE_I1] = VKD3D_DATA_UNUSED,
+ [COMPONENT_TYPE_I16] = VKD3D_DATA_INT,
+ [COMPONENT_TYPE_U16] = VKD3D_DATA_UINT,
+ [COMPONENT_TYPE_I32] = VKD3D_DATA_INT,
+ [COMPONENT_TYPE_U32] = VKD3D_DATA_UINT,
+ [COMPONENT_TYPE_I64] = VKD3D_DATA_UNUSED,
+ [COMPONENT_TYPE_U64] = VKD3D_DATA_UNUSED,
+ [COMPONENT_TYPE_F16] = VKD3D_DATA_FLOAT,
+ [COMPONENT_TYPE_F32] = VKD3D_DATA_FLOAT,
+ [COMPONENT_TYPE_F64] = VKD3D_DATA_DOUBLE,
+ [COMPONENT_TYPE_SNORMF16] = VKD3D_DATA_SNORM,
+ [COMPONENT_TYPE_UNORMF16] = VKD3D_DATA_UNORM,
+ [COMPONENT_TYPE_SNORMF32] = VKD3D_DATA_SNORM,
+ [COMPONENT_TYPE_UNORMF32] = VKD3D_DATA_UNORM,
+ [COMPONENT_TYPE_SNORMF64] = VKD3D_DATA_DOUBLE,
+ [COMPONENT_TYPE_UNORMF64] = VKD3D_DATA_DOUBLE,
+ [COMPONENT_TYPE_PACKEDS8X32] = VKD3D_DATA_UNUSED,
+ [COMPONENT_TYPE_PACKEDU8X32] = VKD3D_DATA_UNUSED,
+};
+
+static enum vkd3d_data_type vkd3d_data_type_from_dxil_component_type(enum dxil_component_type type,
+ struct sm6_parser *sm6)
+{
+ enum vkd3d_data_type data_type;
+
+ if (type >= ARRAY_SIZE(data_type_table) || (data_type = data_type_table[type]) == VKD3D_DATA_UNUSED)
+ {
+ FIXME("Unhandled component type %u.\n", type);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "Resource descriptor component type %u is unhandled.", type);
+ return VKD3D_DATA_FLOAT;
+ }
+
+ return data_type;
+}
+
+static struct vkd3d_shader_resource *sm6_parser_resources_load_common_info(struct sm6_parser *sm6,
+ const struct sm6_metadata_value *type_value, bool is_uav, enum dxil_resource_kind kind,
+ const struct sm6_metadata_value *m, struct vkd3d_shader_instruction *ins)
+{
+ enum vkd3d_shader_resource_type resource_type;
+ enum dxil_resource_type dxil_resource_type;
+ const struct sm6_metadata_node *node;
+ enum vkd3d_data_type data_type;
+ unsigned int i, values[2];
+
+ if (!(resource_type = shader_resource_type_from_dxil_resource_kind(kind)))
+ {
+ FIXME("Unhandled resource kind %u.\n", kind);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "Resource kind %u is unhandled.", kind);
+ return NULL;
+ }
+ ins->resource_type = resource_type;
+
+ if (!sm6_metadata_value_is_node(m))
+ {
+ WARN("Resource metadata list is not a node.\n");
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "Resource descriptor metadata list is not a node.");
+ return NULL;
+ }
+
+ node = m->u.node;
+
+ if (node->operand_count < 2)
+ {
+ WARN("Invalid operand count %u.\n", node->operand_count);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
+ "Invalid operand count %u for a resource descriptor.", node->operand_count);
+ return NULL;
+ }
+ if (node->operand_count > 2)
+ {
+ WARN("Ignoring %u extra operands.\n", node->operand_count - 2);
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
+ "Ignoring %u extra operands for a resource descriptor.", node->operand_count - 2);
+ }
+
+ for (i = 0; i < 2; ++i)
+ {
+ if (!sm6_metadata_get_uint_value(sm6, node->operands[i], &values[i]))
+ {
+ WARN("Failed to load uint value at index %u.\n", i);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "A resource descriptor operand metadata value is not an integer.");
+ return NULL;
+ }
+ }
+
+ if ((dxil_resource_type = values[0]) == RESOURCE_TYPE_NON_RAW_STRUCTURED)
+ {
+ if (kind != RESOURCE_KIND_TYPEDBUFFER && !resource_kind_is_texture(kind))
+ {
+ WARN("Unhandled resource kind %u.\n", kind);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "Resource kind %u for a typed resource is unhandled.", kind);
+ return NULL;
+ }
+
+ data_type = vkd3d_data_type_from_dxil_component_type(values[1], sm6);
+ ins->handler_idx = is_uav ? VKD3DSIH_DCL_UAV_TYPED : VKD3DSIH_DCL;
+ for (i = 0; i < VKD3D_VEC4_SIZE; ++i)
+ ins->declaration.semantic.resource_data_type[i] = data_type;
+ ins->declaration.semantic.resource_type = resource_type;
+ ins->declaration.semantic.resource.reg.write_mask = VKD3DSP_WRITEMASK_ALL;
+
+ return &ins->declaration.semantic.resource;
+ }
+ else
+ {
+ FIXME("Unhandled resource type %u.\n", dxil_resource_type);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "Resource type %u is unhandled.", dxil_resource_type);
+ }
+
+ return NULL;
+}
+
+static void init_resource_declaration(struct vkd3d_shader_resource *resource,
+ enum vkd3d_shader_register_type reg_type, enum vkd3d_data_type data_type, unsigned int id,
+ const struct vkd3d_shader_register_range *range)
+{
+ struct vkd3d_shader_dst_param *param = &resource->reg;
+
+ param->modifiers = 0;
+ param->shift = 0;
+ vsir_register_init(&param->reg, reg_type, data_type, 3);
+ param->reg.idx[0].offset = id;
+ param->reg.idx[1].offset = range->first;
+ param->reg.idx[2].offset = range->last;
+
+ resource->range = *range;
+}
+
+static enum vkd3d_result sm6_parser_resources_load_srv(struct sm6_parser *sm6,
+ const struct sm6_metadata_node *node, struct sm6_descriptor_info *d, struct vkd3d_shader_instruction *ins)
+{
+ struct vkd3d_shader_resource *resource;
+ unsigned int kind;
+
+ if (node->operand_count < 9)
+ {
+ WARN("Invalid operand count %u.\n", node->operand_count);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
+ "Invalid operand count %u for an SRV descriptor.", node->operand_count);
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+ if (node->operand_count > 9)
+ {
+ WARN("Ignoring %u extra operands.\n", node->operand_count - 9);
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
+ "Ignoring %u extra operands for an SRV descriptor.", node->operand_count - 9);
+ }
+
+ if (!sm6_metadata_get_uint_value(sm6, node->operands[6], &kind))
+ {
+ WARN("Failed to load resource type.\n");
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "SRV resource type metadata value is not an integer.");
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+
+ vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_INVALID);
+
+ if (!(resource = sm6_parser_resources_load_common_info(sm6, node->operands[1], false, kind,
+ node->operands[8], ins)))
+ {
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+
+ d->resource_type = ins->resource_type;
+ d->kind = kind;
+ d->reg_type = VKD3DSPR_RESOURCE;
+ d->reg_data_type = (ins->resource_type == VKD3D_SHADER_RESOURCE_BUFFER) ? VKD3D_DATA_UINT : VKD3D_DATA_RESOURCE;
+ d->resource_data_type = ins->declaration.semantic.resource_data_type[0];
+
+ init_resource_declaration(resource, VKD3DSPR_RESOURCE, d->reg_data_type, d->id, &d->range);
+
+ if (resource_kind_is_multisampled(kind))
+ {
+ if (!sm6_metadata_get_uint_value(sm6, node->operands[7], &ins->declaration.semantic.sample_count))
+ {
+ WARN("Failed to load sample count.\n");
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "SRV sample count metadata value is not an integer.");
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+ }
+ else if (!sm6_metadata_value_is_zero_or_undef(node->operands[7]))
+ {
+ WARN("Ignoring sample count value.\n");
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
+ "Ignoring an SRV descriptor sample count metadata value which is not constant zero or undefined.");
+ }
+
+ return VKD3D_OK;
+}
+
+static enum vkd3d_result sm6_parser_resources_load_uav(struct sm6_parser *sm6,
+ const struct sm6_metadata_node *node, struct sm6_descriptor_info *d, struct vkd3d_shader_instruction *ins)
+{
+ struct vkd3d_shader_resource *resource;
+ unsigned int i, values[4];
+
+ if (node->operand_count < 11)
+ {
+ WARN("Invalid operand count %u.\n", node->operand_count);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
+ "Invalid operand count %u for a UAV descriptor.", node->operand_count);
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+ if (node->operand_count > 11)
+ {
+ WARN("Ignoring %u extra operands.\n", node->operand_count - 11);
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
+ "Ignoring %u extra operands for a UAV descriptor.", node->operand_count - 11);
+ }
+
+ for (i = 6; i < 10; ++i)
+ {
+ if (!sm6_metadata_get_uint_value(sm6, node->operands[i], &values[i - 6]))
+ {
+ WARN("Failed to load uint value at index %u.\n", i);
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
+ "A UAV descriptor operand metadata value is not an integer.");
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+ }
+
+ vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_INVALID);
+ if (values[1])
+ ins->flags = VKD3DSUF_GLOBALLY_COHERENT;
+ if (values[2])
+ ins->flags |= VKD3DSUF_ORDER_PRESERVING_COUNTER;
+ if (values[3])
+ ins->flags |= VKD3DSUF_RASTERISER_ORDERED_VIEW;
+
+ if (!(resource = sm6_parser_resources_load_common_info(sm6, node->operands[1], true, values[0],
+ node->operands[10], ins)))
+ {
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+
+ d->resource_type = ins->resource_type;
+ d->kind = values[0];
+ d->reg_type = VKD3DSPR_UAV;
+ d->reg_data_type = (ins->resource_type == VKD3D_SHADER_RESOURCE_BUFFER) ? VKD3D_DATA_UINT : VKD3D_DATA_UAV;
+ d->resource_data_type = ins->declaration.semantic.resource_data_type[0];
+
+ init_resource_declaration(resource, VKD3DSPR_UAV, d->reg_data_type, d->id, &d->range);
+
+ return VKD3D_OK;
+}
+
static enum vkd3d_result sm6_parser_resources_load_cbv(struct sm6_parser *sm6,
const struct sm6_metadata_node *node, struct sm6_descriptor_info *d, struct vkd3d_shader_instruction *ins)
{
@@ -5280,6 +6198,10 @@ static enum vkd3d_result sm6_parser_resources_load_cbv(struct sm6_parser *sm6,

ins->declaration.cb.range = d->range;

+ d->reg_type = VKD3DSPR_CONSTBUFFER;
+ d->reg_data_type = VKD3D_DATA_FLOAT;
+ d->resource_data_type = VKD3D_DATA_FLOAT;
+
return VKD3D_OK;
}

@@ -5351,6 +6273,14 @@ static enum vkd3d_result sm6_parser_descriptor_type_init(struct sm6_parser *sm6,
if ((ret = sm6_parser_resources_load_cbv(sm6, node, d, ins)) < 0)
return ret;
break;
+ case VKD3D_SHADER_DESCRIPTOR_TYPE_SRV:
+ if ((ret = sm6_parser_resources_load_srv(sm6, node, d, ins)) < 0)
+ return ret;
+ break;
+ case VKD3D_SHADER_DESCRIPTOR_TYPE_UAV:
+ if ((ret = sm6_parser_resources_load_uav(sm6, node, d, ins)) < 0)
+ return ret;
+ break;
default:
FIXME("Unsupported descriptor type %u.\n", type);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCES,
@@ -5359,7 +6289,7 @@ static enum vkd3d_result sm6_parser_descriptor_type_init(struct sm6_parser *sm6,
}

++sm6->descriptor_count;
- ++sm6->p.instructions.count;
+ ++sm6->p.program.instructions.count;
}

return VKD3D_OK;
@@ -5709,9 +6639,9 @@ static enum vkd3d_result sm6_parser_emit_thread_group(struct sm6_parser *sm6, co
unsigned int group_sizes[3];
unsigned int i;

- if (sm6->p.shader_version.type != VKD3D_SHADER_TYPE_COMPUTE)
+ if (sm6->p.program.shader_version.type != VKD3D_SHADER_TYPE_COMPUTE)
{
- WARN("Shader of type %#x has thread group dimensions.\n", sm6->p.shader_version.type);
+ WARN("Shader of type %#x has thread group dimensions.\n", sm6->p.program.shader_version.type);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_PROPERTIES,
"Shader has thread group dimensions but is not a compute shader.");
return VKD3D_ERROR_INVALID_SHADER;
@@ -5930,9 +6860,20 @@ static void sm6_symtab_cleanup(struct sm6_symbol *symbols, size_t count)
vkd3d_free(symbols);
}

+static void sm6_phi_destroy(struct sm6_phi *phi)
+{
+ vkd3d_free(phi->incoming);
+}
+
static void sm6_block_destroy(struct sm6_block *block)
{
+ unsigned int i;
+
vkd3d_free(block->instructions);
+ for (i = 0; i < block->phi_count; ++i)
+ sm6_phi_destroy(&block->phi[i]);
+ vkd3d_free(block->phi);
+ vkd3d_free(block->terminator.cases);
vkd3d_free(block);
}

@@ -5944,6 +6885,7 @@ static void sm6_functions_cleanup(struct sm6_function *functions, size_t count)
{
for (j = 0; j < functions[i].block_count; ++j)
sm6_block_destroy(functions[i].blocks[j]);
+ vkd3d_free(functions[i].blocks);
}
vkd3d_free(functions);
}
@@ -5954,7 +6896,7 @@ static void sm6_parser_destroy(struct vkd3d_shader_parser *parser)

dxil_block_destroy(&sm6->root_block);
dxil_global_abbrevs_cleanup(sm6->abbrevs, sm6->abbrev_count);
- shader_instruction_array_destroy(&parser->instructions);
+ vsir_program_cleanup(&parser->program);
sm6_type_table_cleanup(sm6->types, sm6->type_count);
sm6_symtab_cleanup(sm6->global_symbols, sm6->global_symbol_count);
sm6_functions_cleanup(sm6->functions, sm6->function_count);
@@ -6228,8 +7170,7 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t
return VKD3D_ERROR_OUT_OF_MEMORY;
}

- sm6->p.shader_desc.ssa_count = sm6->ssa_next_id;
- sm6->p.shader_desc.block_count = 1;
+ sm6->p.program.ssa_count = sm6->ssa_next_id;

if (!(fn = sm6_parser_get_function(sm6, sm6->entry_point)))
{
@@ -6240,13 +7181,8 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t
}

assert(sm6->function_count == 1);
- sm6_parser_emit_label(sm6, 1);
- if (!sm6_block_emit_instructions(fn->blocks[0], sm6))
- {
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
- "Out of memory emitting shader instructions.");
- return VKD3D_ERROR_OUT_OF_MEMORY;
- }
+ if ((ret = sm6_function_emit_blocks(fn, sm6)) < 0)
+ return ret;

dxil_block_destroy(&sm6->root_block);

diff --git a/libs/vkd3d/libs/vkd3d-shader/glsl.c b/libs/vkd3d/libs/vkd3d-shader/glsl.c
index f8d68b5a798..bdd03c1e72a 100644
--- a/libs/vkd3d/libs/vkd3d-shader/glsl.c
+++ b/libs/vkd3d/libs/vkd3d-shader/glsl.c
@@ -91,7 +91,7 @@ static void vkd3d_glsl_handle_instruction(struct vkd3d_glsl_generator *generator
}

int vkd3d_glsl_generator_generate(struct vkd3d_glsl_generator *generator,
- struct vkd3d_shader_parser *parser, struct vkd3d_shader_code *out)
+ struct vsir_program *program, struct vkd3d_shader_code *out)
{
unsigned int i;
void *code;
@@ -100,10 +100,10 @@ int vkd3d_glsl_generator_generate(struct vkd3d_glsl_generator *generator,
vkd3d_string_buffer_printf(&generator->buffer, "void main()\n{\n");

generator->location.column = 0;
- for (i = 0; i < parser->instructions.count; ++i)
+ for (i = 0; i < program->instructions.count; ++i)
{
generator->location.line = i + 1;
- vkd3d_glsl_handle_instruction(generator, &parser->instructions.elements[i]);
+ vkd3d_glsl_handle_instruction(generator, &program->instructions.elements[i]);
}

if (generator->failed)
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.y b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
index e30b3dc5f55..c308916e07e 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.y
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
@@ -4089,14 +4089,54 @@ static bool add_ternary(struct hlsl_ctx *ctx, struct hlsl_block *block,
struct hlsl_ir_node *cond, struct hlsl_ir_node *first, struct hlsl_ir_node *second)
{
struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {0};
+ struct hlsl_type *cond_type = cond->data_type;
struct hlsl_type *common_type;

+ if (cond_type->class > HLSL_CLASS_LAST_NUMERIC)
+ {
+ struct vkd3d_string_buffer *string;
+
+ if ((string = hlsl_type_to_string(ctx, cond_type)))
+ hlsl_error(ctx, &cond->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
+ "Ternary condition type '%s' is not numeric.", string->buffer);
+ hlsl_release_string_buffer(ctx, string);
+ }
+
if (first->data_type->class <= HLSL_CLASS_LAST_NUMERIC
&& second->data_type->class <= HLSL_CLASS_LAST_NUMERIC)
{
if (!(common_type = get_common_numeric_type(ctx, first, second, &first->loc)))
return false;

+ if (cond_type->dimx == 1 && cond_type->dimy == 1)
+ {
+ cond_type = hlsl_get_numeric_type(ctx, common_type->class,
+ HLSL_TYPE_BOOL, common_type->dimx, common_type->dimy);
+ if (!(cond = add_implicit_conversion(ctx, block, cond, cond_type, &cond->loc)))
+ return false;
+ }
+ else if (common_type->dimx == 1 && common_type->dimy == 1)
+ {
+ common_type = hlsl_get_numeric_type(ctx, cond_type->class,
+ common_type->base_type, cond_type->dimx, cond_type->dimy);
+ }
+ else if (cond_type->dimx != common_type->dimx || cond_type->dimy != common_type->dimy)
+ {
+ /* This condition looks wrong but is correct.
+ * floatN is compatible with float1xN, but not with floatNx1. */
+
+ struct vkd3d_string_buffer *cond_string, *value_string;
+
+ cond_string = hlsl_type_to_string(ctx, cond_type);
+ value_string = hlsl_type_to_string(ctx, common_type);
+ if (cond_string && value_string)
+ hlsl_error(ctx, &first->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
+ "Ternary condition type '%s' is not compatible with value type '%s'.",
+ cond_string->buffer, value_string->buffer);
+ hlsl_release_string_buffer(ctx, cond_string);
+ hlsl_release_string_buffer(ctx, value_string);
+ }
+
if (!(first = add_implicit_conversion(ctx, block, first, common_type, &first->loc)))
return false;

diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c
index d6978171beb..a797e49308a 100644
--- a/libs/vkd3d/libs/vkd3d-shader/ir.c
+++ b/libs/vkd3d/libs/vkd3d-shader/ir.c
@@ -18,6 +18,17 @@

#include "vkd3d_shader_private.h"

+bool vsir_program_init(struct vsir_program *program, const struct vkd3d_shader_version *version, unsigned int reserve)
+{
+ program->shader_version = *version;
+ return shader_instruction_array_init(&program->instructions, reserve);
+}
+
+void vsir_program_cleanup(struct vsir_program *program)
+{
+ shader_instruction_array_destroy(&program->instructions);
+}
+
static inline bool shader_register_is_phase_instance_id(const struct vkd3d_shader_register *reg)
{
return reg->type == VKD3DSPR_FORKINSTID || reg->type == VKD3DSPR_JOININSTID;
@@ -312,7 +323,7 @@ void vsir_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_reg
reg->alignment = 0;
}

-static void vsir_src_param_init(struct vkd3d_shader_src_param *param, enum vkd3d_shader_register_type reg_type,
+void vsir_src_param_init(struct vkd3d_shader_src_param *param, enum vkd3d_shader_register_type reg_type,
enum vkd3d_data_type data_type, unsigned int idx_count)
{
vsir_register_init(&param->reg, reg_type, data_type, idx_count);
@@ -840,6 +851,7 @@ static bool shader_signature_merge(struct shader_signature *s, uint8_t range_map
unsigned int i, j, element_count, new_count, register_count;
struct signature_element *elements;
struct signature_element *e, *f;
+ bool used;

element_count = s->element_count;
if (!(elements = vkd3d_malloc(element_count * sizeof(*elements))))
@@ -860,14 +872,15 @@ static bool shader_signature_merge(struct shader_signature *s, uint8_t range_map
if (range_map_get_register_count(range_map, e->register_index, e->mask) > 1)
continue;

+ used = e->used_mask;
+
for (; j < element_count; ++j)
{
f = &elements[j];

/* Merge different components of the same register unless sysvals are different,
- * interpolation modes are different, or it will be relative-addressed. */
+ * or it will be relative-addressed. */
if (f->register_index != e->register_index || f->sysval_semantic != e->sysval_semantic
- || f->interpolation_mode != e->interpolation_mode
|| range_map_get_register_count(range_map, f->register_index, f->mask) > 1)
break;

@@ -878,6 +891,16 @@ static bool shader_signature_merge(struct shader_signature *s, uint8_t range_map
e->mask |= f->mask;
e->used_mask |= f->used_mask;
e->semantic_index = min(e->semantic_index, f->semantic_index);
+
+ /* The first element may have no interpolation mode if it is unused. Elements which
+ * actually have different interpolation modes are assigned different registers. */
+ if (f->used_mask && !used)
+ {
+ if (e->interpolation_mode && e->interpolation_mode != f->interpolation_mode)
+ FIXME("Mismatching interpolation modes %u and %u.\n", e->interpolation_mode, f->interpolation_mode);
+ else
+ e->interpolation_mode = f->interpolation_mode;
+ }
}
}
element_count = new_count;
@@ -1217,21 +1240,22 @@ static void shader_instruction_normalise_io_params(struct vkd3d_shader_instructi

static enum vkd3d_result shader_normalise_io_registers(struct vkd3d_shader_parser *parser)
{
- struct io_normaliser normaliser = {parser->instructions};
+ struct io_normaliser normaliser = {parser->program.instructions};
+ struct vsir_program *program = &parser->program;
struct vkd3d_shader_instruction *ins;
bool has_control_point_phase;
unsigned int i, j;

normaliser.phase = VKD3DSIH_INVALID;
- normaliser.shader_type = parser->shader_version.type;
- normaliser.major = parser->shader_version.major;
+ normaliser.shader_type = program->shader_version.type;
+ normaliser.major = program->shader_version.major;
normaliser.input_signature = &parser->shader_desc.input_signature;
normaliser.output_signature = &parser->shader_desc.output_signature;
normaliser.patch_constant_signature = &parser->shader_desc.patch_constant_signature;

- for (i = 0, has_control_point_phase = false; i < parser->instructions.count; ++i)
+ for (i = 0, has_control_point_phase = false; i < program->instructions.count; ++i)
{
- ins = &parser->instructions.elements[i];
+ ins = &program->instructions.elements[i];

switch (ins->handler_idx)
{
@@ -1274,7 +1298,7 @@ static enum vkd3d_result shader_normalise_io_registers(struct vkd3d_shader_parse
|| !shader_signature_merge(&parser->shader_desc.output_signature, normaliser.output_range_map, false)
|| !shader_signature_merge(&parser->shader_desc.patch_constant_signature, normaliser.pc_range_map, true))
{
- parser->instructions = normaliser.instructions;
+ program->instructions = normaliser.instructions;
return VKD3D_ERROR_OUT_OF_MEMORY;
}

@@ -1282,8 +1306,8 @@ static enum vkd3d_result shader_normalise_io_registers(struct vkd3d_shader_parse
for (i = 0; i < normaliser.instructions.count; ++i)
shader_instruction_normalise_io_params(&normaliser.instructions.elements[i], &normaliser);

- parser->instructions = normaliser.instructions;
- parser->shader_desc.use_vocp = normaliser.use_vocp;
+ program->instructions = normaliser.instructions;
+ program->use_vocp = normaliser.use_vocp;
return VKD3D_OK;
}

@@ -1296,7 +1320,6 @@ struct flat_constant_def

struct flat_constants_normaliser
{
- struct vkd3d_shader_parser *parser;
struct flat_constant_def *defs;
size_t def_count, defs_capacity;
};
@@ -1371,14 +1394,14 @@ static void shader_register_normalise_flat_constants(struct vkd3d_shader_src_par
param->reg.idx_count = 3;
}

-static enum vkd3d_result instruction_array_normalise_flat_constants(struct vkd3d_shader_parser *parser)
+static enum vkd3d_result instruction_array_normalise_flat_constants(struct vsir_program *program)
{
- struct flat_constants_normaliser normaliser = {.parser = parser};
+ struct flat_constants_normaliser normaliser = {0};
unsigned int i, j;

- for (i = 0; i < parser->instructions.count; ++i)
+ for (i = 0; i < program->instructions.count; ++i)
{
- struct vkd3d_shader_instruction *ins = &parser->instructions.elements[i];
+ struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];

if (ins->handler_idx == VKD3DSIH_DEF || ins->handler_idx == VKD3DSIH_DEFI || ins->handler_idx == VKD3DSIH_DEFB)
{
@@ -1410,14 +1433,14 @@ static enum vkd3d_result instruction_array_normalise_flat_constants(struct vkd3d
return VKD3D_OK;
}

-static void remove_dead_code(struct vkd3d_shader_parser *parser)
+static void remove_dead_code(struct vsir_program *program)
{
size_t i, depth = 0;
bool dead = false;

- for (i = 0; i < parser->instructions.count; ++i)
+ for (i = 0; i < program->instructions.count; ++i)
{
- struct vkd3d_shader_instruction *ins = &parser->instructions.elements[i];
+ struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];

switch (ins->handler_idx)
{
@@ -1504,15 +1527,15 @@ static enum vkd3d_result normalise_combined_samplers(struct vkd3d_shader_parser
{
unsigned int i;

- for (i = 0; i < parser->instructions.count; ++i)
+ for (i = 0; i < parser->program.instructions.count; ++i)
{
- struct vkd3d_shader_instruction *ins = &parser->instructions.elements[i];
+ struct vkd3d_shader_instruction *ins = &parser->program.instructions.elements[i];
struct vkd3d_shader_src_param *srcs;

switch (ins->handler_idx)
{
case VKD3DSIH_TEX:
- if (!(srcs = shader_src_param_allocator_get(&parser->instructions.src_params, 3)))
+ if (!(srcs = shader_src_param_allocator_get(&parser->program.instructions.src_params, 3)))
return VKD3D_ERROR_OUT_OF_MEMORY;
memset(srcs, 0, sizeof(*srcs) * 3);

@@ -1857,11 +1880,12 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
bool main_block_open, is_hull_shader, after_declarations_section;
struct vkd3d_shader_parser *parser = flattener->parser;
struct vkd3d_shader_instruction_array *instructions;
+ struct vsir_program *program = &parser->program;
struct vkd3d_shader_instruction *dst_ins;
size_t i;

- instructions = &parser->instructions;
- is_hull_shader = parser->shader_version.type == VKD3D_SHADER_TYPE_HULL;
+ instructions = &program->instructions;
+ is_hull_shader = program->shader_version.type == VKD3D_SHADER_TYPE_HULL;
main_block_open = !is_hull_shader;
after_declarations_section = is_hull_shader;

@@ -2208,10 +2232,10 @@ static enum vkd3d_result flatten_control_flow_constructs(struct vkd3d_shader_par

if (result >= 0)
{
- vkd3d_free(parser->instructions.elements);
- parser->instructions.elements = flattener.instructions;
- parser->instructions.capacity = flattener.instruction_capacity;
- parser->instructions.count = flattener.instruction_count;
+ vkd3d_free(parser->program.instructions.elements);
+ parser->program.instructions.elements = flattener.instructions;
+ parser->program.instructions.capacity = flattener.instruction_capacity;
+ parser->program.instructions.count = flattener.instruction_count;
parser->shader_desc.block_count = flattener.block_id;
}
else
@@ -2230,17 +2254,17 @@ static enum vkd3d_result flatten_control_flow_constructs(struct vkd3d_shader_par
enum vkd3d_result vkd3d_shader_normalise(struct vkd3d_shader_parser *parser,
const struct vkd3d_shader_compile_info *compile_info)
{
- struct vkd3d_shader_instruction_array *instructions = &parser->instructions;
+ struct vkd3d_shader_instruction_array *instructions = &parser->program.instructions;
enum vkd3d_result result = VKD3D_OK;

if (parser->shader_desc.is_dxil)
return result;

- if (parser->shader_version.type != VKD3D_SHADER_TYPE_PIXEL
+ if (parser->program.shader_version.type != VKD3D_SHADER_TYPE_PIXEL
&& (result = remap_output_signature(parser, compile_info)) < 0)
return result;

- if (parser->shader_version.type == VKD3D_SHADER_TYPE_HULL
+ if (parser->program.shader_version.type == VKD3D_SHADER_TYPE_HULL
&& (result = instruction_array_flatten_hull_shader_phases(instructions)) >= 0)
{
result = instruction_array_normalise_hull_shader_control_point_io(instructions,
@@ -2250,10 +2274,10 @@ enum vkd3d_result vkd3d_shader_normalise(struct vkd3d_shader_parser *parser,
result = shader_normalise_io_registers(parser);

if (result >= 0)
- result = instruction_array_normalise_flat_constants(parser);
+ result = instruction_array_normalise_flat_constants(&parser->program);

if (result >= 0)
- remove_dead_code(parser);
+ remove_dead_code(&parser->program);

if (result >= 0)
result = flatten_control_flow_constructs(parser);
@@ -2262,7 +2286,7 @@ enum vkd3d_result vkd3d_shader_normalise(struct vkd3d_shader_parser *parser,
result = normalise_combined_samplers(parser);

if (result >= 0 && TRACE_ON())
- vkd3d_shader_trace(instructions, &parser->shader_version);
+ vkd3d_shader_trace(&parser->program);

if (result >= 0 && !parser->failed)
result = vsir_validate(parser);
@@ -2276,11 +2300,18 @@ enum vkd3d_result vkd3d_shader_normalise(struct vkd3d_shader_parser *parser,
struct validation_context
{
struct vkd3d_shader_parser *parser;
+ const struct vsir_program *program;
size_t instruction_idx;
bool invalid_instruction_idx;
bool dcl_temps_found;
unsigned int temp_count;
enum vkd3d_shader_opcode phase;
+ enum cf_type
+ {
+ CF_TYPE_UNKNOWN = 0,
+ CF_TYPE_STRUCTURED,
+ CF_TYPE_BLOCKS,
+ } cf_type;

struct validation_context_temp_data
{
@@ -2337,8 +2368,8 @@ static void vsir_validate_register(struct validation_context *ctx,
unsigned int i, temp_count = ctx->temp_count;

/* SM1-3 shaders do not include a DCL_TEMPS instruction. */
- if (ctx->parser->shader_version.major <= 3)
- temp_count = ctx->parser->shader_desc.temp_count;
+ if (ctx->program->shader_version.major <= 3)
+ temp_count = ctx->program->temp_count;

if (reg->type >= VKD3DSPR_COUNT)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE, "Invalid register type %#x.",
@@ -2390,10 +2421,10 @@ static void vsir_validate_register(struct validation_context *ctx,
break;
}

- /* parser->shader_desc.temp_count might be smaller then
- * temp_count if the parser made a mistake; we still don't
- * want to overflow the array. */
+ /* program->temp_count might be smaller then temp_count if the
+ * parser made a mistake; we still don't want to overflow the
+ * array. */
+ if (reg->idx[0].offset >= ctx->program->temp_count)
break;
data = &ctx->temps[reg->idx[0].offset];

@@ -2434,10 +2465,11 @@ static void vsir_validate_register(struct validation_context *ctx,
if (reg->idx[0].rel_addr)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX, "Non-NULL relative address for a SSA register.");

- if (reg->idx[0].offset >= ctx->parser->shader_desc.ssa_count)
+ if (reg->idx[0].offset >= ctx->program->ssa_count)
{
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX, "SSA register index %u exceeds the maximum count %u.",
- reg->idx[0].offset, ctx->parser->shader_desc.ssa_count);
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
+ "SSA register index %u exceeds the maximum count %u.",
+ reg->idx[0].offset, ctx->program->ssa_count);
break;
}

@@ -2466,6 +2498,37 @@ static void vsir_validate_register(struct validation_context *ctx,
break;
}

+ case VKD3DSPR_LABEL:
+ if (reg->precision != VKD3D_SHADER_REGISTER_PRECISION_DEFAULT)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_PRECISION, "Invalid precision %#x for a LABEL register.",
+ reg->precision);
+
+ if (reg->data_type != VKD3D_DATA_UINT)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_DATA_TYPE, "Invalid data type %#x for a LABEL register.",
+ reg->data_type);
+
+ if (reg->dimension != VSIR_DIMENSION_NONE)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_DIMENSION, "Invalid dimension %#x for a LABEL register.",
+ reg->dimension);
+
+ if (reg->idx_count != 1)
+ {
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX_COUNT, "Invalid index count %u for a LABEL register.",
+ reg->idx_count);
+ break;
+ }
+
+ if (reg->idx[0].rel_addr)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX, "Non-NULL relative address for a LABEL register.");
+
+ /* Index == 0 is invalid, but it is temporarily allowed
+ * for intermediate stages. Once we support validation
+ * dialects we can selectively check for that. */
+ if (reg->idx[0].offset > ctx->parser->shader_desc.block_count)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX, "LABEL register index %u exceeds the maximum count %u.",
+ reg->idx[0].offset, ctx->parser->shader_desc.block_count);
+ break;
+
case VKD3DSPR_NULL:
if (reg->idx_count != 0)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX_COUNT, "Invalid index count %u for a NULL register.",
@@ -2538,7 +2601,7 @@ static void vsir_validate_dst_param(struct validation_context *ctx,
dst->shift);
}

- if (dst->reg.type == VKD3DSPR_SSA && dst->reg.idx[0].offset < ctx->parser->shader_desc.ssa_count)
+ if (dst->reg.type == VKD3DSPR_SSA && dst->reg.idx[0].offset < ctx->program->ssa_count)
{
struct validation_context_ssa_data *data = &ctx->ssas[dst->reg.idx[0].offset];

@@ -2573,7 +2636,7 @@ static void vsir_validate_src_param(struct validation_context *ctx,
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_MODIFIERS, "Source has invalid modifiers %#x.",
src->modifiers);

- if (src->reg.type == VKD3DSPR_SSA && src->reg.idx[0].offset < ctx->parser->shader_desc.ssa_count)
+ if (src->reg.type == VKD3DSPR_SSA && src->reg.idx[0].offset < ctx->program->ssa_count)
{
struct validation_context_ssa_data *data = &ctx->ssas[src->reg.idx[0].offset];
unsigned int i;
@@ -2601,11 +2664,64 @@ static void vsir_validate_src_count(struct validation_context *ctx,
instruction->src_count, instruction->handler_idx, count);
}

+static bool vsir_validate_src_min_count(struct validation_context *ctx,
+ const struct vkd3d_shader_instruction *instruction, unsigned int count)
+{
+ if (instruction->src_count < count)
+ {
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_SOURCE_COUNT,
+ "Invalid source count %u for an instruction of type %#x, expected at least %u.",
+ instruction->src_count, instruction->handler_idx, count);
+ return false;
+ }
+
+ return true;
+}
+
+static bool vsir_validate_src_max_count(struct validation_context *ctx,
+ const struct vkd3d_shader_instruction *instruction, unsigned int count)
+{
+ if (instruction->src_count > count)
+ {
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_SOURCE_COUNT,
+ "Invalid source count %u for an instruction of type %#x, expected at most %u.",
+ instruction->src_count, instruction->handler_idx, count);
+ return false;
+ }
+
+ return true;
+}
+
+static const char *name_from_cf_type(enum cf_type type)
+{
+ switch (type)
+ {
+ case CF_TYPE_STRUCTURED:
+ return "structured";
+ case CF_TYPE_BLOCKS:
+ return "block-based";
+ default:
+ vkd3d_unreachable();
+ }
+}
+
+static void vsir_validate_cf_type(struct validation_context *ctx,
+ const struct vkd3d_shader_instruction *instruction, enum cf_type expected_type)
+{
+ assert(ctx->cf_type != CF_TYPE_UNKNOWN);
+ assert(expected_type != CF_TYPE_UNKNOWN);
+ if (ctx->cf_type != expected_type)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "Invalid instruction %#x in %s shader.",
+ instruction->handler_idx, name_from_cf_type(ctx->cf_type));
+}
+
static void vsir_validate_instruction(struct validation_context *ctx)
{
- const struct vkd3d_shader_instruction *instruction = &ctx->parser->instructions.elements[ctx->instruction_idx];
+ const struct vkd3d_shader_version *version = &ctx->program->shader_version;
+ const struct vkd3d_shader_instruction *instruction;
size_t i;

+ instruction = &ctx->program->instructions.elements[ctx->instruction_idx];
ctx->parser->location = instruction->location;

for (i = 0; i < instruction->dst_count; ++i)
@@ -2628,11 +2744,11 @@ static void vsir_validate_instruction(struct validation_context *ctx)
case VKD3DSIH_HS_JOIN_PHASE:
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 0);
- if (ctx->parser->shader_version.type != VKD3D_SHADER_TYPE_HULL)
+ if (version->type != VKD3D_SHADER_TYPE_HULL)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_HANDLER, "Phase instruction %#x is only valid in a hull shader.",
instruction->handler_idx);
if (ctx->depth != 0)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING, "Phase instruction %#x must appear to top level.",
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "Phase instruction %#x must appear to top level.",
instruction->handler_idx);
ctx->phase = instruction->handler_idx;
ctx->dcl_temps_found = false;
@@ -2643,11 +2759,28 @@ static void vsir_validate_instruction(struct validation_context *ctx)
break;
}

- if (ctx->parser->shader_version.type == VKD3D_SHADER_TYPE_HULL &&
- ctx->phase == VKD3DSIH_INVALID)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_HANDLER, "Instruction %#x appear before any phase instruction in a hull shader.",
+ if (version->type == VKD3D_SHADER_TYPE_HULL && ctx->phase == VKD3DSIH_INVALID)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_HANDLER,
+ "Instruction %#x appear before any phase instruction in a hull shader.",
instruction->handler_idx);

+ /* We support two different control flow types in shaders:
+ * block-based, like DXIL and SPIR-V, and structured, like D3DBC
+ * and TPF. The shader is detected as block-based when its first
+ * instruction, except for DCL_* and phases, is a LABEL. Currently
+ * we mandate that each shader is either purely block-based or
+ * purely structured. In principle we could allow structured
+ * constructs in a block, provided they are confined in a single
+ * block, but need for that hasn't arisen yet, so we don't. */
+ if (ctx->cf_type == CF_TYPE_UNKNOWN && !(instruction->handler_idx >= VKD3DSIH_DCL
+ && instruction->handler_idx <= VKD3DSIH_DCL_VERTICES_OUT))
+ {
+ if (instruction->handler_idx == VKD3DSIH_LABEL)
+ ctx->cf_type = CF_TYPE_BLOCKS;
+ else
+ ctx->cf_type = CF_TYPE_STRUCTURED;
+ }
+
switch (instruction->handler_idx)
{
case VKD3DSIH_DCL_TEMPS:
@@ -2655,14 +2788,16 @@ static void vsir_validate_instruction(struct validation_context *ctx)
vsir_validate_src_count(ctx, instruction, 0);
if (ctx->dcl_temps_found)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_DUPLICATE_DCL_TEMPS, "Duplicate DCL_TEMPS instruction.");
- if (instruction->declaration.count > ctx->parser->shader_desc.temp_count)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_DCL_TEMPS, "Invalid DCL_TEMPS count %u, expected at most %u.",
- instruction->declaration.count, ctx->parser->shader_desc.temp_count);
+ if (instruction->declaration.count > ctx->program->temp_count)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_DCL_TEMPS,
+ "Invalid DCL_TEMPS count %u, expected at most %u.",
+ instruction->declaration.count, ctx->program->temp_count);
ctx->dcl_temps_found = true;
ctx->temp_count = instruction->declaration.count;
break;

case VKD3DSIH_IF:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 1);
if (!vkd3d_array_reserve((void **)&ctx->blocks, &ctx->blocks_capacity, ctx->depth + 1, sizeof(*ctx->blocks)))
@@ -2671,6 +2806,7 @@ static void vsir_validate_instruction(struct validation_context *ctx)
break;

case VKD3DSIH_IFC:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 2);
if (!vkd3d_array_reserve((void **)&ctx->blocks, &ctx->blocks_capacity, ctx->depth + 1, sizeof(*ctx->blocks)))
@@ -2679,41 +2815,46 @@ static void vsir_validate_instruction(struct validation_context *ctx)
break;

case VKD3DSIH_ELSE:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 0);
if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_IF)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING, "ELSE instruction doesn't terminate IF block.");
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "ELSE instruction doesn't terminate IF block.");
else
ctx->blocks[ctx->depth - 1] = instruction->handler_idx;
break;

case VKD3DSIH_ENDIF:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 0);
if (ctx->depth == 0 || (ctx->blocks[ctx->depth - 1] != VKD3DSIH_IF && ctx->blocks[ctx->depth - 1] != VKD3DSIH_ELSE))
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING, "ENDIF instruction doesn't terminate IF/ELSE block.");
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "ENDIF instruction doesn't terminate IF/ELSE block.");
else
--ctx->depth;
break;

case VKD3DSIH_LOOP:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
- vsir_validate_src_count(ctx, instruction, ctx->parser->shader_version.major <= 3 ? 2 : 0);
+ vsir_validate_src_count(ctx, instruction, version->major <= 3 ? 2 : 0);
if (!vkd3d_array_reserve((void **)&ctx->blocks, &ctx->blocks_capacity, ctx->depth + 1, sizeof(*ctx->blocks)))
return;
ctx->blocks[ctx->depth++] = instruction->handler_idx;
break;

case VKD3DSIH_ENDLOOP:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 0);
if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_LOOP)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING, "ENDLOOP instruction doesn't terminate LOOP block.");
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "ENDLOOP instruction doesn't terminate LOOP block.");
else
--ctx->depth;
break;

case VKD3DSIH_REP:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 1);
if (!vkd3d_array_reserve((void **)&ctx->blocks, &ctx->blocks_capacity, ctx->depth + 1, sizeof(*ctx->blocks)))
@@ -2722,15 +2863,17 @@ static void vsir_validate_instruction(struct validation_context *ctx)
break;

case VKD3DSIH_ENDREP:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 0);
if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_REP)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING, "ENDREP instruction doesn't terminate REP block.");
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "ENDREP instruction doesn't terminate REP block.");
else
--ctx->depth;
break;

case VKD3DSIH_SWITCH:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 1);
if (!vkd3d_array_reserve((void **)&ctx->blocks, &ctx->blocks_capacity, ctx->depth + 1, sizeof(*ctx->blocks)))
@@ -2739,14 +2882,112 @@ static void vsir_validate_instruction(struct validation_context *ctx)
break;

case VKD3DSIH_ENDSWITCH:
+ vsir_validate_cf_type(ctx, instruction, CF_TYPE_STRUCTURED);
vsir_validate_dst_count(ctx, instruction, 0);
vsir_validate_src_count(ctx, instruction, 0);
if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_SWITCH)
|
|
|
|
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING, "ENDSWITCH instruction doesn't terminate SWITCH block.");
|
|
|
|
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "ENDSWITCH instruction doesn't terminate SWITCH block.");
|
|
|
|
else
|
|
|
|
--ctx->depth;
|
|
|
|
break;
|
|
|
|
|
|
|
|
+        case VKD3DSIH_RET:
+            vsir_validate_dst_count(ctx, instruction, 0);
+            vsir_validate_src_count(ctx, instruction, 0);
+            break;
+
+        case VKD3DSIH_LABEL:
+            vsir_validate_cf_type(ctx, instruction, CF_TYPE_BLOCKS);
+            vsir_validate_dst_count(ctx, instruction, 0);
+            vsir_validate_src_count(ctx, instruction, 1);
+            if (instruction->src_count >= 1 && !vsir_register_is_label(&instruction->src[0].reg))
+                validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
+                        "Invalid register of type %#x in a LABEL instruction, expected LABEL.",
+                        instruction->src[0].reg.type);
+            break;
+
+        case VKD3DSIH_BRANCH:
+            vsir_validate_cf_type(ctx, instruction, CF_TYPE_BLOCKS);
+            vsir_validate_dst_count(ctx, instruction, 0);
+            if (!vsir_validate_src_min_count(ctx, instruction, 1))
+                break;
+            if (vsir_register_is_label(&instruction->src[0].reg))
+            {
+                /* Unconditional branch: parameters are jump label,
+                 * optional merge label, optional continue label. */
+                vsir_validate_src_max_count(ctx, instruction, 3);
+
+                for (i = 0; i < instruction->src_count; ++i)
+                {
+                    if (!vsir_register_is_label(&instruction->src[i].reg))
+                        validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
+                                "Invalid register of type %#x in unconditional BRANCH instruction, expected LABEL.",
+                                instruction->src[i].reg.type);
+                }
+            }
+            else
+            {
+                /* Conditional branch: parameters are condition, true
+                 * jump label, false jump label, optional merge label,
+                 * optional continue label. */
+                vsir_validate_src_min_count(ctx, instruction, 3);
+                vsir_validate_src_max_count(ctx, instruction, 5);
+
+                for (i = 1; i < instruction->src_count; ++i)
+                {
+                    if (!vsir_register_is_label(&instruction->src[i].reg))
+                        validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
+                                "Invalid register of type %#x in conditional BRANCH instruction, expected LABEL.",
+                                instruction->src[i].reg.type);
+                }
+            }
+            break;
+
+        case VKD3DSIH_SWITCH_MONOLITHIC:
+        {
+            unsigned int case_count;
+
+            vsir_validate_cf_type(ctx, instruction, CF_TYPE_BLOCKS);
+            vsir_validate_dst_count(ctx, instruction, 0);
+            /* Parameters are source, default label, merge label and
+             * then pairs of constant value and case label. */
+            if (!vsir_validate_src_min_count(ctx, instruction, 3))
+                break;
+            if (instruction->src_count % 2 != 1)
+                validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_SOURCE_COUNT,
+                        "Invalid source count %u for a monolithic SWITCH instruction, it must be an odd number.",
+                        instruction->src_count);
+
+            if (!vsir_register_is_label(&instruction->src[1].reg))
+                validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
+                        "Invalid default label register of type %#x in monolithic SWITCH instruction, expected LABEL.",
+                        instruction->src[1].reg.type);
+
+            if (!vsir_register_is_label(&instruction->src[2].reg))
+                validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
+                        "Invalid merge label register of type %#x in monolithic SWITCH instruction, expected LABEL.",
+                        instruction->src[2].reg.type);
+
+            case_count = (instruction->src_count - 3) / 2;
+
+            for (i = 0; i < case_count; ++i)
+            {
+                unsigned int value_idx = 3 + 2 * i;
+                unsigned int label_idx = 3 + 2 * i + 1;
+
+                if (!register_is_constant(&instruction->src[value_idx].reg))
+                    validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
+                            "Invalid value register for case %zu of type %#x in monolithic SWITCH instruction, "
+                            "expected IMMCONST or IMMCONST64.", i, instruction->src[value_idx].reg.type);
+
+                if (!vsir_register_is_label(&instruction->src[label_idx].reg))
+                    validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
+                            "Invalid label register for case %zu of type %#x in monolithic SWITCH instruction, "
+                            "expected LABEL.", i, instruction->src[value_idx].reg.type);
+            }
+            break;
+        }
+
         default:
             break;
     }
@@ -2757,6 +2998,7 @@ enum vkd3d_result vsir_validate(struct vkd3d_shader_parser *parser)
     struct validation_context ctx =
     {
         .parser = parser,
+        .program = &parser->program,
         .phase = VKD3DSIH_INVALID,
     };
     unsigned int i;
@@ -2764,21 +3006,21 @@ enum vkd3d_result vsir_validate(struct vkd3d_shader_parser *parser)
     if (!(parser->config_flags & VKD3D_SHADER_CONFIG_FLAG_FORCE_VALIDATION))
         return VKD3D_OK;

-    if (!(ctx.temps = vkd3d_calloc(parser->shader_desc.temp_count, sizeof(*ctx.temps))))
+    if (!(ctx.temps = vkd3d_calloc(ctx.program->temp_count, sizeof(*ctx.temps))))
         goto fail;

-    if (!(ctx.ssas = vkd3d_calloc(parser->shader_desc.ssa_count, sizeof(*ctx.ssas))))
+    if (!(ctx.ssas = vkd3d_calloc(ctx.program->ssa_count, sizeof(*ctx.ssas))))
         goto fail;

-    for (ctx.instruction_idx = 0; ctx.instruction_idx < parser->instructions.count; ++ctx.instruction_idx)
+    for (ctx.instruction_idx = 0; ctx.instruction_idx < parser->program.instructions.count; ++ctx.instruction_idx)
         vsir_validate_instruction(&ctx);

     ctx.invalid_instruction_idx = true;

     if (ctx.depth != 0)
-        validator_error(&ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING, "%zu nested blocks were not closed.", ctx.depth);
+        validator_error(&ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW, "%zu nested blocks were not closed.", ctx.depth);

-    for (i = 0; i < parser->shader_desc.ssa_count; ++i)
+    for (i = 0; i < ctx.program->ssa_count; ++i)
     {
         struct validation_context_ssa_data *data = &ctx.ssas[i];

diff --git a/libs/vkd3d/libs/vkd3d-shader/spirv.c b/libs/vkd3d/libs/vkd3d-shader/spirv.c
index 0eeb04bfe59..e99724ca21c 100644
--- a/libs/vkd3d/libs/vkd3d-shader/spirv.c
+++ b/libs/vkd3d/libs/vkd3d-shader/spirv.c
@@ -1933,6 +1933,8 @@ static bool vkd3d_spirv_compile_module(struct vkd3d_spirv_builder *builder,
         vkd3d_spirv_build_op_extension(&stream, "SPV_EXT_descriptor_indexing");
     if (vkd3d_spirv_capability_is_enabled(builder, SpvCapabilityStencilExportEXT))
         vkd3d_spirv_build_op_extension(&stream, "SPV_EXT_shader_stencil_export");
+    if (vkd3d_spirv_capability_is_enabled(builder, SpvCapabilityShaderViewportIndexLayerEXT))
+        vkd3d_spirv_build_op_extension(&stream, "SPV_EXT_shader_viewport_index_layer");

     if (builder->ext_instr_set_glsl_450)
         vkd3d_spirv_build_op_ext_inst_import(&stream, builder->ext_instr_set_glsl_450, "GLSL.std.450");
@@ -3852,7 +3854,12 @@ static uint32_t spirv_compiler_emit_load_ssa_reg(struct spirv_compiler *compiler

     ssa = spirv_compiler_get_ssa_register_info(compiler, reg);
     val_id = ssa->id;
-    assert(val_id);
+    if (!val_id)
+    {
+        /* Should only be from a missing instruction implementation. */
+        assert(compiler->failed);
+        return 0;
+    }
     assert(vkd3d_swizzle_is_scalar(swizzle));

     if (reg->dimension == VSIR_DIMENSION_SCALAR)
@@ -4236,10 +4243,58 @@ static void spirv_compiler_decorate_builtin(struct spirv_compiler *compiler,
             spirv_compiler_emit_execution_mode(compiler, SpvExecutionModeDepthReplacing, NULL, 0);
             break;
         case SpvBuiltInLayer:
-            vkd3d_spirv_enable_capability(builder, SpvCapabilityGeometry);
+            switch (compiler->shader_type)
+            {
+                case VKD3D_SHADER_TYPE_PIXEL:
+                case VKD3D_SHADER_TYPE_GEOMETRY:
+                    vkd3d_spirv_enable_capability(builder, SpvCapabilityGeometry);
+                    break;
+
+                case VKD3D_SHADER_TYPE_VERTEX:
+                case VKD3D_SHADER_TYPE_DOMAIN:
+                    if (!spirv_compiler_is_target_extension_supported(compiler,
+                            VKD3D_SHADER_SPIRV_EXTENSION_EXT_VIEWPORT_INDEX_LAYER))
+                    {
+                        FIXME("The target environment does not support decoration Layer.\n");
+                        spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_UNSUPPORTED_FEATURE,
+                                "Cannot use SV_RenderTargetArrayIndex. "
+                                "The target environment does not support decoration Layer.");
+                    }
+                    vkd3d_spirv_enable_capability(builder, SpvCapabilityShaderViewportIndexLayerEXT);
+                    break;
+
+                default:
+                    spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_INVALID_SHADER,
+                            "Invalid use of SV_RenderTargetArrayIndex.");
+                    break;
+            }
             break;
         case SpvBuiltInViewportIndex:
-            vkd3d_spirv_enable_capability(builder, SpvCapabilityMultiViewport);
+            switch (compiler->shader_type)
+            {
+                case VKD3D_SHADER_TYPE_PIXEL:
+                case VKD3D_SHADER_TYPE_GEOMETRY:
+                    vkd3d_spirv_enable_capability(builder, SpvCapabilityMultiViewport);
+                    break;
+
+                case VKD3D_SHADER_TYPE_VERTEX:
+                case VKD3D_SHADER_TYPE_DOMAIN:
+                    if (!spirv_compiler_is_target_extension_supported(compiler,
+                            VKD3D_SHADER_SPIRV_EXTENSION_EXT_VIEWPORT_INDEX_LAYER))
+                    {
+                        FIXME("The target environment does not support decoration ViewportIndex.\n");
+                        spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_UNSUPPORTED_FEATURE,
+                                "Cannot use SV_ViewportArrayIndex. "
+                                "The target environment does not support decoration ViewportIndex.");
+                    }
+                    vkd3d_spirv_enable_capability(builder, SpvCapabilityShaderViewportIndexLayerEXT);
+                    break;
+
+                default:
+                    spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_INVALID_SHADER,
+                            "Invalid use of SV_ViewportArrayIndex.");
+                    break;
+            }
             break;
         case SpvBuiltInSampleId:
             vkd3d_spirv_enable_capability(builder, SpvCapabilitySampleRateShading);
@@ -6887,11 +6942,11 @@ static enum GLSLstd450 spirv_compiler_map_ext_glsl_instruction(
 static void spirv_compiler_emit_ext_glsl_instruction(struct spirv_compiler *compiler,
         const struct vkd3d_shader_instruction *instruction)
 {
+    uint32_t instr_set_id, type_id, val_id, rev_val_id, uint_max_id, condition_id;
     struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
     const struct vkd3d_shader_dst_param *dst = instruction->dst;
     const struct vkd3d_shader_src_param *src = instruction->src;
     uint32_t src_id[SPIRV_MAX_SRC_COUNT];
-    uint32_t instr_set_id, type_id, val_id;
     unsigned int i, component_count;
     enum GLSLstd450 glsl_inst;

@@ -6920,8 +6975,12 @@ static void spirv_compiler_emit_ext_glsl_instruction(struct spirv_compiler *comp
     {
         /* In D3D bits are numbered from the most significant bit. */
         component_count = vsir_write_mask_component_count(dst->write_mask);
-        val_id = vkd3d_spirv_build_op_isub(builder, type_id,
+        uint_max_id = spirv_compiler_get_constant_uint_vector(compiler, UINT32_MAX, component_count);
+        condition_id = vkd3d_spirv_build_op_tr2(builder, &builder->function_stream, SpvOpIEqual,
+                vkd3d_spirv_get_type_id(builder, VKD3D_SHADER_COMPONENT_BOOL, component_count), val_id, uint_max_id);
+        rev_val_id = vkd3d_spirv_build_op_isub(builder, type_id,
                 spirv_compiler_get_constant_uint_vector(compiler, 31, component_count), val_id);
+        val_id = vkd3d_spirv_build_op_select(builder, type_id, condition_id, val_id, rev_val_id);
     }

     spirv_compiler_emit_store_dst(compiler, dst, val_id);
@@ -8319,7 +8378,6 @@ static void spirv_compiler_emit_ld_raw_structured_srv_uav(struct spirv_compiler
                     type_id, val_id, 0);
         }
     }
-    assert(dst->reg.data_type == VKD3D_DATA_UINT);
     spirv_compiler_emit_store_dst_components(compiler, dst, VKD3D_SHADER_COMPONENT_UINT, constituents);
 }

@@ -9672,27 +9730,28 @@ static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler,
     struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
     struct vkd3d_shader_desc *shader_desc = &parser->shader_desc;
     struct vkd3d_shader_instruction_array instructions;
+    struct vsir_program *program = &parser->program;
     enum vkd3d_result result = VKD3D_OK;
     unsigned int i;

-    if (parser->shader_desc.temp_count)
-        spirv_compiler_emit_temps(compiler, parser->shader_desc.temp_count);
-    if (parser->shader_desc.ssa_count)
-        spirv_compiler_allocate_ssa_register_ids(compiler, parser->shader_desc.ssa_count);
+    if ((result = vkd3d_shader_normalise(parser, compile_info)) < 0)
+        return result;
+
+    if (program->temp_count)
+        spirv_compiler_emit_temps(compiler, program->temp_count);
+    if (program->ssa_count)
+        spirv_compiler_allocate_ssa_register_ids(compiler, program->ssa_count);

     spirv_compiler_emit_descriptor_declarations(compiler);

     compiler->location.column = 0;
     compiler->location.line = 1;

-    if ((result = vkd3d_shader_normalise(parser, compile_info)) < 0)
-        return result;
-
     if (parser->shader_desc.block_count && !spirv_compiler_init_blocks(compiler, parser->shader_desc.block_count))
         return VKD3D_ERROR_OUT_OF_MEMORY;

-    instructions = parser->instructions;
-    memset(&parser->instructions, 0, sizeof(parser->instructions));
+    instructions = program->instructions;
+    memset(&program->instructions, 0, sizeof(program->instructions));

     compiler->input_signature = shader_desc->input_signature;
     compiler->output_signature = shader_desc->output_signature;
@@ -9700,7 +9759,7 @@ static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler,
     memset(&shader_desc->input_signature, 0, sizeof(shader_desc->input_signature));
     memset(&shader_desc->output_signature, 0, sizeof(shader_desc->output_signature));
     memset(&shader_desc->patch_constant_signature, 0, sizeof(shader_desc->patch_constant_signature));
-    compiler->use_vocp = parser->shader_desc.use_vocp;
+    compiler->use_vocp = program->use_vocp;
     compiler->block_names = parser->shader_desc.block_names;
     compiler->block_name_count = parser->shader_desc.block_name_count;

@@ -9802,7 +9861,7 @@ int spirv_compile(struct vkd3d_shader_parser *parser,
     struct spirv_compiler *spirv_compiler;
     int ret;

-    if (!(spirv_compiler = spirv_compiler_create(&parser->shader_version, &parser->shader_desc,
+    if (!(spirv_compiler = spirv_compiler_create(&parser->program.shader_version, &parser->shader_desc,
             compile_info, scan_descriptor_info, message_context, &parser->location, parser->config_flags)))
     {
         ERR("Failed to create SPIR-V compiler.\n");
diff --git a/libs/vkd3d/libs/vkd3d-shader/tpf.c b/libs/vkd3d/libs/vkd3d-shader/tpf.c
index e4dfb5235ec..df10cd254d6 100644
--- a/libs/vkd3d/libs/vkd3d-shader/tpf.c
+++ b/libs/vkd3d/libs/vkd3d-shader/tpf.c
@@ -711,7 +711,7 @@ static struct vkd3d_shader_sm4_parser *vkd3d_shader_sm4_parser(struct vkd3d_shad

 static bool shader_is_sm_5_1(const struct vkd3d_shader_sm4_parser *sm4)
 {
-    const struct vkd3d_shader_version *version = &sm4->p.shader_version;
+    const struct vkd3d_shader_version *version = &sm4->p.program.shader_version;

     return version->major >= 5 && version->minor >= 1;
 }
@@ -796,7 +796,7 @@ static void shader_sm4_read_shader_data(struct vkd3d_shader_instruction *ins, ui
     icb->element_count = icb_size / VKD3D_VEC4_SIZE;
     icb->is_null = false;
     memcpy(icb->data, tokens, sizeof(*tokens) * icb_size);
-    shader_instruction_array_add_icb(&priv->p.instructions, icb);
+    shader_instruction_array_add_icb(&priv->p.program.instructions, icb);
     ins->declaration.icb = icb;
 }

@@ -1072,7 +1072,7 @@ static void shader_sm4_read_declaration_count(struct vkd3d_shader_instruction *i
 {
     ins->declaration.count = *tokens;
     if (opcode == VKD3D_SM4_OP_DCL_TEMPS)
-        priv->p.shader_desc.temp_count = max(priv->p.shader_desc.temp_count, *tokens);
+        priv->p.program.temp_count = max(priv->p.program.temp_count, *tokens);
 }

 static void shader_sm4_read_declaration_dst(struct vkd3d_shader_instruction *ins, uint32_t opcode,
@@ -1732,7 +1732,7 @@ static void shader_sm4_destroy(struct vkd3d_shader_parser *parser)
 {
     struct vkd3d_shader_sm4_parser *sm4 = vkd3d_shader_sm4_parser(parser);

-    shader_instruction_array_destroy(&parser->instructions);
+    vsir_program_cleanup(&parser->program);
     free_shader_desc(&parser->shader_desc);
     vkd3d_free(sm4);
 }
@@ -2020,7 +2020,7 @@ static bool register_is_control_point_input(const struct vkd3d_shader_register *
 {
     return reg->type == VKD3DSPR_INCONTROLPOINT || reg->type == VKD3DSPR_OUTCONTROLPOINT
             || (reg->type == VKD3DSPR_INPUT && (priv->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE
-            || priv->p.shader_version.type == VKD3D_SHADER_TYPE_GEOMETRY));
+            || priv->p.program.shader_version.type == VKD3D_SHADER_TYPE_GEOMETRY));
 }

 static uint32_t mask_from_swizzle(uint32_t swizzle)
@@ -2662,7 +2662,7 @@ int vkd3d_shader_sm4_parser_create(const struct vkd3d_shader_compile_info *compi
     /* DXBC stores used masks inverted for output signatures, for some reason.
      * We return them un-inverted. */
     uninvert_used_masks(&shader_desc->output_signature);
-    if (sm4->p.shader_version.type == VKD3D_SHADER_TYPE_HULL)
+    if (sm4->p.program.shader_version.type == VKD3D_SHADER_TYPE_HULL)
         uninvert_used_masks(&shader_desc->patch_constant_signature);

     if (!shader_sm4_parser_validate_signature(sm4, &shader_desc->input_signature,
@@ -2676,7 +2676,7 @@ int vkd3d_shader_sm4_parser_create(const struct vkd3d_shader_compile_info *compi
         return VKD3D_ERROR_INVALID_SHADER;
     }

-    instructions = &sm4->p.instructions;
+    instructions = &sm4->p.program.instructions;
     while (sm4->ptr != sm4->end)
     {
         if (!shader_instruction_array_reserve(instructions, instructions->count + 1))
@@ -2697,7 +2697,8 @@ int vkd3d_shader_sm4_parser_create(const struct vkd3d_shader_compile_info *compi
         }
         ++instructions->count;
     }
-    if (sm4->p.shader_version.type == VKD3D_SHADER_TYPE_HULL && !sm4->has_control_point_phase && !sm4->p.failed)
+    if (sm4->p.program.shader_version.type == VKD3D_SHADER_TYPE_HULL
+            && !sm4->has_control_point_phase && !sm4->p.failed)
         shader_sm4_validate_default_phase_index_ranges(sm4);

     if (!sm4->p.failed)
@@ -2806,6 +2807,8 @@ bool hlsl_sm4_usage_from_semantic(struct hlsl_ctx *ctx, const struct hlsl_semant
         {"position", false, VKD3D_SHADER_TYPE_PIXEL, D3D_NAME_POSITION},
         {"sv_position", false, VKD3D_SHADER_TYPE_PIXEL, D3D_NAME_POSITION},
         {"sv_isfrontface", false, VKD3D_SHADER_TYPE_PIXEL, D3D_NAME_IS_FRONT_FACE},
+        {"sv_rendertargetarrayindex", false, VKD3D_SHADER_TYPE_PIXEL, D3D_NAME_RENDER_TARGET_ARRAY_INDEX},
+        {"sv_viewportarrayindex", false, VKD3D_SHADER_TYPE_PIXEL, D3D_NAME_VIEWPORT_ARRAY_INDEX},

         {"color", true, VKD3D_SHADER_TYPE_PIXEL, D3D_NAME_TARGET},
         {"depth", true, VKD3D_SHADER_TYPE_PIXEL, D3D_NAME_DEPTH},
@@ -2814,9 +2817,12 @@ bool hlsl_sm4_usage_from_semantic(struct hlsl_ctx *ctx, const struct hlsl_semant

         {"sv_position", false, VKD3D_SHADER_TYPE_VERTEX, D3D_NAME_UNDEFINED},
         {"sv_vertexid", false, VKD3D_SHADER_TYPE_VERTEX, D3D_NAME_VERTEX_ID},
+        {"sv_instanceid", false, VKD3D_SHADER_TYPE_VERTEX, D3D_NAME_INSTANCE_ID},

         {"position", true, VKD3D_SHADER_TYPE_VERTEX, D3D_NAME_POSITION},
         {"sv_position", true, VKD3D_SHADER_TYPE_VERTEX, D3D_NAME_POSITION},
+        {"sv_rendertargetarrayindex", true, VKD3D_SHADER_TYPE_VERTEX, D3D_NAME_RENDER_TARGET_ARRAY_INDEX},
+        {"sv_viewportarrayindex", true, VKD3D_SHADER_TYPE_VERTEX, D3D_NAME_VIEWPORT_ARRAY_INDEX},
     };
     bool needs_compat_mapping = ascii_strncasecmp(semantic->name, "sv_", 3);

diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
index 61e46f5538e..14d885fb666 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
@@ -539,10 +539,9 @@ bool vkd3d_shader_parser_init(struct vkd3d_shader_parser *parser,
     parser->location.source_name = source_name;
     parser->location.line = 1;
     parser->location.column = 0;
-    parser->shader_version = *version;
     parser->ops = ops;
     parser->config_flags = vkd3d_shader_init_config_flags();
-    return shader_instruction_array_init(&parser->instructions, instruction_reserve);
+    return vsir_program_init(&parser->program, version, instruction_reserve);
 }

 void VKD3D_PRINTF_FUNC(3, 4) vkd3d_shader_parser_error(struct vkd3d_shader_parser *parser,
@@ -1402,17 +1401,15 @@ static int scan_with_parser(const struct vkd3d_shader_compile_info *compile_info
         descriptor_info1 = &local_descriptor_info1;
     }

-    vkd3d_shader_scan_context_init(&context, &parser->shader_version, compile_info,
+    vkd3d_shader_scan_context_init(&context, &parser->program.shader_version, compile_info,
             descriptor_info1, combined_sampler_info, message_context);

     if (TRACE_ON())
-    {
-        vkd3d_shader_trace(&parser->instructions, &parser->shader_version);
-    }
+        vkd3d_shader_trace(&parser->program);

-    for (i = 0; i < parser->instructions.count; ++i)
+    for (i = 0; i < parser->program.instructions.count; ++i)
     {
-        instruction = &parser->instructions.elements[i];
+        instruction = &parser->program.instructions.elements[i];
         if ((ret = vkd3d_shader_scan_instruction(&context, instruction)) < 0)
             break;
     }
@@ -1585,13 +1582,13 @@ static int vkd3d_shader_parser_compile(struct vkd3d_shader_parser *parser,
     switch (compile_info->target_type)
     {
         case VKD3D_SHADER_TARGET_D3D_ASM:
-            ret = vkd3d_dxbc_binary_to_text(&parser->instructions, &parser->shader_version, compile_info, out, VSIR_ASM_D3D);
+            ret = vkd3d_dxbc_binary_to_text(&parser->program, compile_info, out, VSIR_ASM_D3D);
             break;

         case VKD3D_SHADER_TARGET_GLSL:
            if ((ret = scan_with_parser(&scan_info, message_context, &scan_descriptor_info, parser)) < 0)
                return ret;
-            if (!(glsl_generator = vkd3d_glsl_generator_create(&parser->shader_version,
+            if (!(glsl_generator = vkd3d_glsl_generator_create(&parser->program.shader_version,
                     message_context, &parser->location)))
             {
                 ERR("Failed to create GLSL generator.\n");
@@ -1599,7 +1596,7 @@ static int vkd3d_shader_parser_compile(struct vkd3d_shader_parser *parser,
                 return VKD3D_ERROR;
             }

-            ret = vkd3d_glsl_generator_generate(glsl_generator, parser, out);
+            ret = vkd3d_glsl_generator_generate(glsl_generator, &parser->program, out);
             vkd3d_glsl_generator_destroy(glsl_generator);
             vkd3d_shader_free_scan_descriptor_info1(&scan_descriptor_info);
             break;
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
index ace58161e6b..2d5e25d7f05 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
@@ -215,7 +215,7 @@ enum vkd3d_shader_error
     VKD3D_SHADER_ERROR_VSIR_DUPLICATE_DCL_TEMPS = 9013,
     VKD3D_SHADER_ERROR_VSIR_INVALID_DCL_TEMPS = 9014,
     VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX = 9015,
-    VKD3D_SHADER_ERROR_VSIR_INVALID_INSTRUCTION_NESTING = 9016,
+    VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW = 9016,
     VKD3D_SHADER_ERROR_VSIR_INVALID_SSA_USAGE = 9017,

     VKD3D_SHADER_WARNING_VSIR_DYNAMIC_DESCRIPTOR_ARRAY = 9300,
@@ -436,6 +436,7 @@ enum vkd3d_shader_opcode
     VKD3DSIH_NRM,
     VKD3DSIH_OR,
     VKD3DSIH_PHASE,
+    VKD3DSIH_PHI,
     VKD3DSIH_POW,
     VKD3DSIH_RCP,
     VKD3DSIH_REP,
@@ -826,7 +827,7 @@ struct vkd3d_shader_indexable_temp

 struct vkd3d_shader_register_index
 {
-    const struct vkd3d_shader_src_param *rel_addr;
+    struct vkd3d_shader_src_param *rel_addr;
     unsigned int offset;
     /* address is known to fall within the object (for optimisation) */
     bool is_in_bounds;
@@ -886,6 +887,8 @@ struct vkd3d_shader_src_param
     enum vkd3d_shader_src_modifier modifiers;
 };

+void vsir_src_param_init(struct vkd3d_shader_src_param *param, enum vkd3d_shader_register_type reg_type,
+        enum vkd3d_data_type data_type, unsigned int idx_count);
 void vsir_src_param_init_label(struct vkd3d_shader_src_param *param, unsigned int label_id);

 struct vkd3d_shader_index_range
@@ -1019,8 +1022,6 @@ struct vkd3d_shader_desc

     unsigned int input_control_point_count, output_control_point_count;

-    uint32_t temp_count;
-    unsigned int ssa_count;
     unsigned int block_count;

     struct
@@ -1028,8 +1029,6 @@ struct vkd3d_shader_desc
         uint32_t used, external;
     } flat_constant_count[3];

-    bool use_vocp;
-
     const char **block_names;
     size_t block_name_count;
 };
@@ -1194,11 +1193,22 @@ static inline bool register_is_constant(const struct vkd3d_shader_register *reg)
     return (reg->type == VKD3DSPR_IMMCONST || reg->type == VKD3DSPR_IMMCONST64);
 }

+static inline bool register_is_scalar_constant_zero(const struct vkd3d_shader_register *reg)
+{
+    return register_is_constant(reg) && reg->dimension == VSIR_DIMENSION_SCALAR
+            && (data_type_is_64_bit(reg->data_type) ? !reg->u.immconst_u64[0] : !reg->u.immconst_u32[0]);
+}
+
 static inline bool vsir_register_is_label(const struct vkd3d_shader_register *reg)
 {
     return reg->type == VKD3DSPR_LABEL;
 }

+static inline bool register_is_ssa(const struct vkd3d_shader_register *reg)
+{
+    return reg->type == VKD3DSPR_SSA;
+}
+
 struct vkd3d_shader_param_node
 {
     struct vkd3d_shader_param_node *next;
@@ -1256,6 +1266,19 @@ enum vkd3d_shader_config_flags
     VKD3D_SHADER_CONFIG_FLAG_FORCE_VALIDATION = 0x00000001,
 };

+struct vsir_program
+{
+    struct vkd3d_shader_version shader_version;
+    struct vkd3d_shader_instruction_array instructions;
+
+    unsigned int temp_count;
+    unsigned int ssa_count;
+    bool use_vocp;
+};
+
+bool vsir_program_init(struct vsir_program *program, const struct vkd3d_shader_version *version, unsigned int reserve);
+void vsir_program_cleanup(struct vsir_program *program);
+
 struct vkd3d_shader_parser
 {
     struct vkd3d_shader_message_context *message_context;
@@ -1263,9 +1286,8 @@ struct vkd3d_shader_parser
     bool failed;

     struct vkd3d_shader_desc shader_desc;
-    struct vkd3d_shader_version shader_version;
     const struct vkd3d_shader_parser_ops *ops;
-    struct vkd3d_shader_instruction_array instructions;
+    struct vsir_program program;

     uint64_t config_flags;
 };
@@ -1287,13 +1309,13 @@ void vkd3d_shader_parser_warning(struct vkd3d_shader_parser *parser,
 static inline struct vkd3d_shader_dst_param *shader_parser_get_dst_params(
         struct vkd3d_shader_parser *parser, unsigned int count)
 {
-    return shader_dst_param_allocator_get(&parser->instructions.dst_params, count);
+    return shader_dst_param_allocator_get(&parser->program.instructions.dst_params, count);
 }

 static inline struct vkd3d_shader_src_param *shader_parser_get_src_params(
         struct vkd3d_shader_parser *parser, unsigned int count)
 {
-    return shader_src_param_allocator_get(&parser->instructions.src_params, count);
+    return shader_src_param_allocator_get(&parser->program.instructions.src_params, count);
 }

 static inline void vkd3d_shader_parser_destroy(struct vkd3d_shader_parser *parser)
@@ -1322,8 +1344,7 @@ struct vkd3d_shader_scan_descriptor_info1
     unsigned int descriptor_count;
 };

-void vkd3d_shader_trace(const struct vkd3d_shader_instruction_array *instructions,
-        const struct vkd3d_shader_version *shader_version);
+void vkd3d_shader_trace(const struct vsir_program *program);

 const char *shader_get_type_prefix(enum vkd3d_shader_type type);

@@ -1345,8 +1366,8 @@ enum vsir_asm_dialect
     VSIR_ASM_D3D,
 };

-enum vkd3d_result vkd3d_dxbc_binary_to_text(const struct vkd3d_shader_instruction_array *instructions,
-        const struct vkd3d_shader_version *shader_version, const struct vkd3d_shader_compile_info *compile_info,
+enum vkd3d_result vkd3d_dxbc_binary_to_text(const struct vsir_program *program,
+        const struct vkd3d_shader_compile_info *compile_info,
         struct vkd3d_shader_code *out, enum vsir_asm_dialect dialect);
 void vkd3d_string_buffer_cleanup(struct vkd3d_string_buffer *buffer);
 struct vkd3d_string_buffer *vkd3d_string_buffer_get(struct vkd3d_string_buffer_cache *list);
@@ -1447,7 +1468,7 @@ struct vkd3d_glsl_generator;
 struct vkd3d_glsl_generator *vkd3d_glsl_generator_create(const struct vkd3d_shader_version *version,
         struct vkd3d_shader_message_context *message_context, const struct vkd3d_shader_location *location);
 int vkd3d_glsl_generator_generate(struct vkd3d_glsl_generator *generator,
-        struct vkd3d_shader_parser *parser, struct vkd3d_shader_code *out);
+        struct vsir_program *program, struct vkd3d_shader_code *out);
 void vkd3d_glsl_generator_destroy(struct vkd3d_glsl_generator *generator);

 #define SPIRV_MAX_SRC_COUNT 6
diff --git a/libs/vkd3d/libs/vkd3d/command.c b/libs/vkd3d/libs/vkd3d/command.c
index 4c39d00de24..d146e322d25 100644
--- a/libs/vkd3d/libs/vkd3d/command.c
+++ b/libs/vkd3d/libs/vkd3d/command.c
@@ -313,7 +313,7 @@ static void vkd3d_wait_for_gpu_fence(struct vkd3d_fence_worker *worker,

     TRACE("Signaling fence %p value %#"PRIx64".\n", waiting_fence->fence, waiting_fence->value);
     if (FAILED(hr = d3d12_fence_signal(waiting_fence->fence, waiting_fence->value, waiting_fence->u.vk_fence, false)))
-        ERR("Failed to signal D3D12 fence, hr %#x.\n", hr);
+        ERR("Failed to signal d3d12 fence, hr %s.\n", debugstr_hresult(hr));

     d3d12_fence_decref(waiting_fence->fence);

@@ -3706,7 +3706,7 @@ static void d3d12_command_list_copy_incompatible_texture_region(struct d3d12_com
             buffer_image_copy.imageExtent.height * buffer_image_copy.imageExtent.depth * layer_count;
     if (FAILED(hr = d3d12_command_list_allocate_transfer_buffer(list, buffer_size, &transfer_buffer)))
     {
-        ERR("Failed to allocate transfer buffer, hr %#x.\n", hr);
+        ERR("Failed to allocate transfer buffer, hr %s.\n", debugstr_hresult(hr));
         return;
     }

@@ -6564,7 +6564,7 @@ static void d3d12_command_queue_submit_locked(struct d3d12_command_queue *queue)
     if (queue->op_queue.count == 1 && !queue->is_flushing)
     {
         if (FAILED(hr = d3d12_command_queue_flush_ops_locked(queue, &flushed_any)))
-            ERR("Cannot flush queue, hr %#x.\n", hr);
+            ERR("Failed to flush queue, hr %s.\n", debugstr_hresult(hr));
     }
 }

diff --git a/libs/vkd3d/libs/vkd3d/device.c b/libs/vkd3d/libs/vkd3d/device.c
index ea243977c22..75efa41fd32 100644
--- a/libs/vkd3d/libs/vkd3d/device.c
+++ b/libs/vkd3d/libs/vkd3d/device.c
@@ -98,6 +98,7 @@ static const struct vkd3d_optional_extension_info optional_device_extensions[] =
     VK_EXTENSION(EXT_ROBUSTNESS_2, EXT_robustness2),
     VK_EXTENSION(EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION, EXT_shader_demote_to_helper_invocation),
     VK_EXTENSION(EXT_SHADER_STENCIL_EXPORT, EXT_shader_stencil_export),
+    VK_EXTENSION(EXT_SHADER_VIEWPORT_INDEX_LAYER, EXT_shader_viewport_index_layer),
     VK_EXTENSION(EXT_TEXEL_BUFFER_ALIGNMENT, EXT_texel_buffer_alignment),
     VK_EXTENSION(EXT_TRANSFORM_FEEDBACK, EXT_transform_feedback),
     VK_EXTENSION(EXT_VERTEX_ATTRIBUTE_DIVISOR, EXT_vertex_attribute_divisor),
@@ -1546,8 +1547,6 @@ static HRESULT vkd3d_init_device_caps(struct d3d12_device *device,
     device->feature_options.StandardSwizzle64KBSupported = FALSE;
     device->feature_options.CrossNodeSharingTier = D3D12_CROSS_NODE_SHARING_TIER_NOT_SUPPORTED;
     device->feature_options.CrossAdapterRowMajorTextureSupported = FALSE;
-    /* SPV_EXT_shader_viewport_index_layer */
-    device->feature_options.VPAndRTArrayIndexFromAnyShaderFeedingRasterizerSupportedWithoutGSEmulation = FALSE;
     device->feature_options.ResourceHeapTier = D3D12_RESOURCE_HEAP_TIER_2;

     /* Shader Model 6 support. */
@@ -1653,6 +1652,8 @@ static HRESULT vkd3d_init_device_caps(struct d3d12_device *device,
     vkd3d_free(vk_extensions);

     device->feature_options.PSSpecifiedStencilRefSupported = vulkan_info->EXT_shader_stencil_export;
+    device->feature_options.VPAndRTArrayIndexFromAnyShaderFeedingRasterizerSupportedWithoutGSEmulation =
+            vulkan_info->EXT_shader_viewport_index_layer;

     vkd3d_init_feature_level(vulkan_info, features, &device->feature_options);
     if (vulkan_info->max_feature_level < create_info->minimum_feature_level)
@@ -1678,6 +1679,10 @@ static HRESULT vkd3d_init_device_caps(struct d3d12_device *device,
         vulkan_info->shader_extensions[vulkan_info->shader_extension_count++]
                 = VKD3D_SHADER_SPIRV_EXTENSION_EXT_STENCIL_EXPORT;

+    if (vulkan_info->EXT_shader_viewport_index_layer)
+        vulkan_info->shader_extensions[vulkan_info->shader_extension_count++]
+                = VKD3D_SHADER_SPIRV_EXTENSION_EXT_VIEWPORT_INDEX_LAYER;
+
     /* Disable unused Vulkan features. */
     features->shaderTessellationAndGeometryPointSize = VK_FALSE;

diff --git a/libs/vkd3d/libs/vkd3d/vkd3d_private.h b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
index 4abe7df3a95..13802c97773 100644
--- a/libs/vkd3d/libs/vkd3d/vkd3d_private.h
+++ b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
@@ -55,7 +55,7 @@

 #define VKD3D_MAX_COMPATIBLE_FORMAT_COUNT 6u
 #define VKD3D_MAX_QUEUE_FAMILY_COUNT 3u
-#define VKD3D_MAX_SHADER_EXTENSIONS 3u
+#define VKD3D_MAX_SHADER_EXTENSIONS 4u
 #define VKD3D_MAX_SHADER_STAGES 5u
 #define VKD3D_MAX_VK_SYNC_OBJECTS 4u
 #define VKD3D_MAX_DEVICE_BLOCKED_QUEUES 16u
@@ -137,6 +137,7 @@ struct vkd3d_vulkan_info
     bool EXT_robustness2;
     bool EXT_shader_demote_to_helper_invocation;
    bool EXT_shader_stencil_export;
+    bool EXT_shader_viewport_index_layer;
     bool EXT_texel_buffer_alignment;
     bool EXT_transform_feedback;
     bool EXT_vertex_attribute_divisor;
--
2.43.0