2023-09-15 16:31:27 -07:00
|
|
|
From f8bcf91aa485c43f4e5080bdf21f3c399e15a186 Mon Sep 17 00:00:00 2001
|
2023-08-30 16:25:00 -07:00
|
|
|
From: Alistair Leslie-Hughes <leslie_alistair@hotmail.com>
|
|
|
|
Date: Thu, 31 Aug 2023 09:08:26 +1000
|
|
|
|
Subject: [PATCH] Updated vkd3d to a597dc8755af5d2ef4826f1b570927379afc5824.
|
|
|
|
|
|
|
|
---
|
|
|
|
libs/vkd3d/include/vkd3d_shader.h | 2 +
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 11 +-
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/dxil.c | 620 +++++++++++++++++-
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/hlsl.c | 75 ++-
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/hlsl.h | 13 +
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/hlsl.y | 316 ++++-----
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c | 25 +-
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/ir.c | 2 +-
|
|
|
|
libs/vkd3d/libs/vkd3d-shader/spirv.c | 13 +-
|
|
|
|
.../libs/vkd3d-shader/vkd3d_shader_main.c | 3 -
|
|
|
|
.../libs/vkd3d-shader/vkd3d_shader_private.h | 10 +
|
|
|
|
11 files changed, 848 insertions(+), 242 deletions(-)
|
|
|
|
|
|
|
|
diff --git a/libs/vkd3d/include/vkd3d_shader.h b/libs/vkd3d/include/vkd3d_shader.h
|
|
|
|
index cfe54dbff53..d329e205fd1 100644
|
|
|
|
--- a/libs/vkd3d/include/vkd3d_shader.h
|
|
|
|
+++ b/libs/vkd3d/include/vkd3d_shader.h
|
|
|
|
@@ -1463,6 +1463,8 @@ enum vkd3d_shader_sysval_semantic
|
|
|
|
VKD3D_SHADER_SV_TESS_FACTOR_TRIINT = 0x0e,
|
|
|
|
VKD3D_SHADER_SV_TESS_FACTOR_LINEDET = 0x0f,
|
|
|
|
VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN = 0x10,
|
|
|
|
+ /** Render target; SV_Target in Direct3D shader model 6 shaders. */
|
|
|
|
+ VKD3D_SHADER_SV_TARGET = 0x40,
|
|
|
|
|
|
|
|
VKD3D_FORCE_32_BIT_ENUM(VKD3D_SHADER_SYSVAL_SEMANTIC),
|
|
|
|
};
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
|
|
|
|
index 99a5bd7a438..2b02d51f59a 100644
|
|
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
|
|
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
|
|
|
|
@@ -1638,17 +1638,12 @@ static void write_sm1_uniforms(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffe
|
|
|
|
|
|
|
|
if (var->is_param && var->is_uniform)
|
|
|
|
{
|
|
|
|
- struct vkd3d_string_buffer *name;
|
|
|
|
+ char *new_name;
|
|
|
|
|
|
|
|
- if (!(name = hlsl_get_string_buffer(ctx)))
|
|
|
|
- {
|
|
|
|
- buffer->status = VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
+ if (!(new_name = hlsl_sprintf_alloc(ctx, "$%s", var->name)))
|
|
|
|
return;
|
|
|
|
- }
|
|
|
|
- vkd3d_string_buffer_printf(name, "$%s", var->name);
|
|
|
|
vkd3d_free((char *)var->name);
|
|
|
|
- var->name = hlsl_strdup(ctx, name->buffer);
|
|
|
|
- hlsl_release_string_buffer(ctx, name);
|
|
|
|
+ var->name = new_name;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/dxil.c b/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
|
|
index f9efe47f95d..666d8b08614 100644
|
|
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
|
|
@@ -22,6 +22,7 @@
|
|
|
|
#define VKD3D_SM6_VERSION_MINOR(version) (((version) >> 0) & 0xf)
|
|
|
|
|
|
|
|
#define BITCODE_MAGIC VKD3D_MAKE_TAG('B', 'C', 0xc0, 0xde)
|
|
|
|
+#define DXIL_OP_MAX_OPERANDS 17
|
|
|
|
|
|
|
|
enum bitcode_block_id
|
|
|
|
{
|
|
|
|
@@ -138,6 +139,11 @@ enum bitcode_value_symtab_code
|
|
|
|
VST_CODE_BBENTRY = 2,
|
|
|
|
};
|
|
|
|
|
|
|
|
+enum dx_intrinsic_opcode
|
|
|
|
+{
|
|
|
|
+ DX_STORE_OUTPUT = 5,
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
struct sm6_pointer_info
|
|
|
|
{
|
|
|
|
const struct sm6_type *type;
|
|
|
|
@@ -242,6 +248,8 @@ struct sm6_function
|
|
|
|
|
|
|
|
struct sm6_block *blocks[1];
|
|
|
|
size_t block_count;
|
|
|
|
+
|
|
|
|
+ size_t value_count;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct dxil_block
|
|
|
|
@@ -287,12 +295,15 @@ struct sm6_parser
|
|
|
|
struct sm6_symbol *global_symbols;
|
|
|
|
size_t global_symbol_count;
|
|
|
|
|
|
|
|
+ struct vkd3d_shader_dst_param *output_params;
|
|
|
|
+
|
|
|
|
struct sm6_function *functions;
|
|
|
|
size_t function_count;
|
|
|
|
|
|
|
|
struct sm6_value *values;
|
|
|
|
size_t value_count;
|
|
|
|
size_t value_capacity;
|
|
|
|
+ size_t cur_max_value;
|
|
|
|
|
|
|
|
struct vkd3d_shader_parser p;
|
|
|
|
};
|
|
|
|
@@ -316,6 +327,8 @@ struct dxil_global_abbrev
|
|
|
|
struct dxil_abbrev abbrev;
|
|
|
|
};
|
|
|
|
|
|
|
|
+static const uint64_t CALL_CONV_FLAG_EXPLICIT_TYPE = 1ull << 15;
|
|
|
|
+
|
|
|
|
static size_t size_add_with_overflow_check(size_t a, size_t b)
|
|
|
|
{
|
|
|
|
size_t i = a + b;
|
|
|
|
@@ -1261,6 +1274,16 @@ static inline bool sm6_type_is_integer(const struct sm6_type *type)
|
|
|
|
return type->class == TYPE_CLASS_INTEGER;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static inline bool sm6_type_is_i8(const struct sm6_type *type)
|
|
|
|
+{
|
|
|
|
+ return type->class == TYPE_CLASS_INTEGER && type->u.width == 8;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline bool sm6_type_is_i32(const struct sm6_type *type)
|
|
|
|
+{
|
|
|
|
+ return type->class == TYPE_CLASS_INTEGER && type->u.width == 32;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static inline bool sm6_type_is_floating_point(const struct sm6_type *type)
|
|
|
|
{
|
|
|
|
return type->class == TYPE_CLASS_FLOAT;
|
|
|
|
@@ -1341,6 +1364,30 @@ static const struct sm6_type *sm6_type_get_pointer_to_type(const struct sm6_type
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
+/* Never returns null for elem_idx 0. */
|
|
|
|
+static const struct sm6_type *sm6_type_get_scalar_type(const struct sm6_type *type, unsigned int elem_idx)
|
|
|
|
+{
|
|
|
|
+ switch (type->class)
|
|
|
|
+ {
|
|
|
|
+ case TYPE_CLASS_ARRAY:
|
|
|
|
+ case TYPE_CLASS_VECTOR:
|
|
|
|
+ if (elem_idx >= type->u.array.count)
|
|
|
|
+ return NULL;
|
|
|
|
+ return sm6_type_get_scalar_type(type->u.array.elem_type, 0);
|
|
|
|
+
|
|
|
|
+ case TYPE_CLASS_POINTER:
|
|
|
|
+ return sm6_type_get_scalar_type(type->u.pointer.type, 0);
|
|
|
|
+
|
|
|
|
+ case TYPE_CLASS_STRUCT:
|
|
|
|
+ if (elem_idx >= type->u.struc->elem_count)
|
|
|
|
+ return NULL;
|
|
|
|
+ return sm6_type_get_scalar_type(type->u.struc->elem_types[elem_idx], 0);
|
|
|
|
+
|
|
|
|
+ default:
|
|
|
|
+ return type;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static const struct sm6_type *sm6_parser_get_type(struct sm6_parser *sm6, uint64_t type_id)
|
|
|
|
{
|
|
|
|
if (type_id >= sm6->type_count)
|
|
|
|
@@ -1443,9 +1490,32 @@ static const char *sm6_parser_get_global_symbol_name(const struct sm6_parser *sm
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static unsigned int register_get_uint_value(const struct vkd3d_shader_register *reg)
|
|
|
|
+{
|
|
|
|
+ if (!register_is_constant(reg) || !data_type_is_integer(reg->data_type))
|
|
|
|
+ return UINT_MAX;
|
|
|
|
+
|
|
|
|
+ if (reg->immconst_type == VKD3D_IMMCONST_VEC4)
|
|
|
|
+ WARN("Returning vec4.x.\n");
|
|
|
|
+
|
|
|
|
+ if (reg->type == VKD3DSPR_IMMCONST64)
|
|
|
|
+ {
|
|
|
|
+ if (reg->u.immconst_uint64[0] > UINT_MAX)
|
|
|
|
+ FIXME("Truncating 64-bit value.\n");
|
|
|
|
+ return reg->u.immconst_uint64[0];
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return reg->u.immconst_uint[0];
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline bool sm6_value_is_function_dcl(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ return value->value_type == VALUE_TYPE_FUNCTION;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static inline bool sm6_value_is_dx_intrinsic_dcl(const struct sm6_value *fn)
|
|
|
|
{
|
|
|
|
- assert(fn->value_type == VALUE_TYPE_FUNCTION);
|
|
|
|
+ assert(sm6_value_is_function_dcl(fn));
|
|
|
|
return fn->u.function.is_prototype && !strncmp(fn->u.function.name, "dx.op.", 6);
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -1455,6 +1525,60 @@ static inline struct sm6_value *sm6_parser_get_current_value(const struct sm6_pa
|
|
|
|
return &sm6->values[sm6->value_count];
|
|
|
|
}
|
|
|
|
|
|
|
|
+static inline bool sm6_value_is_register(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ return value->value_type == VALUE_TYPE_REG;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline bool sm6_value_is_constant(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ return sm6_value_is_register(value) && register_is_constant(&value->u.reg);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline bool sm6_value_is_undef(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ return sm6_value_is_register(value) && value->u.reg.type == VKD3DSPR_UNDEF;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline unsigned int sm6_value_get_constant_uint(const struct sm6_value *value)
|
|
|
|
+{
|
|
|
|
+ if (!sm6_value_is_constant(value))
|
|
|
|
+ return UINT_MAX;
|
|
|
|
+ return register_get_uint_value(&value->u.reg);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static struct vkd3d_shader_src_param *instruction_src_params_alloc(struct vkd3d_shader_instruction *ins,
|
|
|
|
+ unsigned int count, struct sm6_parser *sm6)
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_src_param *params = shader_parser_get_src_params(&sm6->p, count);
|
|
|
|
+ if (!params)
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to allocate src params.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
|
|
+ "Out of memory allocating instruction src paramaters.");
|
|
|
|
+ return NULL;
|
|
|
|
+ }
|
|
|
|
+ ins->src = params;
|
|
|
|
+ ins->src_count = count;
|
|
|
|
+ return params;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static struct vkd3d_shader_dst_param *instruction_dst_params_alloc(struct vkd3d_shader_instruction *ins,
|
|
|
|
+ unsigned int count, struct sm6_parser *sm6)
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_dst_param *params = shader_parser_get_dst_params(&sm6->p, count);
|
|
|
|
+ if (!params)
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to allocate dst params.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
|
|
+ "Out of memory allocating instruction dst paramaters.");
|
|
|
|
+ return NULL;
|
|
|
|
+ }
|
|
|
|
+ ins->dst = params;
|
|
|
|
+ ins->dst_count = count;
|
|
|
|
+ return params;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static enum vkd3d_data_type vkd3d_data_type_from_sm6_type(const struct sm6_type *type)
|
|
|
|
{
|
|
|
|
if (type->class == TYPE_CLASS_INTEGER)
|
|
|
|
@@ -1488,6 +1612,47 @@ static enum vkd3d_data_type vkd3d_data_type_from_sm6_type(const struct sm6_type
|
|
|
|
return VKD3D_DATA_UINT;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static inline void dst_param_init_scalar(struct vkd3d_shader_dst_param *param, unsigned int component_idx)
|
|
|
|
+{
|
|
|
|
+ param->write_mask = 1u << component_idx;
|
|
|
|
+ param->modifiers = 0;
|
|
|
|
+ param->shift = 0;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline void src_param_init(struct vkd3d_shader_src_param *param)
|
|
|
|
+{
|
|
|
|
+ param->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
|
|
+ param->modifiers = VKD3DSPSM_NONE;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void src_param_init_from_value(struct vkd3d_shader_src_param *param, const struct sm6_value *src)
|
|
|
|
+{
|
|
|
|
+ src_param_init(param);
|
|
|
|
+ param->reg = src->u.reg;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void register_address_init(struct vkd3d_shader_register *reg, const struct sm6_value *address,
|
|
|
|
+ unsigned int idx, struct sm6_parser *sm6)
|
|
|
|
+{
|
|
|
|
+ assert(idx < ARRAY_SIZE(reg->idx));
|
|
|
|
+ if (sm6_value_is_constant(address))
|
|
|
|
+ {
|
|
|
|
+ reg->idx[idx].offset = sm6_value_get_constant_uint(address);
|
|
|
|
+ }
|
|
|
|
+ else if (sm6_value_is_undef(address))
|
|
|
|
+ {
|
|
|
|
+ reg->idx[idx].offset = 0;
|
|
|
|
+ }
|
|
|
|
+ else
|
|
|
|
+ {
|
|
|
|
+ struct vkd3d_shader_src_param *rel_addr = shader_parser_get_src_params(&sm6->p, 1);
|
|
|
|
+ if (rel_addr)
|
|
|
|
+ src_param_init_from_value(rel_addr, address);
|
|
|
|
+ reg->idx[idx].offset = 0;
|
|
|
|
+ reg->idx[idx].rel_addr = rel_addr;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
/* Recurse through the block tree while maintaining a current value count. The current
|
|
|
|
* count is the sum of the global count plus all declarations within the current function.
|
|
|
|
* Store into value_capacity the highest count seen. */
|
|
|
|
@@ -1513,6 +1678,7 @@ static size_t sm6_parser_compute_max_value_count(struct sm6_parser *sm6,
|
|
|
|
* overestimate the value count somewhat, but this should be no problem. */
|
|
|
|
value_count = size_add_with_overflow_check(value_count, max(block->record_count, 1u) - 1);
|
|
|
|
sm6->value_capacity = max(sm6->value_capacity, value_count);
|
|
|
|
+ sm6->functions[sm6->function_count].value_count = value_count;
|
|
|
|
/* The value count returns to its previous value after handling a function. */
|
|
|
|
if (value_count < SIZE_MAX)
|
|
|
|
value_count = old_value_count;
|
|
|
|
@@ -1524,6 +1690,77 @@ static size_t sm6_parser_compute_max_value_count(struct sm6_parser *sm6,
|
|
|
|
return value_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static size_t sm6_parser_get_value_index(struct sm6_parser *sm6, uint64_t idx)
|
|
|
|
+{
|
|
|
|
+ size_t i;
|
|
|
|
+
|
|
|
|
+ /* The value relative index is 32 bits. */
|
|
|
|
+ if (idx > UINT32_MAX)
|
|
|
|
+ WARN("Ignoring upper 32 bits of relative index.\n");
|
|
|
|
+ i = (uint32_t)sm6->value_count - (uint32_t)idx;
|
|
|
|
+
|
|
|
|
+ /* This may underflow to produce a forward reference, but it must not exceeed the final value count. */
|
|
|
|
+ if (i >= sm6->cur_max_value)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid value index %"PRIx64" at %zu.\n", idx, sm6->value_count);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Invalid value relative index %u.", (unsigned int)idx);
|
|
|
|
+ return SIZE_MAX;
|
|
|
|
+ }
|
|
|
|
+ if (i == sm6->value_count)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid value self-reference at %zu.\n", sm6->value_count);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND, "Invalid value self-reference.");
|
|
|
|
+ return SIZE_MAX;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return i;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static size_t sm6_parser_get_value_idx_by_ref(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
+ const struct sm6_type *fwd_type, unsigned int *rec_idx)
|
|
|
|
+{
|
|
|
|
+ unsigned int idx;
|
|
|
|
+ uint64_t val_ref;
|
|
|
|
+ size_t operand;
|
|
|
|
+
|
|
|
|
+ idx = *rec_idx;
|
|
|
|
+ if (!dxil_record_validate_operand_min_count(record, idx + 1, sm6))
|
|
|
|
+ return SIZE_MAX;
|
|
|
|
+ val_ref = record->operands[idx++];
|
|
|
|
+
|
|
|
|
+ operand = sm6_parser_get_value_index(sm6, val_ref);
|
|
|
|
+ if (operand == SIZE_MAX)
|
|
|
|
+ return SIZE_MAX;
|
|
|
|
+
|
|
|
|
+ if (operand >= sm6->value_count)
|
|
|
|
+ {
|
|
|
|
+ if (!fwd_type)
|
|
|
|
+ {
|
|
|
|
+ /* Forward references are followed by a type id unless an earlier operand set the type,
|
|
|
|
+ * or it is contained in a function declaration. */
|
|
|
|
+ if (!dxil_record_validate_operand_min_count(record, idx + 1, sm6))
|
|
|
|
+ return SIZE_MAX;
|
|
|
|
+ if (!(fwd_type = sm6_parser_get_type(sm6, record->operands[idx++])))
|
|
|
|
+ return SIZE_MAX;
|
|
|
|
+ }
|
|
|
|
+ FIXME("Forward value references are not supported yet.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Unsupported value forward reference.");
|
|
|
|
+ return SIZE_MAX;
|
|
|
|
+ }
|
|
|
|
+ *rec_idx = idx;
|
|
|
|
+
|
|
|
|
+ return operand;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static const struct sm6_value *sm6_parser_get_value_by_ref(struct sm6_parser *sm6,
|
|
|
|
+ const struct dxil_record *record, const struct sm6_type *type, unsigned int *rec_idx)
|
|
|
|
+{
|
|
|
|
+ size_t operand = sm6_parser_get_value_idx_by_ref(sm6, record, type, rec_idx);
|
|
|
|
+ return operand == SIZE_MAX ? NULL : &sm6->values[operand];
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static bool sm6_parser_declare_function(struct sm6_parser *sm6, const struct dxil_record *record)
|
|
|
|
{
|
|
|
|
const unsigned int max_count = 15;
|
|
|
|
@@ -1816,6 +2053,81 @@ static enum vkd3d_result sm6_parser_globals_init(struct sm6_parser *sm6)
|
|
|
|
return VKD3D_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static void dst_param_io_init(struct vkd3d_shader_dst_param *param,
|
|
|
|
+ const struct signature_element *e, enum vkd3d_shader_register_type reg_type)
|
|
|
|
+{
|
|
|
|
+ enum vkd3d_shader_component_type component_type;
|
|
|
|
+
|
|
|
|
+ param->write_mask = e->mask;
|
|
|
|
+ param->modifiers = 0;
|
|
|
|
+ param->shift = 0;
|
|
|
|
+ /* DXIL types do not have signedness. Load signed elements as unsigned. */
|
|
|
|
+ component_type = e->component_type == VKD3D_SHADER_COMPONENT_INT ? VKD3D_SHADER_COMPONENT_UINT : e->component_type;
|
|
|
|
+ shader_register_init(¶m->reg, reg_type, vkd3d_data_type_from_component_type(component_type), 0);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_init_signature(struct sm6_parser *sm6, const struct shader_signature *s,
|
|
|
|
+ enum vkd3d_shader_register_type reg_type, struct vkd3d_shader_dst_param *params)
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_dst_param *param;
|
|
|
|
+ const struct signature_element *e;
|
|
|
|
+ unsigned int i;
|
|
|
|
+
|
|
|
|
+ for (i = 0; i < s->element_count; ++i)
|
|
|
|
+ {
|
|
|
|
+ e = &s->elements[i];
|
|
|
|
+
|
|
|
|
+ param = ¶ms[i];
|
|
|
|
+ dst_param_io_init(param, e, reg_type);
|
|
|
|
+ param->reg.idx[0].offset = i;
|
|
|
|
+ param->reg.idx_count = 1;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_emit_signature(struct sm6_parser *sm6, const struct shader_signature *s,
|
|
|
|
+ enum vkd3d_shader_opcode handler_idx, enum vkd3d_shader_opcode siv_handler_idx,
|
|
|
|
+ struct vkd3d_shader_dst_param *params)
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
|
|
+ struct vkd3d_shader_dst_param *param;
|
|
|
|
+ const struct signature_element *e;
|
|
|
|
+ unsigned int i;
|
|
|
|
+
|
|
|
|
+ for (i = 0; i < s->element_count; ++i)
|
|
|
|
+ {
|
|
|
|
+ e = &s->elements[i];
|
|
|
|
+
|
|
|
|
+ /* Do not check e->used_mask because in some cases it is zero for used elements.
|
|
|
|
+ * TODO: scan ahead for used I/O elements. */
|
|
|
|
+
|
|
|
|
+ if (e->sysval_semantic != VKD3D_SHADER_SV_NONE && e->sysval_semantic != VKD3D_SHADER_SV_TARGET)
|
|
|
|
+ {
|
|
|
|
+ ins = sm6_parser_add_instruction(sm6, siv_handler_idx);
|
|
|
|
+ param = &ins->declaration.register_semantic.reg;
|
|
|
|
+ ins->declaration.register_semantic.sysval_semantic = vkd3d_siv_from_sysval(e->sysval_semantic);
|
|
|
|
+ }
|
|
|
|
+ else
|
|
|
|
+ {
|
|
|
|
+ ins = sm6_parser_add_instruction(sm6, handler_idx);
|
|
|
|
+ param = &ins->declaration.dst;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ *param = params[i];
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_init_output_signature(struct sm6_parser *sm6, const struct shader_signature *output_signature)
|
|
|
|
+{
|
|
|
|
+ sm6_parser_init_signature(sm6, output_signature,
|
|
|
|
+ (sm6->p.shader_version.type == VKD3D_SHADER_TYPE_PIXEL) ? VKD3DSPR_COLOROUT : VKD3DSPR_OUTPUT,
|
|
|
|
+ sm6->output_params);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_emit_output_signature(struct sm6_parser *sm6, const struct shader_signature *output_signature)
|
|
|
|
+{
|
|
|
|
+ sm6_parser_emit_signature(sm6, output_signature, VKD3DSIH_DCL_OUTPUT, VKD3DSIH_DCL_OUTPUT_SIV, sm6->output_params);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static const struct sm6_value *sm6_parser_next_function_definition(struct sm6_parser *sm6)
|
|
|
|
{
|
|
|
|
size_t i, count = sm6->function_count;
|
|
|
|
@@ -1838,6 +2150,258 @@ static struct sm6_block *sm6_block_create()
|
|
|
|
return block;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, struct sm6_block *code_block,
|
|
|
|
+ enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins)
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_shader_src_param *src_param;
|
|
|
|
+ struct vkd3d_shader_dst_param *dst_param;
|
|
|
|
+ const struct shader_signature *signature;
|
|
|
|
+ unsigned int row_index, column_index;
|
|
|
|
+ const struct signature_element *e;
|
|
|
|
+ const struct sm6_value *value;
|
|
|
|
+
|
|
|
|
+ row_index = sm6_value_get_constant_uint(operands[0]);
|
|
|
|
+ column_index = sm6_value_get_constant_uint(operands[2]);
|
|
|
|
+
|
|
|
|
+ signature = &sm6->p.shader_desc.output_signature;
|
|
|
|
+ if (row_index >= signature->element_count)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid row index %u.\n", row_index);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Invalid output row index %u.", row_index);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ e = &signature->elements[row_index];
|
|
|
|
+
|
|
|
|
+ if (column_index >= VKD3D_VEC4_SIZE)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid column index %u.\n", column_index);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Invalid output column index %u.", column_index);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ value = operands[3];
|
|
|
|
+ if (!sm6_value_is_register(value))
|
|
|
|
+ {
|
|
|
|
+ WARN("Source value is not a register.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Expected store operation source to be a register.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ shader_instruction_init(ins, VKD3DSIH_MOV);
|
|
|
|
+
|
|
|
|
+ if (!(dst_param = instruction_dst_params_alloc(ins, 1, sm6)))
|
|
|
|
+ return;
|
|
|
|
+ dst_param_init_scalar(dst_param, column_index);
|
|
|
|
+ dst_param->reg = sm6->output_params[row_index].reg;
|
|
|
|
+ if (e->register_count > 1)
|
|
|
|
+ register_address_init(&dst_param->reg, operands[1], 0, sm6);
|
|
|
|
+
|
|
|
|
+ if ((src_param = instruction_src_params_alloc(ins, 1, sm6)))
|
|
|
|
+ src_param_init_from_value(src_param, value);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+struct sm6_dx_opcode_info
|
|
|
|
+{
|
|
|
|
+ const char ret_type;
|
|
|
|
+ const char *operand_info;
|
|
|
|
+ void (*handler)(struct sm6_parser *, struct sm6_block *, enum dx_intrinsic_opcode,
|
|
|
|
+ const struct sm6_value **, struct vkd3d_shader_instruction *);
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ 8 -> int8
|
|
|
|
+ i -> int32
|
|
|
|
+ v -> void
|
|
|
|
+ o -> overloaded
|
|
|
|
+ */
|
|
|
|
+static const struct sm6_dx_opcode_info sm6_dx_op_table[] =
|
|
|
|
+{
|
|
|
|
+ [DX_STORE_OUTPUT ] = {'v', "ii8o", sm6_parser_emit_dx_store_output},
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+static bool sm6_parser_validate_operand_type(struct sm6_parser *sm6, const struct sm6_type *type, char info_type)
|
|
|
|
+{
|
|
|
|
+ switch (info_type)
|
|
|
|
+ {
|
|
|
|
+ case 0:
|
|
|
|
+ FIXME("Invalid operand count.\n");
|
|
|
|
+ return false;
|
|
|
|
+ case '8':
|
|
|
|
+ return sm6_type_is_i8(type);
|
|
|
|
+ case 'i':
|
|
|
|
+ return sm6_type_is_i32(type);
|
|
|
|
+ case 'v':
|
|
|
|
+ return !type;
|
|
|
|
+ case 'o':
|
|
|
|
+ /* TODO: some type checking may be possible */
|
|
|
|
+ return true;
|
|
|
|
+ default:
|
|
|
|
+ FIXME("Unhandled operand code '%c'.\n", info_type);
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static bool sm6_parser_validate_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_opcode op, const char *name,
|
|
|
|
+ const struct sm6_value **operands, unsigned int operand_count, struct sm6_value *dst)
|
|
|
|
+{
|
|
|
|
+ const struct sm6_dx_opcode_info *info;
|
|
|
|
+ unsigned int i;
|
|
|
|
+
|
|
|
|
+ info = &sm6_dx_op_table[op];
|
|
|
|
+
|
|
|
|
+ if (!sm6_parser_validate_operand_type(sm6, dst->type, info->ret_type))
|
|
|
|
+ {
|
|
|
|
+ WARN("Failed to validate return type for dx intrinsic id %u, '%s'.\n", op, name);
|
|
|
|
+ /* Return type validation failure is not so critical. We only need to set
|
|
|
|
+ * a data type for the SSA result. */
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ for (i = 0; i < operand_count; ++i)
|
|
|
|
+ {
|
|
|
|
+ const struct sm6_value *value = operands[i];
|
|
|
|
+ if (!sm6_value_is_register(value) || !sm6_parser_validate_operand_type(sm6, value->type, info->operand_info[i]))
|
|
|
|
+ {
|
|
|
|
+ WARN("Failed to validate operand %u for dx intrinsic id %u, '%s'.\n", i + 1, op, name);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Operand %u for call to dx intrinsic function '%s' is invalid.", i + 1, name);
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ if (info->operand_info[operand_count])
|
|
|
|
+ {
|
|
|
|
+ WARN("Missing operands for dx intrinsic id %u, '%s'.\n", op, name);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
|
|
+ "Call to dx intrinsic function '%s' has missing operands.", name);
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return true;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_emit_unhandled(struct sm6_parser *sm6, struct vkd3d_shader_instruction *ins,
|
|
|
|
+ struct sm6_value *dst)
|
|
|
|
+{
|
|
|
|
+ const struct sm6_type *type;
|
|
|
|
+
|
|
|
|
+ ins->handler_idx = VKD3DSIH_NOP;
|
|
|
|
+
|
|
|
|
+ if (!dst->type)
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ type = sm6_type_get_scalar_type(dst->type, 0);
|
|
|
|
+ shader_register_init(&dst->u.reg, VKD3DSPR_UNDEF, vkd3d_data_type_from_sm6_type(type), 0);
|
|
|
|
+ /* dst->is_undefined is not set here because it flags only explicitly undefined values. */
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_decode_dx_op(struct sm6_parser *sm6, struct sm6_block *code_block, enum dx_intrinsic_opcode op,
|
|
|
|
+ const char *name, const struct sm6_value **operands, unsigned int operand_count,
|
|
|
|
+ struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
|
|
+{
|
|
|
|
+ if (op >= ARRAY_SIZE(sm6_dx_op_table) || !sm6_dx_op_table[op].operand_info)
|
|
|
|
+ {
|
|
|
|
+ FIXME("Unhandled dx intrinsic function id %u, '%s'.\n", op, name);
|
|
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_UNHANDLED_INTRINSIC,
|
|
|
|
+ "Call to intrinsic function %s is unhandled.", name);
|
|
|
|
+ sm6_parser_emit_unhandled(sm6, ins, dst);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (sm6_parser_validate_dx_op(sm6, op, name, operands, operand_count, dst))
|
|
|
|
+ sm6_dx_op_table[op].handler(sm6, code_block, op, operands, ins);
|
|
|
|
+ else
|
|
|
|
+ sm6_parser_emit_unhandled(sm6, ins, dst);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void sm6_parser_emit_call(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
+ struct sm6_block *code_block, struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
|
|
+{
|
|
|
|
+ const struct sm6_value *operands[DXIL_OP_MAX_OPERANDS];
|
|
|
|
+ const struct sm6_value *fn_value, *op_value;
|
|
|
|
+ unsigned int i = 1, j, operand_count;
|
|
|
|
+ const struct sm6_type *type = NULL;
|
|
|
|
+ uint64_t call_conv;
|
|
|
|
+
|
|
|
|
+ if (!dxil_record_validate_operand_min_count(record, 2, sm6))
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ /* TODO: load the 1-based attributes index from record->operands[0] and validate against attribute count. */
|
|
|
|
+
|
|
|
|
+ if ((call_conv = record->operands[i++]) & CALL_CONV_FLAG_EXPLICIT_TYPE)
|
|
|
|
+ type = sm6_parser_get_type(sm6, record->operands[i++]);
|
|
|
|
+ if (call_conv &= ~CALL_CONV_FLAG_EXPLICIT_TYPE)
|
|
|
|
+ WARN("Ignoring calling convention %#"PRIx64".\n", call_conv);
|
|
|
|
+
|
|
|
|
+ if (!(fn_value = sm6_parser_get_value_by_ref(sm6, record, NULL, &i)))
|
|
|
|
+ return;
|
|
|
|
+ if (!sm6_value_is_function_dcl(fn_value))
|
|
|
|
+ {
|
|
|
|
+ WARN("Function target value is not a function declaration.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Function call target value is not a function declaration.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (type && type != fn_value->type->u.pointer.type)
|
|
|
|
+ WARN("Explicit call type does not match function type.\n");
|
|
|
|
+ type = fn_value->type->u.pointer.type;
|
|
|
|
+
|
|
|
|
+ if (!sm6_type_is_void(type->u.function->ret_type))
|
|
|
|
+ dst->type = type->u.function->ret_type;
|
|
|
|
+
|
|
|
|
+ operand_count = type->u.function->param_count;
|
|
|
|
+ if (operand_count > ARRAY_SIZE(operands))
|
|
|
|
+ {
|
|
|
|
+ WARN("Ignoring %zu operands.\n", operand_count - ARRAY_SIZE(operands));
|
|
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
|
|
+ "Ignoring %zu operands for function call.", operand_count - ARRAY_SIZE(operands));
|
|
|
|
+ operand_count = ARRAY_SIZE(operands);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ for (j = 0; j < operand_count; ++j)
|
|
|
|
+ {
|
|
|
|
+ if (!(operands[j] = sm6_parser_get_value_by_ref(sm6, record, type->u.function->param_types[j], &i)))
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ if ((j = record->operand_count - i))
|
|
|
|
+ {
|
|
|
|
+ WARN("Ignoring %u operands beyond the function parameter list.\n", j);
|
|
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
|
|
+ "Ignoring %u function call operands beyond the parameter list.", j);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (!fn_value->u.function.is_prototype)
|
|
|
|
+ {
|
|
|
|
+ FIXME("Unhandled call to local function.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Call to a local function is unsupported.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ if (!sm6_value_is_dx_intrinsic_dcl(fn_value))
|
|
|
|
+ WARN("External function is not a dx intrinsic.\n");
|
|
|
|
+
|
|
|
|
+ if (!operand_count)
|
|
|
|
+ {
|
|
|
|
+ WARN("Missing dx intrinsic function id.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
|
|
+ "The id for a dx intrinsic function is missing.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ op_value = operands[0];
|
|
|
|
+ if (!sm6_value_is_constant(op_value) || !sm6_type_is_integer(op_value->type))
|
|
|
|
+ {
|
|
|
|
+ WARN("dx intrinsic function id is not a constant int.\n");
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Expected a constant integer dx intrinsic function id.");
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ sm6_parser_decode_dx_op(sm6, code_block, register_get_uint_value(&op_value->u.reg),
|
|
|
|
+ fn_value->u.function.name, &operands[1], operand_count - 1, ins, dst);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static void sm6_parser_emit_ret(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
|
|
struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
|
|
|
|
{
|
|
|
|
@@ -1855,15 +2419,10 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
|
|
{
|
|
|
|
struct vkd3d_shader_instruction *ins;
|
|
|
|
const struct dxil_record *record;
|
|
|
|
+ bool ret_found, is_terminator;
|
|
|
|
struct sm6_block *code_block;
|
|
|
|
struct sm6_value *dst;
|
|
|
|
size_t i, block_idx;
|
|
|
|
- bool ret_found;
|
|
|
|
- enum
|
|
|
|
- {
|
|
|
|
- RESULT_VALUE,
|
|
|
|
- RESULT_TERMINATE,
|
|
|
|
- } result_type;
|
|
|
|
|
|
|
|
if (sm6->function_count)
|
|
|
|
{
|
|
|
|
@@ -1907,10 +2466,20 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
|
|
}
|
|
|
|
code_block = function->blocks[0];
|
|
|
|
|
|
|
|
+ sm6->cur_max_value = function->value_count;
|
|
|
|
+
|
|
|
|
for (i = 1, block_idx = 0, ret_found = false; i < block->record_count; ++i)
|
|
|
|
{
|
|
|
|
sm6->p.location.column = i;
|
|
|
|
|
|
|
|
+ if (!code_block)
|
|
|
|
+ {
|
|
|
|
+ WARN("Invalid block count %zu.\n", function->block_count);
|
|
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
|
|
+ "Invalid block count %zu.", function->block_count);
|
|
|
|
+ return VKD3D_ERROR_INVALID_SHADER;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
/* block->record_count - 1 is the instruction count, but some instructions
|
|
|
|
* can emit >1 IR instruction, so extra may be used. */
|
|
|
|
if (!vkd3d_array_reserve((void **)&code_block->instructions, &code_block->instruction_capacity,
|
|
|
|
@@ -1926,14 +2495,17 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
|
|
dst = sm6_parser_get_current_value(sm6);
|
|
|
|
dst->type = NULL;
|
|
|
|
dst->value_type = VALUE_TYPE_REG;
|
|
|
|
- result_type = RESULT_VALUE;
|
|
|
|
+ is_terminator = false;
|
|
|
|
|
|
|
|
record = block->records[i];
|
|
|
|
switch (record->code)
|
|
|
|
{
|
|
|
|
+ case FUNC_CODE_INST_CALL:
|
|
|
|
+ sm6_parser_emit_call(sm6, record, code_block, ins, dst);
|
|
|
|
+ break;
|
|
|
|
case FUNC_CODE_INST_RET:
|
|
|
|
sm6_parser_emit_ret(sm6, record, code_block, ins);
|
|
|
|
- result_type = RESULT_TERMINATE;
|
|
|
|
+ is_terminator = true;
|
|
|
|
ret_found = true;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
@@ -1941,7 +2513,11 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
|
|
return VKD3D_ERROR_INVALID_SHADER;
|
|
|
|
}
|
|
|
|
|
|
|
|
- if (result_type == RESULT_TERMINATE)
|
|
|
|
+ if (sm6->p.failed)
|
|
|
|
+ return VKD3D_ERROR;
|
|
|
|
+ assert(ins->handler_idx != VKD3DSIH_INVALID);
|
|
|
|
+
|
|
|
|
+ if (is_terminator)
|
|
|
|
{
|
|
|
|
++block_idx;
|
|
|
|
code_block = (block_idx < function->block_count) ? function->blocks[block_idx] : NULL;
|
|
|
|
@@ -1950,6 +2526,7 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
|
|
code_block->instruction_count += ins->handler_idx != VKD3DSIH_NOP;
|
|
|
|
else
|
|
|
|
assert(ins->handler_idx == VKD3DSIH_NOP);
|
|
|
|
+
|
|
|
|
sm6->value_count += !!dst->type;
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -1996,6 +2573,8 @@ static enum vkd3d_result sm6_parser_module_init(struct sm6_parser *sm6, const st
|
|
|
|
switch (block->id)
|
|
|
|
{
|
|
|
|
case CONSTANTS_BLOCK:
|
|
|
|
+ function = &sm6->functions[sm6->function_count];
|
|
|
|
+ sm6->cur_max_value = function->value_count;
|
|
|
|
return sm6_parser_constants_init(sm6, block);
|
|
|
|
|
|
|
|
case FUNCTION_BLOCK:
|
|
|
|
@@ -2103,6 +2682,7 @@ static const struct vkd3d_shader_parser_ops sm6_parser_ops =
|
|
|
|
static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t *byte_code, size_t byte_code_size,
|
|
|
|
const char *source_name, struct vkd3d_shader_message_context *message_context)
|
|
|
|
{
|
|
|
|
+ const struct shader_signature *output_signature = &sm6->p.shader_desc.output_signature;
|
|
|
|
const struct vkd3d_shader_location location = {.source_name = source_name};
|
|
|
|
uint32_t version_token, dxil_version, token_count, magic;
|
|
|
|
unsigned int chunk_offset, chunk_size;
|
|
|
|
@@ -2258,6 +2838,14 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
+ if (!(sm6->output_params = shader_parser_get_dst_params(&sm6->p, output_signature->element_count)))
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to allocate output parameters.\n");
|
|
|
|
+ vkd3d_shader_error(message_context, &location, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
|
|
+ "Out of memory allocating output parameters.");
|
|
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
function_count = dxil_block_compute_function_count(&sm6->root_block);
|
|
|
|
if (!(sm6->functions = vkd3d_calloc(function_count, sizeof(*sm6->functions))))
|
|
|
|
{
|
|
|
|
@@ -2288,6 +2876,8 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
+ sm6_parser_init_output_signature(sm6, output_signature);
|
|
|
|
+
|
|
|
|
if ((ret = sm6_parser_module_init(sm6, &sm6->root_block, 0)) < 0)
|
|
|
|
{
|
|
|
|
if (ret == VKD3D_ERROR_OUT_OF_MEMORY)
|
|
|
|
@@ -2296,11 +2886,17 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t
|
|
|
|
else if (ret == VKD3D_ERROR_INVALID_SHADER)
|
|
|
|
vkd3d_shader_error(message_context, &location, VKD3D_SHADER_ERROR_DXIL_INVALID_MODULE,
|
|
|
|
"DXIL module is invalid.");
|
|
|
|
- else
|
|
|
|
- vkd3d_unreachable();
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
+ if (!sm6_parser_require_space(sm6, output_signature->element_count))
|
|
|
|
+ {
|
|
|
|
+ vkd3d_shader_error(message_context, &location, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
|
|
+ "Out of memory emitting shader signature declarations.");
|
|
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
+ }
|
|
|
|
+ sm6_parser_emit_output_signature(sm6, output_signature);
|
|
|
|
+
|
|
|
|
for (i = 0; i < sm6->function_count; ++i)
|
|
|
|
{
|
|
|
|
if (!sm6_block_emit_instructions(sm6->functions[i].blocks[0], sm6))
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.c b/libs/vkd3d/libs/vkd3d-shader/hlsl.c
|
|
|
|
index 8b706e1e667..b8cf6813f67 100644
|
|
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.c
|
|
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.c
|
|
|
|
@@ -72,6 +72,27 @@ void hlsl_fixme(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, c
|
|
|
|
ctx->result = VKD3D_ERROR_NOT_IMPLEMENTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
+char *hlsl_sprintf_alloc(struct hlsl_ctx *ctx, const char *fmt, ...)
|
|
|
|
+{
|
|
|
|
+ struct vkd3d_string_buffer *string;
|
|
|
|
+ va_list args;
|
|
|
|
+ char *ret;
|
|
|
|
+
|
|
|
|
+ if (!(string = hlsl_get_string_buffer(ctx)))
|
|
|
|
+ return NULL;
|
|
|
|
+ va_start(args, fmt);
|
|
|
|
+ if (vkd3d_string_buffer_vprintf(string, fmt, args) < 0)
|
|
|
|
+ {
|
|
|
|
+ va_end(args);
|
|
|
|
+ hlsl_release_string_buffer(ctx, string);
|
|
|
|
+ return NULL;
|
|
|
|
+ }
|
|
|
|
+ va_end(args);
|
|
|
|
+ ret = hlsl_strdup(ctx, string->buffer);
|
|
|
|
+ hlsl_release_string_buffer(ctx, string);
|
|
|
|
+ return ret;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
bool hlsl_add_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *decl, bool local_var)
|
|
|
|
{
|
|
|
|
struct hlsl_scope *scope = ctx->cur_scope;
|
|
|
|
@@ -1039,11 +1060,10 @@ struct hlsl_ir_var *hlsl_new_synthetic_var(struct hlsl_ctx *ctx, const char *tem
|
|
|
|
{
|
|
|
|
struct vkd3d_string_buffer *string;
|
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
- static LONG counter;
|
|
|
|
|
|
|
|
if (!(string = hlsl_get_string_buffer(ctx)))
|
|
|
|
return NULL;
|
|
|
|
- vkd3d_string_buffer_printf(string, "<%s-%u>", template, InterlockedIncrement(&counter));
|
|
|
|
+ vkd3d_string_buffer_printf(string, "<%s-%u>", template, ctx->internal_name_counter++);
|
|
|
|
var = hlsl_new_synthetic_var_named(ctx, string->buffer, type, loc, true);
|
|
|
|
hlsl_release_string_buffer(ctx, string);
|
|
|
|
return var;
|
|
|
|
@@ -2968,6 +2988,16 @@ void hlsl_add_function(struct hlsl_ctx *ctx, char *name, struct hlsl_ir_function
|
|
|
|
struct hlsl_ir_function *func;
|
|
|
|
struct rb_entry *func_entry;
|
|
|
|
|
|
|
|
+ if (ctx->internal_func_name)
|
|
|
|
+ {
|
|
|
|
+ char *internal_name;
|
|
|
|
+
|
|
|
|
+ if (!(internal_name = hlsl_strdup(ctx, ctx->internal_func_name)))
|
|
|
|
+ return;
|
|
|
|
+ vkd3d_free(name);
|
|
|
|
+ name = internal_name;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
func_entry = rb_get(&ctx->functions, name);
|
|
|
|
if (func_entry)
|
|
|
|
{
|
|
|
|
@@ -3499,3 +3529,44 @@ int hlsl_compile_shader(const struct vkd3d_shader_code *hlsl, const struct vkd3d
|
|
|
|
hlsl_ctx_cleanup(&ctx);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
+
|
|
|
|
+struct hlsl_ir_function_decl *hlsl_compile_internal_function(struct hlsl_ctx *ctx, const char *name, const char *hlsl)
|
|
|
|
+{
|
|
|
|
+ const struct hlsl_ir_function_decl *saved_cur_function = ctx->cur_function;
|
|
|
|
+ struct vkd3d_shader_code code = {.code = hlsl, .size = strlen(hlsl)};
|
|
|
|
+ const char *saved_internal_func_name = ctx->internal_func_name;
|
|
|
|
+ struct vkd3d_string_buffer *internal_name;
|
|
|
|
+ struct hlsl_ir_function_decl *func;
|
|
|
|
+ void *saved_scanner = ctx->scanner;
|
|
|
|
+ int ret;
|
|
|
|
+
|
|
|
|
+ TRACE("name %s, hlsl %s.\n", debugstr_a(name), debugstr_a(hlsl));
|
|
|
|
+
|
|
|
|
+ /* The actual name of the function is mangled with a unique prefix, both to
|
|
|
|
+ * allow defining multiple variants of a function with the same name, and to
|
|
|
|
+ * avoid polluting the user name space. */
|
|
|
|
+
|
|
|
|
+ if (!(internal_name = hlsl_get_string_buffer(ctx)))
|
|
|
|
+ return NULL;
|
|
|
|
+ vkd3d_string_buffer_printf(internal_name, "<%s-%u>", name, ctx->internal_name_counter++);
|
|
|
|
+
|
|
|
|
+ /* Save and restore everything that matters.
|
|
|
|
+ * Note that saving the scope stack is hard, and shouldn't be necessary. */
|
|
|
|
+
|
|
|
|
+ ctx->scanner = NULL;
|
|
|
|
+ ctx->internal_func_name = internal_name->buffer;
|
|
|
|
+ ctx->cur_function = NULL;
|
|
|
|
+ ret = hlsl_lexer_compile(ctx, &code);
|
|
|
|
+ ctx->scanner = saved_scanner;
|
|
|
|
+ ctx->internal_func_name = saved_internal_func_name;
|
|
|
|
+ ctx->cur_function = saved_cur_function;
|
|
|
|
+ if (ret)
|
|
|
|
+ {
|
|
|
|
+ ERR("Failed to compile intrinsic, error %u.\n", ret);
|
|
|
|
+ hlsl_release_string_buffer(ctx, internal_name);
|
|
|
|
+ return NULL;
|
|
|
|
+ }
|
|
|
|
+ func = hlsl_get_func_decl(ctx, internal_name->buffer);
|
|
|
|
+ hlsl_release_string_buffer(ctx, internal_name);
|
|
|
|
+ return func;
|
|
|
|
+}
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.h b/libs/vkd3d/libs/vkd3d-shader/hlsl.h
|
|
|
|
index 070fec74326..73b08ee3ea0 100644
|
|
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.h
|
|
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.h
|
|
|
|
@@ -798,6 +798,9 @@ struct hlsl_ctx
|
|
|
|
/* Pointer to the current function; changes as the parser reads the code. */
|
|
|
|
const struct hlsl_ir_function_decl *cur_function;
|
|
|
|
|
|
|
|
+ /* Counter for generating unique internal variable names. */
|
|
|
|
+ unsigned int internal_name_counter;
|
|
|
|
+
|
|
|
|
/* Default matrix majority for matrix types. Can be set by a pragma within the HLSL source. */
|
|
|
|
unsigned int matrix_majority;
|
|
|
|
|
|
|
|
@@ -834,6 +837,12 @@ struct hlsl_ctx
|
|
|
|
* compute shader profiles. It is set using the numthreads() attribute in the entry point. */
|
|
|
|
uint32_t thread_count[3];
|
|
|
|
|
|
|
|
+ /* In some cases we generate opcodes by parsing an HLSL function and then
|
|
|
|
+ * invoking it. If not NULL, this field is the name of the function that we
|
|
|
|
+ * are currently parsing, "mangled" with an internal prefix to avoid
|
|
|
|
+ * polluting the user namespace. */
|
|
|
|
+ const char *internal_func_name;
|
|
|
|
+
|
|
|
|
/* Whether the parser is inside a state block (effects' metadata) inside a variable declaration. */
|
|
|
|
uint32_t in_state_block : 1;
|
|
|
|
/* Whether the numthreads() attribute has been provided in the entry-point function. */
|
|
|
|
@@ -1069,6 +1078,8 @@ static inline unsigned int hlsl_sampler_dim_count(enum hlsl_sampler_dim dim)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
+char *hlsl_sprintf_alloc(struct hlsl_ctx *ctx, const char *fmt, ...) VKD3D_PRINTF_FUNC(2, 3);
|
|
|
|
+
|
|
|
|
const char *debug_hlsl_expr_op(enum hlsl_ir_expr_op op);
|
|
|
|
const char *debug_hlsl_type(struct hlsl_ctx *ctx, const struct hlsl_type *type);
|
|
|
|
const char *debug_hlsl_writemask(unsigned int writemask);
|
|
|
|
@@ -1258,6 +1269,8 @@ bool hlsl_sm4_register_from_semantic(struct hlsl_ctx *ctx, const struct hlsl_sem
|
|
|
|
bool output, enum vkd3d_shader_register_type *type, enum vkd3d_sm4_swizzle_type *swizzle_type, bool *has_idx);
|
|
|
|
int hlsl_sm4_write(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func, struct vkd3d_shader_code *out);
|
|
|
|
|
|
|
|
+struct hlsl_ir_function_decl *hlsl_compile_internal_function(struct hlsl_ctx *ctx, const char *name, const char *hlsl);
|
|
|
|
+
|
|
|
|
int hlsl_lexer_compile(struct hlsl_ctx *ctx, const struct vkd3d_shader_code *hlsl);
|
|
|
|
|
|
|
|
#endif
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.y b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
|
|
|
|
index 43ea4b4d038..161d1ab42c3 100644
|
|
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.y
|
|
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
|
|
|
|
@@ -2330,6 +2330,92 @@ static struct hlsl_ir_function_decl *find_function_call(struct hlsl_ctx *ctx,
|
|
|
|
return args.decl;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static struct hlsl_ir_node *hlsl_new_void_expr(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc)
|
|
|
|
+{
|
|
|
|
+ struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
|
|
|
|
+
|
|
|
|
+ return hlsl_new_expr(ctx, HLSL_OP0_VOID, operands, ctx->builtin_types.Void, loc);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static bool add_user_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *func,
|
|
|
|
+ const struct parse_initializer *args, const struct vkd3d_shader_location *loc)
|
|
|
|
+{
|
|
|
|
+ struct hlsl_ir_node *call;
|
|
|
|
+ unsigned int i;
|
|
|
|
+
|
|
|
|
+ assert(args->args_count == func->parameters.count);
|
|
|
|
+
|
|
|
|
+ for (i = 0; i < func->parameters.count; ++i)
|
|
|
|
+ {
|
|
|
|
+ struct hlsl_ir_var *param = func->parameters.vars[i];
|
|
|
|
+ struct hlsl_ir_node *arg = args->args[i];
|
|
|
|
+
|
|
|
|
+ if (!hlsl_types_are_equal(arg->data_type, param->data_type))
|
|
|
|
+ {
|
|
|
|
+ struct hlsl_ir_node *cast;
|
|
|
|
+
|
|
|
|
+ if (!(cast = add_cast(ctx, args->instrs, arg, param->data_type, &arg->loc)))
|
|
|
|
+ return false;
|
|
|
|
+ args->args[i] = cast;
|
|
|
|
+ arg = cast;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (param->storage_modifiers & HLSL_STORAGE_IN)
|
|
|
|
+ {
|
|
|
|
+ struct hlsl_ir_node *store;
|
|
|
|
+
|
|
|
|
+ if (!(store = hlsl_new_simple_store(ctx, param, arg)))
|
|
|
|
+ return false;
|
|
|
|
+ hlsl_block_add_instr(args->instrs, store);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (!(call = hlsl_new_call(ctx, func, loc)))
|
|
|
|
+ return false;
|
|
|
|
+ hlsl_block_add_instr(args->instrs, call);
|
|
|
|
+
|
|
|
|
+ for (i = 0; i < func->parameters.count; ++i)
|
|
|
|
+ {
|
|
|
|
+ struct hlsl_ir_var *param = func->parameters.vars[i];
|
|
|
|
+ struct hlsl_ir_node *arg = args->args[i];
|
|
|
|
+
|
|
|
|
+ if (param->storage_modifiers & HLSL_STORAGE_OUT)
|
|
|
|
+ {
|
|
|
|
+ struct hlsl_ir_load *load;
|
|
|
|
+
|
|
|
|
+ if (arg->data_type->modifiers & HLSL_MODIFIER_CONST)
|
|
|
|
+ hlsl_error(ctx, &arg->loc, VKD3D_SHADER_ERROR_HLSL_MODIFIES_CONST,
|
|
|
|
+ "Output argument to \"%s\" is const.", func->func->name);
|
|
|
|
+
|
|
|
|
+ if (!(load = hlsl_new_var_load(ctx, param, &arg->loc)))
|
|
|
|
+ return false;
|
|
|
|
+ hlsl_block_add_instr(args->instrs, &load->node);
|
|
|
|
+
|
|
|
|
+ if (!add_assignment(ctx, args->instrs, arg, ASSIGN_OP_ASSIGN, &load->node))
|
|
|
|
+ return false;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (func->return_var)
|
|
|
|
+ {
|
|
|
|
+ struct hlsl_ir_load *load;
|
|
|
|
+
|
|
|
|
+ if (!(load = hlsl_new_var_load(ctx, func->return_var, loc)))
|
|
|
|
+ return false;
|
|
|
|
+ hlsl_block_add_instr(args->instrs, &load->node);
|
|
|
|
+ }
|
|
|
|
+ else
|
|
|
|
+ {
|
|
|
|
+ struct hlsl_ir_node *expr;
|
|
|
|
+
|
|
|
|
+ if (!(expr = hlsl_new_void_expr(ctx, loc)))
|
|
|
|
+ return false;
|
|
|
|
+ hlsl_block_add_instr(args->instrs, expr);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return true;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static struct hlsl_ir_node *intrinsic_float_convert_arg(struct hlsl_ctx *ctx,
|
|
|
|
const struct parse_initializer *params, struct hlsl_ir_node *arg, const struct vkd3d_shader_location *loc)
|
|
|
|
{
|
|
|
|
@@ -2948,14 +3034,17 @@ static struct hlsl_ir_node * add_pow_expr(struct hlsl_ctx *ctx,
|
|
|
|
static bool intrinsic_lit(struct hlsl_ctx *ctx,
|
|
|
|
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
|
|
|
|
{
|
|
|
|
- struct hlsl_ir_node *n_l_neg, *n_h_neg, *specular_or, *specular_pow, *load;
|
|
|
|
- struct hlsl_ir_node *n_l, *n_h, *m, *diffuse, *zero, *store, *init;
|
|
|
|
- struct hlsl_constant_value init_value;
|
|
|
|
- struct hlsl_ir_load *var_load;
|
|
|
|
- struct hlsl_deref var_deref;
|
|
|
|
- struct hlsl_type *ret_type;
|
|
|
|
- struct hlsl_ir_var *var;
|
|
|
|
- struct hlsl_block block;
|
|
|
|
+ struct hlsl_ir_function_decl *func;
|
|
|
|
+
|
|
|
|
+ static const char body[] =
|
|
|
|
+ "float4 lit(float n_l, float n_h, float m)\n"
|
|
|
|
+ "{\n"
|
|
|
|
+ " float4 ret;\n"
|
|
|
|
+ " ret.xw = 1.0;\n"
|
|
|
|
+ " ret.y = max(n_l, 0);\n"
|
|
|
|
+ " ret.z = (n_l < 0 || n_h < 0) ? 0 : pow(n_h, m);\n"
|
|
|
|
+ " return ret;\n"
|
|
|
|
+ "}";
|
|
|
|
|
|
|
|
if (params->args[0]->data_type->class != HLSL_CLASS_SCALAR
|
|
|
|
|| params->args[1]->data_type->class != HLSL_CLASS_SCALAR
|
|
|
|
@@ -2965,70 +3054,10 @@ static bool intrinsic_lit(struct hlsl_ctx *ctx,
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
- if (!(n_l = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(n_h = intrinsic_float_convert_arg(ctx, params, params->args[1], loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(m = intrinsic_float_convert_arg(ctx, params, params->args[2], loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- ret_type = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, 4);
|
|
|
|
-
|
|
|
|
- if (!(var = hlsl_new_synthetic_var(ctx, "lit", ret_type, loc)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_init_simple_deref_from_var(&var_deref, var);
|
|
|
|
-
|
|
|
|
- init_value.u[0].f = 1.0f;
|
|
|
|
- init_value.u[1].f = 0.0f;
|
|
|
|
- init_value.u[2].f = 0.0f;
|
|
|
|
- init_value.u[3].f = 1.0f;
|
|
|
|
- if (!(init = hlsl_new_constant(ctx, ret_type, &init_value, loc)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_instr(params->instrs, init);
|
|
|
|
-
|
|
|
|
- if (!(store = hlsl_new_simple_store(ctx, var, init)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_instr(params->instrs, store);
|
|
|
|
-
|
|
|
|
- if (!(zero = hlsl_new_float_constant(ctx, 0.0f, loc)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_instr(params->instrs, zero);
|
|
|
|
-
|
|
|
|
- /* Diffuse component. */
|
|
|
|
- if (!(diffuse = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MAX, n_l, zero, loc)))
|
|
|
|
+ if (!(func = hlsl_compile_internal_function(ctx, "lit", body)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
- if (!hlsl_new_store_component(ctx, &block, &var_deref, 1, diffuse))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_block(params->instrs, &block);
|
|
|
|
-
|
|
|
|
- /* Specular component. */
|
|
|
|
- if (!(n_h_neg = add_binary_comparison_expr(ctx, params->instrs, HLSL_OP2_LESS, n_h, zero, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(n_l_neg = add_binary_comparison_expr(ctx, params->instrs, HLSL_OP2_LESS, n_l, zero, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(specular_or = add_binary_logical_expr(ctx, params->instrs, HLSL_OP2_LOGIC_OR, n_l_neg, n_h_neg, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(specular_pow = add_pow_expr(ctx, params->instrs, n_h, m, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(load = hlsl_add_conditional(ctx, params->instrs, specular_or, zero, specular_pow)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!hlsl_new_store_component(ctx, &block, &var_deref, 2, load))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_block(params->instrs, &block);
|
|
|
|
-
|
|
|
|
- if (!(var_load = hlsl_new_var_load(ctx, var, loc)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_instr(params->instrs, &var_load->node);
|
|
|
|
-
|
|
|
|
- return true;
|
|
|
|
+ return add_user_call(ctx, func, params, loc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool intrinsic_log(struct hlsl_ctx *ctx,
|
|
|
|
@@ -3336,58 +3365,29 @@ static bool intrinsic_sin(struct hlsl_ctx *ctx,
|
|
|
|
static bool intrinsic_smoothstep(struct hlsl_ctx *ctx,
|
|
|
|
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
|
|
|
|
{
|
|
|
|
- struct hlsl_ir_node *min_arg, *max_arg, *x_arg, *p, *p_num, *p_denom, *res, *one, *minus_two, *three;
|
|
|
|
-
|
|
|
|
- if (!elementwise_intrinsic_float_convert_args(ctx, params, loc))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- min_arg = params->args[0];
|
|
|
|
- max_arg = params->args[1];
|
|
|
|
- x_arg = params->args[2];
|
|
|
|
-
|
|
|
|
- if (!(min_arg = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_NEG, min_arg, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(p_num = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, x_arg, min_arg, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(p_denom = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, max_arg, min_arg, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(one = hlsl_new_float_constant(ctx, 1.0, loc)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_instr(params->instrs, one);
|
|
|
|
-
|
|
|
|
- if (!(p_denom = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_DIV, one, p_denom, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(p = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, p_num, p_denom, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(p = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_SAT, p, loc)))
|
|
|
|
- return false;
|
|
|
|
-
|
|
|
|
- if (!(minus_two = hlsl_new_float_constant(ctx, -2.0, loc)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_instr(params->instrs, minus_two);
|
|
|
|
-
|
|
|
|
- if (!(three = hlsl_new_float_constant(ctx, 3.0, loc)))
|
|
|
|
- return false;
|
|
|
|
- hlsl_block_add_instr(params->instrs, three);
|
|
|
|
+ struct hlsl_ir_function_decl *func;
|
|
|
|
+ struct hlsl_type *type;
|
|
|
|
+ char *body;
|
|
|
|
|
|
|
|
- if (!(res = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, minus_two, p, loc)))
|
|
|
|
- return false;
|
|
|
|
+ static const char template[] =
|
|
|
|
+ "%s smoothstep(%s low, %s high, %s x)\n"
|
|
|
|
+ "{\n"
|
|
|
|
+ " %s p = saturate((x - low) / (high - low));\n"
|
|
|
|
+ " return (p * p) * (3 - 2 * p);\n"
|
|
|
|
+ "}";
|
|
|
|
|
|
|
|
- if (!(res = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, three, res, loc)))
|
|
|
|
+ if (!(type = elementwise_intrinsic_get_common_type(ctx, params, loc)))
|
|
|
|
return false;
|
|
|
|
+ type = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_FLOAT, type->dimx, type->dimy);
|
|
|
|
|
|
|
|
- if (!(p = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, p, p, loc)))
|
|
|
|
+ if (!(body = hlsl_sprintf_alloc(ctx, template, type->name, type->name, type->name, type->name, type->name)))
|
|
|
|
return false;
|
|
|
|
-
|
|
|
|
- if (!(res = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, p, res, loc)))
|
|
|
|
+ func = hlsl_compile_internal_function(ctx, "smoothstep", body);
|
|
|
|
+ vkd3d_free(body);
|
|
|
|
+ if (!func)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
- return true;
|
|
|
|
+ return add_user_call(ctx, func, params, loc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool intrinsic_sqrt(struct hlsl_ctx *ctx,
|
|
|
|
@@ -3478,6 +3478,12 @@ static bool intrinsic_tex3D(struct hlsl_ctx *ctx,
|
|
|
|
return intrinsic_tex(ctx, params, loc, "tex3D", HLSL_SAMPLER_DIM_3D);
|
|
|
|
}
|
|
|
|
|
|
|
|
+static bool intrinsic_texCUBE(struct hlsl_ctx *ctx,
|
|
|
|
+ const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
|
|
|
|
+{
|
|
|
|
+ return intrinsic_tex(ctx, params, loc, "texCUBE", HLSL_SAMPLER_DIM_CUBE);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static bool intrinsic_transpose(struct hlsl_ctx *ctx,
|
|
|
|
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
|
|
|
|
{
|
|
|
|
@@ -3648,6 +3654,7 @@ intrinsic_functions[] =
|
|
|
|
{"step", 2, true, intrinsic_step},
|
|
|
|
{"tex2D", -1, false, intrinsic_tex2D},
|
|
|
|
{"tex3D", -1, false, intrinsic_tex3D},
|
|
|
|
+ {"texCUBE", -1, false, intrinsic_texCUBE},
|
|
|
|
{"transpose", 1, true, intrinsic_transpose},
|
|
|
|
{"trunc", 1, true, intrinsic_trunc},
|
|
|
|
};
|
|
|
|
@@ -3659,13 +3666,6 @@ static int intrinsic_function_name_compare(const void *a, const void *b)
|
|
|
|
return strcmp(a, func->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
-static struct hlsl_ir_node *hlsl_new_void_expr(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc)
|
|
|
|
-{
|
|
|
|
- struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
|
|
|
|
-
|
|
|
|
- return hlsl_new_expr(ctx, HLSL_OP0_VOID, operands, ctx->builtin_types.Void, loc);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
static struct hlsl_block *add_call(struct hlsl_ctx *ctx, const char *name,
|
|
|
|
struct parse_initializer *args, const struct vkd3d_shader_location *loc)
|
|
|
|
{
|
|
|
|
@@ -3674,78 +3674,8 @@ static struct hlsl_block *add_call(struct hlsl_ctx *ctx, const char *name,
|
|
|
|
|
|
|
|
     if ((decl = find_function_call(ctx, name, args, loc)))
     {
-        struct hlsl_ir_node *call;
-        unsigned int i;
-
-        assert(args->args_count == decl->parameters.count);
-
-        for (i = 0; i < decl->parameters.count; ++i)
-        {
-            struct hlsl_ir_var *param = decl->parameters.vars[i];
-            struct hlsl_ir_node *arg = args->args[i];
-
-            if (!hlsl_types_are_equal(arg->data_type, param->data_type))
-            {
-                struct hlsl_ir_node *cast;
-
-                if (!(cast = add_cast(ctx, args->instrs, arg, param->data_type, &arg->loc)))
-                    goto fail;
-                args->args[i] = cast;
-                arg = cast;
-            }
-
-            if (param->storage_modifiers & HLSL_STORAGE_IN)
-            {
-                struct hlsl_ir_node *store;
-
-                if (!(store = hlsl_new_simple_store(ctx, param, arg)))
-                    goto fail;
-                hlsl_block_add_instr(args->instrs, store);
-            }
-        }
-
-        if (!(call = hlsl_new_call(ctx, decl, loc)))
+        if (!add_user_call(ctx, decl, args, loc))
             goto fail;
-        hlsl_block_add_instr(args->instrs, call);
-
-        for (i = 0; i < decl->parameters.count; ++i)
-        {
-            struct hlsl_ir_var *param = decl->parameters.vars[i];
-            struct hlsl_ir_node *arg = args->args[i];
-
-            if (param->storage_modifiers & HLSL_STORAGE_OUT)
-            {
-                struct hlsl_ir_load *load;
-
-                if (arg->data_type->modifiers & HLSL_MODIFIER_CONST)
-                    hlsl_error(ctx, &arg->loc, VKD3D_SHADER_ERROR_HLSL_MODIFIES_CONST,
-                            "Output argument to \"%s\" is const.", decl->func->name);
-
-                if (!(load = hlsl_new_var_load(ctx, param, &arg->loc)))
-                    goto fail;
-                hlsl_block_add_instr(args->instrs, &load->node);
-
-                if (!add_assignment(ctx, args->instrs, arg, ASSIGN_OP_ASSIGN, &load->node))
-                    goto fail;
-            }
-        }
-
-        if (decl->return_var)
-        {
-            struct hlsl_ir_load *load;
-
-            if (!(load = hlsl_new_var_load(ctx, decl->return_var, loc)))
-                goto fail;
-            hlsl_block_add_instr(args->instrs, &load->node);
-        }
-        else
-        {
-            struct hlsl_ir_node *expr;
-
-            if (!(expr = hlsl_new_void_expr(ctx, loc)))
-                goto fail;
-            hlsl_block_add_instr(args->instrs, expr);
-        }
     }
     else if ((intrinsic = bsearch(name, intrinsic_functions, ARRAY_SIZE(intrinsic_functions),
             sizeof(*intrinsic_functions), intrinsic_function_name_compare)))
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
index bae8e5f9a5f..710d2908166 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
@@ -163,10 +163,10 @@ static bool replace_deref_path_with_offset(struct hlsl_ctx *ctx, struct hlsl_der
  * work. */
 static void prepend_uniform_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_var *temp)
 {
-    struct vkd3d_string_buffer *name;
     struct hlsl_ir_var *uniform;
     struct hlsl_ir_node *store;
     struct hlsl_ir_load *load;
+    char *new_name;
 
     /* Use the synthetic name for the temp, rather than the uniform, so that we
      * can write the uniform name into the shader reflection data. */
@@ -180,11 +180,9 @@ static void prepend_uniform_copy(struct hlsl_ctx *ctx, struct hlsl_block *block,
     uniform->is_param = temp->is_param;
     uniform->buffer = temp->buffer;
 
-    if (!(name = hlsl_get_string_buffer(ctx)))
+    if (!(new_name = hlsl_sprintf_alloc(ctx, "<temp-%s>", temp->name)))
         return;
-    vkd3d_string_buffer_printf(name, "<temp-%s>", temp->name);
-    temp->name = hlsl_strdup(ctx, name->buffer);
-    hlsl_release_string_buffer(ctx, name);
+    temp->name = new_name;
 
     if (!(load = hlsl_new_var_load(ctx, uniform, &temp->loc)))
         return;
@@ -235,16 +233,15 @@ static struct hlsl_ir_var *add_semantic_var(struct hlsl_ctx *ctx, struct hlsl_ir
         uint32_t index, bool output, const struct vkd3d_shader_location *loc)
 {
     struct hlsl_semantic new_semantic;
-    struct vkd3d_string_buffer *name;
     struct hlsl_ir_var *ext_var;
+    char *new_name;
 
-    if (!(name = hlsl_get_string_buffer(ctx)))
+    if (!(new_name = hlsl_sprintf_alloc(ctx, "<%s-%s%u>", output ? "output" : "input", semantic->name, index)))
         return NULL;
-    vkd3d_string_buffer_printf(name, "<%s-%s%u>", output ? "output" : "input", semantic->name, index);
 
     LIST_FOR_EACH_ENTRY(ext_var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
     {
-        if (!ascii_strcasecmp(ext_var->name, name->buffer))
+        if (!ascii_strcasecmp(ext_var->name, new_name))
         {
             if (output)
             {
@@ -271,25 +268,23 @@ static struct hlsl_ir_var *add_semantic_var(struct hlsl_ctx *ctx, struct hlsl_ir
                 }
             }
 
-            hlsl_release_string_buffer(ctx, name);
+            vkd3d_free(new_name);
             return ext_var;
         }
     }
 
     if (!(new_semantic.name = hlsl_strdup(ctx, semantic->name)))
     {
-        hlsl_release_string_buffer(ctx, name);
+        vkd3d_free(new_name);
         return NULL;
     }
     new_semantic.index = index;
-    if (!(ext_var = hlsl_new_var(ctx, hlsl_strdup(ctx, name->buffer), type, loc, &new_semantic,
-            modifiers, NULL)))
+    if (!(ext_var = hlsl_new_var(ctx, new_name, type, loc, &new_semantic, modifiers, NULL)))
     {
-        hlsl_release_string_buffer(ctx, name);
+        vkd3d_free(new_name);
         hlsl_cleanup_semantic(&new_semantic);
         return NULL;
     }
-    hlsl_release_string_buffer(ctx, name);
     if (output)
         ext_var->is_output_semantic = 1;
     else
diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c
index 705905f7888..6d7c89653e3 100644
--- a/libs/vkd3d/libs/vkd3d-shader/ir.c
+++ b/libs/vkd3d/libs/vkd3d-shader/ir.c
@@ -296,7 +296,7 @@ static enum vkd3d_result flattener_flatten_phases(struct hull_flattener *normali
     return VKD3D_OK;
 }
 
-static void shader_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type,
+void shader_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type,
         enum vkd3d_data_type data_type, unsigned int idx_count)
 {
     reg->type = reg_type;
diff --git a/libs/vkd3d/libs/vkd3d-shader/spirv.c b/libs/vkd3d/libs/vkd3d-shader/spirv.c
index 9b3084538ba..f93960d6d54 100644
--- a/libs/vkd3d/libs/vkd3d-shader/spirv.c
+++ b/libs/vkd3d/libs/vkd3d-shader/spirv.c
@@ -4792,13 +4792,16 @@ static bool is_dual_source_blending(const struct spirv_compiler *compiler)
 
 static void calculate_clip_or_cull_distance_mask(const struct signature_element *e, uint32_t *mask)
 {
+    unsigned int write_mask;
+
     if (e->semantic_index >= sizeof(*mask) * CHAR_BIT / VKD3D_VEC4_SIZE)
     {
         FIXME("Invalid semantic index %u for clip/cull distance.\n", e->semantic_index);
         return;
     }
 
-    *mask |= (e->mask & VKD3DSP_WRITEMASK_ALL) << (VKD3D_VEC4_SIZE * e->semantic_index);
+    write_mask = e->mask >> vkd3d_write_mask_get_component_idx(e->mask);
+    *mask |= (write_mask & VKD3DSP_WRITEMASK_ALL) << (VKD3D_VEC4_SIZE * e->semantic_index);
 }
 
 /* Emits arrayed SPIR-V built-in variables. */
@@ -4962,7 +4965,6 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
         component_type = builtin->component_type;
         if (!builtin->spirv_array_size)
             output_component_count = builtin->component_count;
-        component_idx = 0;
     }
     else
     {
@@ -4976,14 +4978,9 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
             || needs_private_io_variable(builtin))
     {
         use_private_variable = true;
-        reg_write_mask = write_mask;
-    }
-    else
-    {
-        component_idx = vkd3d_write_mask_get_component_idx(write_mask);
-        reg_write_mask = write_mask >> component_idx;
     }
 
+    reg_write_mask = write_mask >> component_idx;
     vkd3d_symbol_make_register(&reg_symbol, reg);
 
     if (rb_get(&compiler->symbol_table, &reg_symbol))
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
index 2bc8613f2ef..a70894a160d 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
@@ -1771,9 +1771,6 @@ void *shader_param_allocator_get(struct vkd3d_shader_param_allocator *allocator,
 {
     void *params;
 
-    if (!count)
-        return NULL;
-
     if (count > allocator->count - allocator->index)
     {
         struct vkd3d_shader_param_node *next = shader_param_allocator_node_create(allocator);
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
index 84614a4eb79..eab1c730ae9 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
@@ -173,12 +173,14 @@ enum vkd3d_shader_error
     VKD3D_SHADER_ERROR_DXIL_INVALID_FUNCTION_DCL = 8009,
     VKD3D_SHADER_ERROR_DXIL_INVALID_TYPE_ID = 8010,
     VKD3D_SHADER_ERROR_DXIL_INVALID_MODULE = 8011,
+    VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND = 8012,
 
     VKD3D_SHADER_WARNING_DXIL_UNKNOWN_MAGIC_NUMBER = 8300,
     VKD3D_SHADER_WARNING_DXIL_UNKNOWN_SHADER_TYPE = 8301,
     VKD3D_SHADER_WARNING_DXIL_INVALID_BLOCK_LENGTH = 8302,
     VKD3D_SHADER_WARNING_DXIL_INVALID_MODULE_LENGTH = 8303,
    VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS = 8304,
+    VKD3D_SHADER_WARNING_DXIL_UNHANDLED_INTRINSIC = 8305,
 
     VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED = 9000,
 };
@@ -556,6 +558,11 @@ enum vkd3d_data_type
     VKD3D_DATA_UINT8,
 };
 
+static inline bool data_type_is_integer(enum vkd3d_data_type data_type)
+{
+    return data_type == VKD3D_DATA_INT || data_type == VKD3D_DATA_UINT8 || data_type == VKD3D_DATA_UINT;
+}
+
 enum vkd3d_immconst_type
 {
     VKD3D_IMMCONST_SCALAR,
@@ -734,6 +741,9 @@ struct vkd3d_shader_register
     } u;
 };
 
+void shader_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type,
+        enum vkd3d_data_type data_type, unsigned int idx_count);
+
 struct vkd3d_shader_dst_param
 {
     struct vkd3d_shader_register reg;
--
2.40.1