diff --git a/patches/vkd3d-latest/0001-Updated-vkd3d-to-ca05e57e67306e9b97eb22a35cd77728e3e.patch b/patches/vkd3d-latest/0001-Updated-vkd3d-to-ca05e57e67306e9b97eb22a35cd77728e3e.patch index e7af90d0..75fecc02 100644 --- a/patches/vkd3d-latest/0001-Updated-vkd3d-to-ca05e57e67306e9b97eb22a35cd77728e3e.patch +++ b/patches/vkd3d-latest/0001-Updated-vkd3d-to-ca05e57e67306e9b97eb22a35cd77728e3e.patch @@ -1,7 +1,7 @@ -From 693aa55e7a12e42906f9e20049f9856915013450 Mon Sep 17 00:00:00 2001 +From a66fff01c4ccc1b1d25f6cf92a9a2301281fbbc4 Mon Sep 17 00:00:00 2001 From: Alistair Leslie-Hughes Date: Wed, 17 May 2023 08:35:40 +1000 -Subject: [PATCH] Updated vkd3d to ca05e57e67306e9b97eb22a35cd77728e3e91db9 +Subject: [PATCH 1/3] Updated vkd3d to ca05e57e67306e9b97eb22a35cd77728e3e91db9 --- libs/vkd3d/include/list.h | 270 +++++++++++ @@ -3858,5 +3858,5 @@ index 363a7132c3a..fceb06fc05a 100644 /* utils */ -- -2.40.1 +2.42.0 diff --git a/patches/vkd3d-latest/0002-Updated-vkd3d-to-1006e8cbd4f46b560b939a894a03b08ce1f.patch b/patches/vkd3d-latest/0002-Updated-vkd3d-to-1006e8cbd4f46b560b939a894a03b08ce1f.patch new file mode 100644 index 00000000..ad6f9c33 --- /dev/null +++ b/patches/vkd3d-latest/0002-Updated-vkd3d-to-1006e8cbd4f46b560b939a894a03b08ce1f.patch @@ -0,0 +1,3734 @@ +From 76bde8ac9ee47e0e7078a8510c0677b48300de50 Mon Sep 17 00:00:00 2001 +From: Alistair Leslie-Hughes +Date: Fri, 29 Sep 2023 13:08:54 +1000 +Subject: [PATCH 2/3] Updated vkd3d to + 1006e8cbd4f46b560b939a894a03b08ce1f20def. + +--- + libs/vkd3d/libs/vkd3d-shader/d3d_asm.c | 33 +- + libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 44 +- + libs/vkd3d/libs/vkd3d-shader/dxbc.c | 2 +- + libs/vkd3d/libs/vkd3d-shader/dxil.c | 136 ++++++- + libs/vkd3d/libs/vkd3d-shader/hlsl.y | 15 + + libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c | 190 +++++---- + libs/vkd3d/libs/vkd3d-shader/ir.c | 105 ++++- + libs/vkd3d/libs/vkd3d-shader/spirv.c | 234 ++++++++--- + libs/vkd3d/libs/vkd3d-shader/tpf.c | 304 +++++++------- + .../libs/vkd3d-shader/vkd3d_shader_main.c | 2 +- + .../libs/vkd3d-shader/vkd3d_shader_private.h | 16 +- + libs/vkd3d/libs/vkd3d/command.c | 1 + + libs/vkd3d/libs/vkd3d/device.c | 375 +++++++++++++----- + libs/vkd3d/libs/vkd3d/resource.c | 20 +- + libs/vkd3d/libs/vkd3d/vkd3d_main.c | 4 +- + libs/vkd3d/libs/vkd3d/vkd3d_private.h | 14 +- + 16 files changed, 1039 insertions(+), 456 deletions(-) + +diff --git a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c +index 2c5108095d5..6a3513a2827 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c ++++ b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c +@@ -1066,6 +1066,14 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const + shader_addline(buffer, "oStencilRef"); + break; + ++ case VKD3DSPR_UNDEF: ++ shader_addline(buffer, "undef"); ++ break; ++ ++ case VKD3DSPR_SSA: ++ shader_addline(buffer, "sr"); ++ break; ++ + default: + shader_addline(buffer, "", reg->type); + break; +@@ -1074,9 +1082,9 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const + if (reg->type == VKD3DSPR_IMMCONST) + { + shader_addline(buffer, "%s(", compiler->colours.reset); +- switch (reg->immconst_type) ++ switch (reg->dimension) + { +- case VKD3D_IMMCONST_SCALAR: ++ case VSIR_DIMENSION_SCALAR: + switch (reg->data_type) + { + case VKD3D_DATA_FLOAT: +@@ -1096,7 +1104,7 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const + } + break; + +- case VKD3D_IMMCONST_VEC4: ++ case VSIR_DIMENSION_VEC4: + switch (reg->data_type) + { + case 
VKD3D_DATA_FLOAT: +@@ -1126,7 +1134,7 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const + break; + + default: +- shader_addline(buffer, "", reg->immconst_type); ++ shader_addline(buffer, "", reg->dimension); + break; + } + shader_addline(buffer, ")"); +@@ -1134,13 +1142,13 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const + else if (reg->type == VKD3DSPR_IMMCONST64) + { + shader_addline(buffer, "%s(", compiler->colours.reset); +- /* A double2 vector is treated as a float4 vector in enum vkd3d_immconst_type. */ +- if (reg->immconst_type == VKD3D_IMMCONST_SCALAR || reg->immconst_type == VKD3D_IMMCONST_VEC4) ++ /* A double2 vector is treated as a float4 vector in enum vsir_dimension. */ ++ if (reg->dimension == VSIR_DIMENSION_SCALAR || reg->dimension == VSIR_DIMENSION_VEC4) + { + if (reg->data_type == VKD3D_DATA_DOUBLE) + { + shader_print_double_literal(compiler, "", reg->u.immconst_double[0], ""); +- if (reg->immconst_type == VKD3D_IMMCONST_VEC4) ++ if (reg->dimension == VSIR_DIMENSION_VEC4) + shader_print_double_literal(compiler, ", ", reg->u.immconst_double[1], ""); + } + else +@@ -1150,13 +1158,14 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const + } + else + { +- shader_addline(buffer, "", reg->immconst_type); ++ shader_addline(buffer, "", reg->dimension); + } + shader_addline(buffer, ")"); + } + else if (reg->type != VKD3DSPR_RASTOUT + && reg->type != VKD3DSPR_MISCTYPE +- && reg->type != VKD3DSPR_NULL) ++ && reg->type != VKD3DSPR_NULL ++ && reg->type != VKD3DSPR_DEPTHOUT) + { + if (offset != ~0u) + { +@@ -1181,7 +1190,7 @@ static void shader_dump_register(struct vkd3d_d3d_asm_compiler *compiler, const + { + shader_print_subscript_range(compiler, reg->idx[1].offset, reg->idx[2].offset); + } +- else ++ else if (reg->type != VKD3DSPR_SSA) + { + /* For descriptors in sm < 5.1 we move the reg->idx values up one slot + * to normalise with 5.1. 
+@@ -1256,7 +1265,7 @@ static void shader_dump_dst_param(struct vkd3d_d3d_asm_compiler *compiler, + + shader_dump_register(compiler, &param->reg, is_declaration); + +- if (write_mask) ++ if (write_mask && param->reg.dimension == VSIR_DIMENSION_VEC4) + { + static const char write_mask_chars[] = "xyzw"; + +@@ -1322,7 +1331,7 @@ static void shader_dump_src_param(struct vkd3d_d3d_asm_compiler *compiler, + } + + if (param->reg.type != VKD3DSPR_IMMCONST && param->reg.type != VKD3DSPR_IMMCONST64 +- && param->reg.type != VKD3DSPR_SAMPLER) ++ && param->reg.dimension == VSIR_DIMENSION_VEC4) + { + unsigned int swizzle_x = vkd3d_swizzle_get_component(swizzle, 0); + unsigned int swizzle_y = vkd3d_swizzle_get_component(swizzle, 1); +diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c +index 0d2b8d248d1..598b7518394 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c ++++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c +@@ -457,18 +457,20 @@ static uint32_t swizzle_from_sm1(uint32_t swizzle) + static void shader_sm1_parse_src_param(uint32_t param, const struct vkd3d_shader_src_param *rel_addr, + struct vkd3d_shader_src_param *src) + { +- src->reg.type = ((param & VKD3D_SM1_REGISTER_TYPE_MASK) >> VKD3D_SM1_REGISTER_TYPE_SHIFT) ++ enum vkd3d_shader_register_type reg_type = ((param & VKD3D_SM1_REGISTER_TYPE_MASK) >> VKD3D_SM1_REGISTER_TYPE_SHIFT) + | ((param & VKD3D_SM1_REGISTER_TYPE_MASK2) >> VKD3D_SM1_REGISTER_TYPE_SHIFT2); ++ ++ vsir_register_init(&src->reg, reg_type, VKD3D_DATA_FLOAT, 1); + src->reg.precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT; + src->reg.non_uniform = false; +- src->reg.data_type = VKD3D_DATA_FLOAT; + src->reg.idx[0].offset = param & VKD3D_SM1_REGISTER_NUMBER_MASK; + src->reg.idx[0].rel_addr = rel_addr; +- src->reg.idx[1].offset = ~0u; +- src->reg.idx[1].rel_addr = NULL; +- src->reg.idx[2].offset = ~0u; +- src->reg.idx[2].rel_addr = NULL; +- src->reg.idx_count = 1; ++ if (src->reg.type == VKD3DSPR_SAMPLER) ++ src->reg.dimension = VSIR_DIMENSION_NONE; ++ else if (src->reg.type == VKD3DSPR_DEPTHOUT) ++ src->reg.dimension = VSIR_DIMENSION_SCALAR; ++ else ++ src->reg.dimension = VSIR_DIMENSION_VEC4; + src->swizzle = swizzle_from_sm1((param & VKD3D_SM1_SWIZZLE_MASK) >> VKD3D_SM1_SWIZZLE_SHIFT); + src->modifiers = (param & VKD3D_SM1_SRC_MODIFIER_MASK) >> VKD3D_SM1_SRC_MODIFIER_SHIFT; + } +@@ -476,18 +478,20 @@ static void shader_sm1_parse_src_param(uint32_t param, const struct vkd3d_shader + static void shader_sm1_parse_dst_param(uint32_t param, const struct vkd3d_shader_src_param *rel_addr, + struct vkd3d_shader_dst_param *dst) + { +- dst->reg.type = ((param & VKD3D_SM1_REGISTER_TYPE_MASK) >> VKD3D_SM1_REGISTER_TYPE_SHIFT) ++ enum vkd3d_shader_register_type reg_type = ((param & VKD3D_SM1_REGISTER_TYPE_MASK) >> VKD3D_SM1_REGISTER_TYPE_SHIFT) + | ((param & VKD3D_SM1_REGISTER_TYPE_MASK2) >> VKD3D_SM1_REGISTER_TYPE_SHIFT2); ++ ++ vsir_register_init(&dst->reg, reg_type, VKD3D_DATA_FLOAT, 1); + dst->reg.precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT; + dst->reg.non_uniform = false; +- dst->reg.data_type = VKD3D_DATA_FLOAT; + dst->reg.idx[0].offset = param & VKD3D_SM1_REGISTER_NUMBER_MASK; + dst->reg.idx[0].rel_addr = rel_addr; +- dst->reg.idx[1].offset = ~0u; +- dst->reg.idx[1].rel_addr = NULL; +- dst->reg.idx[2].offset = ~0u; +- dst->reg.idx[2].rel_addr = NULL; +- dst->reg.idx_count = 1; ++ if (dst->reg.type == VKD3DSPR_SAMPLER) ++ dst->reg.dimension = VSIR_DIMENSION_NONE; ++ else if (dst->reg.type == VKD3DSPR_DEPTHOUT) ++ dst->reg.dimension = 
VSIR_DIMENSION_SCALAR; ++ else ++ dst->reg.dimension = VSIR_DIMENSION_VEC4; + dst->write_mask = (param & VKD3D_SM1_WRITEMASK_MASK) >> VKD3D_SM1_WRITEMASK_SHIFT; + dst->modifiers = (param & VKD3D_SM1_DST_MODIFIER_MASK) >> VKD3D_SM1_DST_MODIFIER_SHIFT; + dst->shift = (param & VKD3D_SM1_DSTSHIFT_MASK) >> VKD3D_SM1_DSTSHIFT_SHIFT; +@@ -959,9 +963,9 @@ static void shader_sm1_read_semantic(struct vkd3d_shader_sm1_parser *sm1, + } + + static void shader_sm1_read_immconst(struct vkd3d_shader_sm1_parser *sm1, const uint32_t **ptr, +- struct vkd3d_shader_src_param *src_param, enum vkd3d_immconst_type type, enum vkd3d_data_type data_type) ++ struct vkd3d_shader_src_param *src_param, enum vsir_dimension dimension, enum vkd3d_data_type data_type) + { +- unsigned int count = type == VKD3D_IMMCONST_VEC4 ? 4 : 1; ++ unsigned int count = dimension == VSIR_DIMENSION_VEC4 ? 4 : 1; + + if (*ptr >= sm1->end || sm1->end - *ptr < count) + { +@@ -983,7 +987,7 @@ static void shader_sm1_read_immconst(struct vkd3d_shader_sm1_parser *sm1, const + src_param->reg.idx[2].offset = ~0u; + src_param->reg.idx[2].rel_addr = NULL; + src_param->reg.idx_count = 0; +- src_param->reg.immconst_type = type; ++ src_param->reg.dimension = dimension; + memcpy(src_param->reg.u.immconst_uint, *ptr, count * sizeof(uint32_t)); + src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE; + src_param->modifiers = 0; +@@ -1140,19 +1144,19 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str + else if (ins->handler_idx == VKD3DSIH_DEF) + { + shader_sm1_read_dst_param(sm1, &p, dst_param); +- shader_sm1_read_immconst(sm1, &p, &src_params[0], VKD3D_IMMCONST_VEC4, VKD3D_DATA_FLOAT); ++ shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_VEC4, VKD3D_DATA_FLOAT); + shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true); + } + else if (ins->handler_idx == VKD3DSIH_DEFB) + { + shader_sm1_read_dst_param(sm1, &p, dst_param); +- shader_sm1_read_immconst(sm1, &p, &src_params[0], VKD3D_IMMCONST_SCALAR, VKD3D_DATA_UINT); ++ shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_SCALAR, VKD3D_DATA_UINT); + shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true); + } + else if (ins->handler_idx == VKD3DSIH_DEFI) + { + shader_sm1_read_dst_param(sm1, &p, dst_param); +- shader_sm1_read_immconst(sm1, &p, &src_params[0], VKD3D_IMMCONST_VEC4, VKD3D_DATA_INT); ++ shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_VEC4, VKD3D_DATA_INT); + shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true); + } + else +diff --git a/libs/vkd3d/libs/vkd3d-shader/dxbc.c b/libs/vkd3d/libs/vkd3d-shader/dxbc.c +index 1cb00688c76..dbbf8a5c458 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/dxbc.c ++++ b/libs/vkd3d/libs/vkd3d-shader/dxbc.c +@@ -452,7 +452,7 @@ static int isgn_handler(const struct vkd3d_shader_dxbc_section_desc *section, + { + struct shader_signature *is = ctx; + +- if (section->tag != TAG_ISGN) ++ if (section->tag != TAG_ISGN && section->tag != TAG_ISG1) + return VKD3D_OK; + + if (is->elements) +diff --git a/libs/vkd3d/libs/vkd3d-shader/dxil.c b/libs/vkd3d/libs/vkd3d-shader/dxil.c +index b778f6abed3..bb50ad62b68 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/dxil.c ++++ b/libs/vkd3d/libs/vkd3d-shader/dxil.c +@@ -141,6 +141,7 @@ enum bitcode_value_symtab_code + + enum dx_intrinsic_opcode + { ++ DX_LOAD_INPUT = 4, + DX_STORE_OUTPUT = 5, + }; + +@@ -296,6 +297,7 @@ struct sm6_parser + size_t global_symbol_count; + + struct vkd3d_shader_dst_param *output_params; ++ 
struct vkd3d_shader_dst_param *input_params; + + struct sm6_function *functions; + size_t function_count; +@@ -304,6 +306,7 @@ struct sm6_parser + size_t value_count; + size_t value_capacity; + size_t cur_max_value; ++ unsigned int ssa_next_id; + + struct vkd3d_shader_parser p; + }; +@@ -1495,7 +1498,7 @@ static unsigned int register_get_uint_value(const struct vkd3d_shader_register * + if (!register_is_constant(reg) || !data_type_is_integer(reg->data_type)) + return UINT_MAX; + +- if (reg->immconst_type == VKD3D_IMMCONST_VEC4) ++ if (reg->dimension == VSIR_DIMENSION_VEC4) + WARN("Returning vec4.x.\n"); + + if (reg->type == VKD3DSPR_IMMCONST64) +@@ -1547,6 +1550,11 @@ static inline unsigned int sm6_value_get_constant_uint(const struct sm6_value *v + return register_get_uint_value(&value->u.reg); + } + ++static unsigned int sm6_parser_alloc_ssa_id(struct sm6_parser *sm6) ++{ ++ return sm6->ssa_next_id++; ++} ++ + static struct vkd3d_shader_src_param *instruction_src_params_alloc(struct vkd3d_shader_instruction *ins, + unsigned int count, struct sm6_parser *sm6) + { +@@ -1579,6 +1587,13 @@ static struct vkd3d_shader_dst_param *instruction_dst_params_alloc(struct vkd3d_ + return params; + } + ++static void register_init_with_id(struct vkd3d_shader_register *reg, ++ enum vkd3d_shader_register_type reg_type, enum vkd3d_data_type data_type, unsigned int id) ++{ ++ vsir_register_init(reg, reg_type, data_type, 1); ++ reg->idx[0].offset = id; ++} ++ + static enum vkd3d_data_type vkd3d_data_type_from_sm6_type(const struct sm6_type *type) + { + if (type->class == TYPE_CLASS_INTEGER) +@@ -1612,6 +1627,24 @@ static enum vkd3d_data_type vkd3d_data_type_from_sm6_type(const struct sm6_type + return VKD3D_DATA_UINT; + } + ++static void register_init_ssa_scalar(struct vkd3d_shader_register *reg, const struct sm6_type *type, ++ struct sm6_parser *sm6) ++{ ++ enum vkd3d_data_type data_type; ++ unsigned int id; ++ ++ id = sm6_parser_alloc_ssa_id(sm6); ++ data_type = vkd3d_data_type_from_sm6_type(sm6_type_get_scalar_type(type, 0)); ++ register_init_with_id(reg, VKD3DSPR_SSA, data_type, id); ++} ++ ++static void dst_param_init(struct vkd3d_shader_dst_param *param) ++{ ++ param->write_mask = VKD3DSP_WRITEMASK_0; ++ param->modifiers = 0; ++ param->shift = 0; ++} ++ + static inline void dst_param_init_scalar(struct vkd3d_shader_dst_param *param, unsigned int component_idx) + { + param->write_mask = 1u << component_idx; +@@ -1619,12 +1652,25 @@ static inline void dst_param_init_scalar(struct vkd3d_shader_dst_param *param, u + param->shift = 0; + } + ++static void dst_param_init_ssa_scalar(struct vkd3d_shader_dst_param *param, const struct sm6_type *type, ++ struct sm6_parser *sm6) ++{ ++ dst_param_init(param); ++ register_init_ssa_scalar(&param->reg, type, sm6); ++} ++ + static inline void src_param_init(struct vkd3d_shader_src_param *param) + { + param->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X); + param->modifiers = VKD3DSPSM_NONE; + } + ++static void src_param_init_scalar(struct vkd3d_shader_src_param *param, unsigned int component_idx) ++{ ++ param->swizzle = vkd3d_shader_create_swizzle(component_idx, component_idx, component_idx, component_idx); ++ param->modifiers = VKD3DSPSM_NONE; ++} ++ + static void src_param_init_from_value(struct vkd3d_shader_src_param *param, const struct sm6_value *src) + { + src_param_init(param); +@@ -1653,6 +1699,16 @@ static void register_address_init(struct vkd3d_shader_register *reg, const struc + } + } + ++static void instruction_dst_param_init_ssa_scalar(struct 
vkd3d_shader_instruction *ins, struct sm6_parser *sm6) ++{ ++ struct vkd3d_shader_dst_param *param = instruction_dst_params_alloc(ins, 1, sm6); ++ struct sm6_value *dst = sm6_parser_get_current_value(sm6); ++ ++ dst_param_init_ssa_scalar(param, dst->type, sm6); ++ param->write_mask = VKD3DSP_WRITEMASK_0; ++ dst->u.reg = param->reg; ++} ++ + /* Recurse through the block tree while maintaining a current value count. The current + * count is the sum of the global count plus all declarations within the current function. + * Store into value_capacity the highest count seen. */ +@@ -1912,7 +1968,7 @@ static enum vkd3d_result sm6_parser_constants_init(struct sm6_parser *sm6, const + dst->type = type; + dst->value_type = VALUE_TYPE_REG; + dst->u.reg.type = reg_type; +- dst->u.reg.immconst_type = VKD3D_IMMCONST_SCALAR; ++ dst->u.reg.dimension = VSIR_DIMENSION_SCALAR; + dst->u.reg.data_type = reg_data_type; + + switch (record->code) +@@ -2063,7 +2119,7 @@ static void dst_param_io_init(struct vkd3d_shader_dst_param *param, + param->shift = 0; + /* DXIL types do not have signedness. Load signed elements as unsigned. */ + component_type = e->component_type == VKD3D_SHADER_COMPONENT_INT ? VKD3D_SHADER_COMPONENT_UINT : e->component_type; +- shader_register_init(&param->reg, reg_type, vkd3d_data_type_from_component_type(component_type), 0); ++ vsir_register_init(&param->reg, reg_type, vkd3d_data_type_from_component_type(component_type), 0); + } + + static void sm6_parser_init_signature(struct sm6_parser *sm6, const struct shader_signature *s, +@@ -2112,6 +2168,8 @@ static void sm6_parser_emit_signature(struct sm6_parser *sm6, const struct shade + param = &ins->declaration.dst; + } + ++ /* TODO: set the interpolation mode when signatures are loaded from DXIL metadata. */ ++ ins->flags = (handler_idx == VKD3DSIH_DCL_INPUT_PS) ? VKD3DSIM_LINEAR_NOPERSPECTIVE : 0; + *param = params[i]; + } + } +@@ -2123,11 +2181,24 @@ static void sm6_parser_emit_signature(struct sm6_parser *sm6, const struct shade + sm6->output_params); + } + ++static void sm6_parser_init_input_signature(struct sm6_parser *sm6, const struct shader_signature *input_signature) ++{ ++ sm6_parser_init_signature(sm6, input_signature, VKD3DSPR_INPUT, sm6->input_params); ++} ++ + static void sm6_parser_emit_output_signature(struct sm6_parser *sm6, const struct shader_signature *output_signature) + { + sm6_parser_emit_signature(sm6, output_signature, VKD3DSIH_DCL_OUTPUT, VKD3DSIH_DCL_OUTPUT_SIV, sm6->output_params); + } + ++static void sm6_parser_emit_input_signature(struct sm6_parser *sm6, const struct shader_signature *input_signature) ++{ ++ sm6_parser_emit_signature(sm6, input_signature, ++ (sm6->p.shader_version.type == VKD3D_SHADER_TYPE_PIXEL) ? VKD3DSIH_DCL_INPUT_PS : VKD3DSIH_DCL_INPUT, ++ (sm6->p.shader_version.type == VKD3D_SHADER_TYPE_PIXEL) ? 
VKD3DSIH_DCL_INPUT_PS_SIV : VKD3DSIH_DCL_INPUT_SIV, ++ sm6->input_params); ++} ++ + static const struct sm6_value *sm6_parser_next_function_definition(struct sm6_parser *sm6) + { + size_t i, count = sm6->function_count; +@@ -2150,6 +2221,38 @@ static struct sm6_block *sm6_block_create() + return block; + } + ++static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, struct sm6_block *code_block, ++ enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins) ++{ ++ struct vkd3d_shader_src_param *src_param; ++ const struct shader_signature *signature; ++ unsigned int row_index, column_index; ++ const struct signature_element *e; ++ ++ row_index = sm6_value_get_constant_uint(operands[0]); ++ column_index = sm6_value_get_constant_uint(operands[2]); ++ ++ vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV); ++ ++ signature = &sm6->p.shader_desc.input_signature; ++ if (row_index >= signature->element_count) ++ { ++ WARN("Invalid row index %u.\n", row_index); ++ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND, ++ "Invalid input row index %u.", row_index); ++ return; ++ } ++ e = &signature->elements[row_index]; ++ ++ src_param = instruction_src_params_alloc(ins, 1, sm6); ++ src_param->reg = sm6->input_params[row_index].reg; ++ src_param_init_scalar(src_param, column_index); ++ if (e->register_count > 1) ++ register_address_init(&src_param->reg, operands[1], 0, sm6); ++ ++ instruction_dst_param_init_ssa_scalar(ins, sm6); ++} ++ + static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, struct sm6_block *code_block, + enum dx_intrinsic_opcode op, const struct sm6_value **operands, struct vkd3d_shader_instruction *ins) + { +@@ -2219,6 +2322,7 @@ struct sm6_dx_opcode_info + */ + static const struct sm6_dx_opcode_info sm6_dx_op_table[] = + { ++ [DX_LOAD_INPUT ] = {'o', "ii8i", sm6_parser_emit_dx_load_input}, + [DX_STORE_OUTPUT ] = {'v', "ii8o", sm6_parser_emit_dx_store_output}, + }; + +@@ -2292,7 +2396,7 @@ static void sm6_parser_emit_unhandled(struct sm6_parser *sm6, struct vkd3d_shade + return; + + type = sm6_type_get_scalar_type(dst->type, 0); +- shader_register_init(&dst->u.reg, VKD3DSPR_UNDEF, vkd3d_data_type_from_sm6_type(type), 0); ++ vsir_register_init(&dst->u.reg, VKD3DSPR_UNDEF, vkd3d_data_type_from_sm6_type(type), 0); + /* dst->is_undefined is not set here because it flags only explicitly undefined values. 
*/ + } + +@@ -2303,7 +2407,7 @@ static void sm6_parser_decode_dx_op(struct sm6_parser *sm6, struct sm6_block *co + if (op >= ARRAY_SIZE(sm6_dx_op_table) || !sm6_dx_op_table[op].operand_info) + { + FIXME("Unhandled dx intrinsic function id %u, '%s'.\n", op, name); +- vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_UNHANDLED_INTRINSIC, ++ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_UNHANDLED_INTRINSIC, + "Call to intrinsic function %s is unhandled.", name); + sm6_parser_emit_unhandled(sm6, ins, dst); + return; +@@ -2418,11 +2522,11 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const + struct sm6_function *function) + { + struct vkd3d_shader_instruction *ins; ++ size_t i, block_idx, block_count; + const struct dxil_record *record; + bool ret_found, is_terminator; + struct sm6_block *code_block; + struct sm6_value *dst; +- size_t i, block_idx; + + if (sm6->function_count) + { +@@ -2448,12 +2552,12 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const + return VKD3D_ERROR_INVALID_SHADER; + } + +- if (!(function->block_count = block->records[0]->operands[0])) ++ if (!(block_count = block->records[0]->operands[0])) + { + WARN("Function contains no blocks.\n"); + return VKD3D_ERROR_INVALID_SHADER; + } +- if (function->block_count > 1) ++ if (block_count > 1) + { + FIXME("Branched shaders are not supported yet.\n"); + return VKD3D_ERROR_INVALID_SHADER; +@@ -2464,6 +2568,7 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const + ERR("Failed to allocate code block.\n"); + return VKD3D_ERROR_OUT_OF_MEMORY; + } ++ function->block_count = block_count; + code_block = function->blocks[0]; + + sm6->cur_max_value = function->value_count; +@@ -2683,6 +2788,7 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t + const char *source_name, struct vkd3d_shader_message_context *message_context) + { + const struct shader_signature *output_signature = &sm6->p.shader_desc.output_signature; ++ const struct shader_signature *input_signature = &sm6->p.shader_desc.input_signature; + const struct vkd3d_shader_location location = {.source_name = source_name}; + uint32_t version_token, dxil_version, token_count, magic; + unsigned int chunk_offset, chunk_size; +@@ -2838,11 +2944,12 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t + return ret; + } + +- if (!(sm6->output_params = shader_parser_get_dst_params(&sm6->p, output_signature->element_count))) ++ if (!(sm6->output_params = shader_parser_get_dst_params(&sm6->p, output_signature->element_count)) ++ || !(sm6->input_params = shader_parser_get_dst_params(&sm6->p, input_signature->element_count))) + { +- ERR("Failed to allocate output parameters.\n"); ++ ERR("Failed to allocate input/output parameters.\n"); + vkd3d_shader_error(message_context, &location, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY, +- "Out of memory allocating output parameters."); ++ "Out of memory allocating input/output parameters."); + return VKD3D_ERROR_OUT_OF_MEMORY; + } + +@@ -2869,6 +2976,7 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t + "Out of memory allocating DXIL value array."); + return VKD3D_ERROR_OUT_OF_MEMORY; + } ++ sm6->ssa_next_id = 1; + + if ((ret = sm6_parser_globals_init(sm6)) < 0) + { +@@ -2877,6 +2985,7 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t + } + + sm6_parser_init_output_signature(sm6, output_signature); ++ 
sm6_parser_init_input_signature(sm6, input_signature); + + if ((ret = sm6_parser_module_init(sm6, &sm6->root_block, 0)) < 0) + { +@@ -2889,13 +2998,16 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, const uint32_t + return ret; + } + +- if (!sm6_parser_require_space(sm6, output_signature->element_count)) ++ if (!sm6_parser_require_space(sm6, output_signature->element_count + input_signature->element_count)) + { + vkd3d_shader_error(message_context, &location, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY, + "Out of memory emitting shader signature declarations."); + return VKD3D_ERROR_OUT_OF_MEMORY; + } + sm6_parser_emit_output_signature(sm6, output_signature); ++ sm6_parser_emit_input_signature(sm6, input_signature); ++ ++ sm6->p.shader_desc.ssa_count = sm6->ssa_next_id; + + for (i = 0; i < sm6->function_count; ++i) + { +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.y b/libs/vkd3d/libs/vkd3d-shader/hlsl.y +index fb6d485ea69..a47246de2be 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.y ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.y +@@ -3423,6 +3423,20 @@ static bool intrinsic_step(struct hlsl_ctx *ctx, + return !!add_implicit_conversion(ctx, params->instrs, ge, type, loc); + } + ++static bool intrinsic_tan(struct hlsl_ctx *ctx, ++ const struct parse_initializer *params, const struct vkd3d_shader_location *loc) ++{ ++ struct hlsl_ir_node *arg = params->args[0], *sin, *cos; ++ ++ if (!(sin = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_SIN, arg, loc))) ++ return false; ++ ++ if (!(cos = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_COS, arg, loc))) ++ return false; ++ ++ return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_DIV, sin, cos, loc); ++} ++ + static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *params, + const struct vkd3d_shader_location *loc, const char *name, enum hlsl_sampler_dim dim) + { +@@ -3697,6 +3711,7 @@ intrinsic_functions[] = + {"smoothstep", 3, true, intrinsic_smoothstep}, + {"sqrt", 1, true, intrinsic_sqrt}, + {"step", 2, true, intrinsic_step}, ++ {"tan", 1, true, intrinsic_tan}, + {"tex1D", -1, false, intrinsic_tex1D}, + {"tex2D", -1, false, intrinsic_tex2D}, + {"tex3D", -1, false, intrinsic_tex3D}, +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c +index ed31efc3f0b..76572cf93ec 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c +@@ -989,7 +989,7 @@ static bool lower_matrix_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *ins + * For the latter case, this pass takes care of lowering hlsl_ir_indexes into individual + * hlsl_ir_loads, or individual hlsl_ir_resource_loads, in case the indexing is a + * resource access. 
*/ +-static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_ir_node *val, *store; + struct hlsl_deref var_deref; +@@ -1023,8 +1023,7 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, + + if (!(load = hlsl_new_resource_load(ctx, ¶ms, &instr->loc))) + return false; +- list_add_before(&instr->entry, &load->entry); +- hlsl_replace_node(instr, load); ++ hlsl_block_add_instr(block, load); + return true; + } + +@@ -1034,7 +1033,7 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, + + if (!(store = hlsl_new_simple_store(ctx, var, val))) + return false; +- list_add_before(&instr->entry, &store->entry); ++ hlsl_block_add_instr(block, store); + + if (hlsl_index_is_noncontiguous(index)) + { +@@ -1054,38 +1053,36 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, + + if (!(c = hlsl_new_uint_constant(ctx, i, &instr->loc))) + return false; +- list_add_before(&instr->entry, &c->entry); ++ hlsl_block_add_instr(block, c); + + if (!(load = hlsl_new_load_index(ctx, &var_deref, c, &instr->loc))) + return false; +- list_add_before(&instr->entry, &load->node.entry); ++ hlsl_block_add_instr(block, &load->node); + + if (!(load = hlsl_new_load_index(ctx, &load->src, index->idx.node, &instr->loc))) + return false; +- list_add_before(&instr->entry, &load->node.entry); ++ hlsl_block_add_instr(block, &load->node); + + if (!(store = hlsl_new_store_index(ctx, &row_deref, c, &load->node, 0, &instr->loc))) + return false; +- list_add_before(&instr->entry, &store->entry); ++ hlsl_block_add_instr(block, store); + } + + if (!(load = hlsl_new_var_load(ctx, var, &instr->loc))) + return false; +- list_add_before(&instr->entry, &load->node.entry); +- hlsl_replace_node(instr, &load->node); ++ hlsl_block_add_instr(block, &load->node); + } + else + { + if (!(load = hlsl_new_load_index(ctx, &var_deref, index->idx.node, &instr->loc))) + return false; +- list_add_before(&instr->entry, &load->node.entry); +- hlsl_replace_node(instr, &load->node); ++ hlsl_block_add_instr(block, &load->node); + } + return true; + } + + /* Lower casts from vec1 to vecN to swizzles. */ +-static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + const struct hlsl_type *src_type, *dst_type; + struct hlsl_type *dst_scalar_type; +@@ -1101,25 +1098,22 @@ static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, v + + if (src_type->class <= HLSL_CLASS_VECTOR && dst_type->class <= HLSL_CLASS_VECTOR && src_type->dimx == 1) + { +- struct hlsl_ir_node *replacement, *new_cast, *swizzle; ++ struct hlsl_ir_node *new_cast, *swizzle; + + dst_scalar_type = hlsl_get_scalar_type(ctx, dst_type->base_type); + /* We need to preserve the cast since it might be doing more than just + * turning the scalar into a vector. 
*/ + if (!(new_cast = hlsl_new_cast(ctx, cast->operands[0].node, dst_scalar_type, &cast->node.loc))) + return false; +- list_add_after(&cast->node.entry, &new_cast->entry); +- replacement = new_cast; ++ hlsl_block_add_instr(block, new_cast); + + if (dst_type->dimx != 1) + { +- if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), dst_type->dimx, replacement, &cast->node.loc))) ++ if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), dst_type->dimx, new_cast, &cast->node.loc))) + return false; +- list_add_after(&new_cast->entry, &swizzle->entry); +- replacement = swizzle; ++ hlsl_block_add_instr(block, swizzle); + } + +- hlsl_replace_node(&cast->node, replacement); + return true; + } + +@@ -1981,7 +1975,7 @@ static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr + return true; + } + +-static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + const struct hlsl_type *src_type, *dst_type; + struct hlsl_type *dst_vector_type; +@@ -2004,12 +1998,12 @@ static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *ins + * narrowing the vector. */ + if (!(new_cast = hlsl_new_cast(ctx, cast->operands[0].node, dst_vector_type, &cast->node.loc))) + return false; +- list_add_after(&cast->node.entry, &new_cast->entry); ++ hlsl_block_add_instr(block, new_cast); ++ + if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, Y, Z, W), dst_type->dimx, new_cast, &cast->node.loc))) + return false; +- list_add_after(&new_cast->entry, &swizzle->entry); ++ hlsl_block_add_instr(block, swizzle); + +- hlsl_replace_node(&cast->node, swizzle); + return true; + } + +@@ -2068,7 +2062,7 @@ static bool remove_trivial_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *i + return true; + } + +-static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_ir_node *idx; + struct hlsl_deref *deref; +@@ -2099,11 +2093,11 @@ static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir + + if (!(vector_load = hlsl_new_load_parent(ctx, deref, &instr->loc))) + return false; +- list_add_before(&instr->entry, &vector_load->node.entry); ++ hlsl_block_add_instr(block, &vector_load->node); + + if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), type->dimx, idx, &instr->loc))) + return false; +- list_add_before(&instr->entry, &swizzle->entry); ++ hlsl_block_add_instr(block, swizzle); + + value.u[0].u = 0; + value.u[1].u = 1; +@@ -2111,18 +2105,18 @@ static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir + value.u[3].u = 3; + if (!(c = hlsl_new_constant(ctx, hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, type->dimx), &value, &instr->loc))) + return false; +- list_add_before(&instr->entry, &c->entry); ++ hlsl_block_add_instr(block, c); + + operands[0] = swizzle; + operands[1] = c; + if (!(eq = hlsl_new_expr(ctx, HLSL_OP2_EQUAL, operands, + hlsl_get_vector_type(ctx, HLSL_TYPE_BOOL, type->dimx), &instr->loc))) + return false; +- list_add_before(&instr->entry, &eq->entry); ++ hlsl_block_add_instr(block, eq); + + if (!(eq = hlsl_new_cast(ctx, eq, type, &instr->loc))) + return false; +- list_add_before(&instr->entry, &eq->entry); ++ hlsl_block_add_instr(block, eq); + + op = HLSL_OP2_DOT; + if 
(type->dimx == 1) +@@ -2134,8 +2128,7 @@ static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir + operands[1] = eq; + if (!(dot = hlsl_new_expr(ctx, op, operands, instr->data_type, &instr->loc))) + return false; +- list_add_before(&instr->entry, &dot->entry); +- hlsl_replace_node(instr, dot); ++ hlsl_block_add_instr(block, dot); + + return true; + } +@@ -2275,10 +2268,10 @@ static bool sort_synthetic_separated_samplers_first(struct hlsl_ctx *ctx) + } + + /* Lower DIV to RCP + MUL. */ +-static bool lower_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { ++ struct hlsl_ir_node *rcp, *mul; + struct hlsl_ir_expr *expr; +- struct hlsl_ir_node *rcp; + + if (instr->type != HLSL_IR_EXPR) + return false; +@@ -2288,18 +2281,20 @@ static bool lower_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, voi + + if (!(rcp = hlsl_new_unary_expr(ctx, HLSL_OP1_RCP, expr->operands[1].node, &instr->loc))) + return false; +- list_add_before(&expr->node.entry, &rcp->entry); +- expr->op = HLSL_OP2_MUL; +- hlsl_src_remove(&expr->operands[1]); +- hlsl_src_from_node(&expr->operands[1], rcp); ++ hlsl_block_add_instr(block, rcp); ++ ++ if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, expr->operands[0].node, rcp))) ++ return false; ++ hlsl_block_add_instr(block, mul); ++ + return true; + } + + /* Lower SQRT to RSQ + RCP. */ +-static bool lower_sqrt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_sqrt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { ++ struct hlsl_ir_node *rsq, *rcp; + struct hlsl_ir_expr *expr; +- struct hlsl_ir_node *rsq; + + if (instr->type != HLSL_IR_EXPR) + return false; +@@ -2309,15 +2304,16 @@ static bool lower_sqrt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *c + + if (!(rsq = hlsl_new_unary_expr(ctx, HLSL_OP1_RSQ, expr->operands[0].node, &instr->loc))) + return false; +- list_add_before(&expr->node.entry, &rsq->entry); +- expr->op = HLSL_OP1_RCP; +- hlsl_src_remove(&expr->operands[0]); +- hlsl_src_from_node(&expr->operands[0], rsq); ++ hlsl_block_add_instr(block, rsq); ++ ++ if (!(rcp = hlsl_new_unary_expr(ctx, HLSL_OP1_RCP, rsq, &instr->loc))) ++ return false; ++ hlsl_block_add_instr(block, rcp); + return true; + } + + /* Lower DP2 to MUL + ADD */ +-static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_ir_node *arg1, *arg2, *mul, *replacement, *zero, *add_x, *add_y; + struct hlsl_ir_expr *expr; +@@ -2338,7 +2334,7 @@ static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *co + + if (!(zero = hlsl_new_float_constant(ctx, 0.0f, &expr->node.loc))) + return false; +- list_add_before(&instr->entry, &zero->entry); ++ hlsl_block_add_instr(block, zero); + + operands[0] = arg1; + operands[1] = arg2; +@@ -2351,27 +2347,26 @@ static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *co + { + if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, expr->operands[0].node, expr->operands[1].node))) + return false; +- list_add_before(&instr->entry, &mul->entry); ++ hlsl_block_add_instr(block, mul); + + if (!(add_x = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), instr->data_type->dimx, mul, &expr->node.loc))) + return false; +- list_add_before(&instr->entry, &add_x->entry); ++ 
hlsl_block_add_instr(block, add_x); + + if (!(add_y = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Y, Y, Y, Y), instr->data_type->dimx, mul, &expr->node.loc))) + return false; +- list_add_before(&instr->entry, &add_y->entry); ++ hlsl_block_add_instr(block, add_y); + + if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, add_x, add_y))) + return false; + } +- list_add_before(&instr->entry, &replacement->entry); ++ hlsl_block_add_instr(block, replacement); + +- hlsl_replace_node(instr, replacement); + return true; + } + + /* Lower ABS to MAX */ +-static bool lower_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_ir_node *arg, *neg, *replacement; + struct hlsl_ir_expr *expr; +@@ -2385,18 +2380,17 @@ static bool lower_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *co + + if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc))) + return false; +- list_add_before(&instr->entry, &neg->entry); ++ hlsl_block_add_instr(block, neg); + + if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_MAX, neg, arg))) + return false; +- list_add_before(&instr->entry, &replacement->entry); ++ hlsl_block_add_instr(block, replacement); + +- hlsl_replace_node(instr, replacement); + return true; + } + + /* Lower ROUND using FRC, ROUND(x) -> ((x + 0.5) - FRC(x + 0.5)). */ +-static bool lower_round(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_round(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_ir_node *arg, *neg, *sum, *frc, *half, *replacement; + struct hlsl_type *type = instr->data_type; +@@ -2417,31 +2411,29 @@ static bool lower_round(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void * + half_value.u[i].f = 0.5f; + if (!(half = hlsl_new_constant(ctx, type, &half_value, &expr->node.loc))) + return false; +- +- list_add_before(&instr->entry, &half->entry); ++ hlsl_block_add_instr(block, half); + + if (!(sum = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg, half))) + return false; +- list_add_before(&instr->entry, &sum->entry); ++ hlsl_block_add_instr(block, sum); + + if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, sum, &instr->loc))) + return false; +- list_add_before(&instr->entry, &frc->entry); ++ hlsl_block_add_instr(block, frc); + + if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, frc, &instr->loc))) + return false; +- list_add_before(&instr->entry, &neg->entry); ++ hlsl_block_add_instr(block, neg); + + if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, sum, neg))) + return false; +- list_add_before(&instr->entry, &replacement->entry); ++ hlsl_block_add_instr(block, replacement); + +- hlsl_replace_node(instr, replacement); + return true; + } + + /* Use 'movc' for the ternary operator. 
*/ +-static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS], *replacement; + struct hlsl_ir_node *zero, *cond, *first, *second; +@@ -2464,7 +2456,7 @@ static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void + { + if (!(zero = hlsl_new_constant(ctx, cond->data_type, &zero_value, &instr->loc))) + return false; +- list_add_tail(&instr->entry, &zero->entry); ++ hlsl_block_add_instr(block, zero); + + memset(operands, 0, sizeof(operands)); + operands[0] = zero; +@@ -2473,7 +2465,7 @@ static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void + type = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_BOOL, type->dimx, type->dimy); + if (!(cond = hlsl_new_expr(ctx, HLSL_OP2_NEQUAL, operands, type, &instr->loc))) + return false; +- list_add_before(&instr->entry, &cond->entry); ++ hlsl_block_add_instr(block, cond); + } + + memset(operands, 0, sizeof(operands)); +@@ -2482,17 +2474,15 @@ static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void + operands[2] = second; + if (!(replacement = hlsl_new_expr(ctx, HLSL_OP3_MOVC, operands, first->data_type, &instr->loc))) + return false; +- list_add_before(&instr->entry, &replacement->entry); +- +- hlsl_replace_node(instr, replacement); ++ hlsl_block_add_instr(block, replacement); + return true; + } + +-static bool lower_casts_to_bool(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_casts_to_bool(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_type *type = instr->data_type, *arg_type; + static const struct hlsl_constant_value zero_value; +- struct hlsl_ir_node *zero; ++ struct hlsl_ir_node *zero, *neq; + struct hlsl_ir_expr *expr; + + if (instr->type != HLSL_IR_EXPR) +@@ -2512,10 +2502,12 @@ static bool lower_casts_to_bool(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr + zero = hlsl_new_constant(ctx, arg_type, &zero_value, &instr->loc); + if (!zero) + return false; +- list_add_before(&instr->entry, &zero->entry); ++ hlsl_block_add_instr(block, zero); + +- expr->op = HLSL_OP2_NEQUAL; +- hlsl_src_from_node(&expr->operands[1], zero); ++ if (!(neq = hlsl_new_binary_expr(ctx, HLSL_OP2_NEQUAL, expr->operands[0].node, zero))) ++ return false; ++ neq->data_type = expr->node.data_type; ++ hlsl_block_add_instr(block, neq); + + return true; + } +@@ -2666,10 +2658,10 @@ static bool lower_int_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, + return hlsl_add_conditional(ctx, block, and, neg, cast3); + } + +-static bool lower_int_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_int_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_type *type = instr->data_type; +- struct hlsl_ir_node *arg, *neg; ++ struct hlsl_ir_node *arg, *neg, *max; + struct hlsl_ir_expr *expr; + + if (instr->type != HLSL_IR_EXPR) +@@ -2687,15 +2679,16 @@ static bool lower_int_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void + + if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc))) + return false; +- list_add_before(&instr->entry, &neg->entry); ++ hlsl_block_add_instr(block, neg); + +- expr->op = HLSL_OP2_MAX; +- hlsl_src_from_node(&expr->operands[1], neg); ++ if (!(max = hlsl_new_binary_expr(ctx, HLSL_OP2_MAX, arg, neg))) ++ return 
false; ++ hlsl_block_add_instr(block, max); + + return true; + } + +-static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block) + { + struct hlsl_ir_node *arg1, *arg2, *mult, *comps[4] = {0}, *res; + struct hlsl_type *type = instr->data_type; +@@ -2721,7 +2714,7 @@ static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void + + if (!(mult = hlsl_new_binary_expr(ctx, is_bool ? HLSL_OP2_LOGIC_AND : HLSL_OP2_MUL, arg1, arg2))) + return false; +- list_add_before(&instr->entry, &mult->entry); ++ hlsl_block_add_instr(block, mult); + + for (i = 0; i < dimx; ++i) + { +@@ -2729,7 +2722,7 @@ static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void + + if (!(comps[i] = hlsl_new_swizzle(ctx, s, 1, mult, &instr->loc))) + return false; +- list_add_before(&instr->entry, &comps[i]->entry); ++ hlsl_block_add_instr(block, comps[i]); + } + + res = comps[0]; +@@ -2737,10 +2730,9 @@ static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void + { + if (!(res = hlsl_new_binary_expr(ctx, is_bool ? HLSL_OP2_LOGIC_OR : HLSL_OP2_ADD, res, comps[i]))) + return false; +- list_add_before(&instr->entry, &res->entry); ++ hlsl_block_add_instr(block, res); + } + +- hlsl_replace_node(instr, res); + return true; + } + +@@ -4328,7 +4320,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry + while (hlsl_transform_ir(ctx, lower_calls, body, NULL)); + + lower_ir(ctx, lower_matrix_swizzles, body); +- hlsl_transform_ir(ctx, lower_index_loads, body, NULL); ++ lower_ir(ctx, lower_index_loads, body); + + LIST_FOR_EACH_ENTRY(var, &ctx->globals->vars, struct hlsl_ir_var, scope_entry) + { +@@ -4391,7 +4383,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry + { + hlsl_transform_ir(ctx, lower_discard_neg, body, NULL); + } +- hlsl_transform_ir(ctx, lower_broadcasts, body, NULL); ++ lower_ir(ctx, lower_broadcasts, body); + while (hlsl_transform_ir(ctx, fold_redundant_casts, body, NULL)); + do + { +@@ -4401,12 +4393,12 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry + while (progress); + hlsl_transform_ir(ctx, split_matrix_copies, body, NULL); + +- hlsl_transform_ir(ctx, lower_narrowing_casts, body, NULL); +- hlsl_transform_ir(ctx, lower_casts_to_bool, body, NULL); +- hlsl_transform_ir(ctx, lower_int_dot, body, NULL); ++ lower_ir(ctx, lower_narrowing_casts, body); ++ lower_ir(ctx, lower_casts_to_bool, body); ++ lower_ir(ctx, lower_int_dot, body); + lower_ir(ctx, lower_int_division, body); + lower_ir(ctx, lower_int_modulus, body); +- hlsl_transform_ir(ctx, lower_int_abs, body, NULL); ++ lower_ir(ctx, lower_int_abs, body); + lower_ir(ctx, lower_float_modulus, body); + hlsl_transform_ir(ctx, fold_redundant_casts, body, NULL); + do +@@ -4419,9 +4411,9 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry + } + while (progress); + +- hlsl_transform_ir(ctx, lower_nonconstant_vector_derefs, body, NULL); +- hlsl_transform_ir(ctx, lower_casts_to_bool, body, NULL); +- hlsl_transform_ir(ctx, lower_int_dot, body, NULL); ++ lower_ir(ctx, lower_nonconstant_vector_derefs, body); ++ lower_ir(ctx, lower_casts_to_bool, body); ++ lower_ir(ctx, lower_int_dot, body); + + hlsl_transform_ir(ctx, validate_static_object_references, body, NULL); + hlsl_transform_ir(ctx, track_object_components_sampler_dim, body, NULL); +@@ -4431,18 
+4423,18 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry + sort_synthetic_separated_samplers_first(ctx); + + if (profile->major_version >= 4) +- hlsl_transform_ir(ctx, lower_ternary, body, NULL); ++ lower_ir(ctx, lower_ternary, body); + if (profile->major_version < 4) + { +- hlsl_transform_ir(ctx, lower_division, body, NULL); +- hlsl_transform_ir(ctx, lower_sqrt, body, NULL); +- hlsl_transform_ir(ctx, lower_dot, body, NULL); +- hlsl_transform_ir(ctx, lower_round, body, NULL); ++ lower_ir(ctx, lower_division, body); ++ lower_ir(ctx, lower_sqrt, body); ++ lower_ir(ctx, lower_dot, body); ++ lower_ir(ctx, lower_round, body); + } + + if (profile->major_version < 2) + { +- hlsl_transform_ir(ctx, lower_abs, body, NULL); ++ lower_ir(ctx, lower_abs, body); + } + + /* TODO: move forward, remove when no longer needed */ +diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c +index 9ee38ffee37..a97747b515a 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/ir.c ++++ b/libs/vkd3d/libs/vkd3d-shader/ir.c +@@ -72,7 +72,7 @@ static void shader_instruction_eliminate_phase_instance_id(struct vkd3d_shader_i + reg->idx[2].offset = ~0u; + reg->idx[2].rel_addr = NULL; + reg->idx_count = 0; +- reg->immconst_type = VKD3D_IMMCONST_SCALAR; ++ reg->dimension = VSIR_DIMENSION_SCALAR; + reg->u.immconst_uint[0] = instance_id; + continue; + } +@@ -296,7 +296,7 @@ static enum vkd3d_result flattener_flatten_phases(struct hull_flattener *normali + return VKD3D_OK; + } + +-void shader_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type, ++void vsir_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type, + enum vkd3d_data_type data_type, unsigned int idx_count) + { + reg->type = reg_type; +@@ -310,7 +310,7 @@ void shader_register_init(struct vkd3d_shader_r + reg->idx[2].offset = ~0u; + reg->idx[2].rel_addr = NULL; + reg->idx_count = idx_count; +- reg->immconst_type = VKD3D_IMMCONST_SCALAR; ++ reg->dimension = VSIR_DIMENSION_SCALAR; + } + + void vsir_instruction_init(struct vkd3d_shader_instruction *ins, const struct vkd3d_shader_location *location, +@@ -372,7 +372,7 @@ static struct vkd3d_shader_src_param *instruction_array_create_outpointid_param( + if (!(rel_addr = shader_src_param_allocator_get(&instructions->src_params, 1))) + return NULL; + +- shader_register_init(&rel_addr->reg, VKD3DSPR_OUTPOINTID, VKD3D_DATA_UINT, 0); ++ vsir_register_init(&rel_addr->reg, VKD3DSPR_OUTPOINTID, VKD3D_DATA_UINT, 0); + rel_addr->swizzle = 0; + rel_addr->modifiers = 0; + +@@ -402,7 +402,7 @@ static void shader_dst_param_io_init(struct vkd3d_shader_dst_param *param, const + param->write_mask = e->mask; + param->modifiers = 0; + param->shift = 0; +- shader_register_init(&param->reg, reg_type, vkd3d_data_type_from_component_type(e->component_type), idx_count); ++ vsir_register_init(&param->reg, reg_type, vkd3d_data_type_from_component_type(e->component_type), idx_count); + } + + static enum vkd3d_result control_point_normaliser_emit_hs_input(struct control_point_normaliser *normaliser, +@@ -1207,7 +1207,7 @@ static void shader_register_normalise_flat_constants(struct vkd3d_shader_src_par + { + param->reg.type = VKD3DSPR_IMMCONST; + param->reg.idx_count = 0; +- param->reg.immconst_type = VKD3D_IMMCONST_VEC4; ++ param->reg.dimension = VSIR_DIMENSION_VEC4; + for (j = 0; j < 4; ++j) + param->reg.u.immconst_uint[j] = normaliser->defs[i].value[j]; + return; +@@ -1260,6 +1260,96 @@ static enum 
vkd3d_result instruction_array_normalise_flat_constants(struct vkd3d + return VKD3D_OK; + } + ++static void remove_dead_code(struct vkd3d_shader_parser *parser) ++{ ++ size_t i, depth = 0; ++ bool dead = false; ++ ++ for (i = 0; i < parser->instructions.count; ++i) ++ { ++ struct vkd3d_shader_instruction *ins = &parser->instructions.elements[i]; ++ ++ switch (ins->handler_idx) ++ { ++ case VKD3DSIH_IF: ++ case VKD3DSIH_LOOP: ++ case VKD3DSIH_SWITCH: ++ if (dead) ++ { ++ vkd3d_shader_instruction_make_nop(ins); ++ ++depth; ++ } ++ break; ++ ++ case VKD3DSIH_ENDIF: ++ case VKD3DSIH_ENDLOOP: ++ case VKD3DSIH_ENDSWITCH: ++ case VKD3DSIH_ELSE: ++ if (dead) ++ { ++ if (depth > 0) ++ { ++ vkd3d_shader_instruction_make_nop(ins); ++ if (ins->handler_idx != VKD3DSIH_ELSE) ++ --depth; ++ } ++ else ++ { ++ dead = false; ++ } ++ } ++ break; ++ ++ /* `depth' is counted with respect to where the dead code ++ * segment began. So it starts at zero and it signals the ++ * termination of the dead code segment when it would ++ * become negative. */ ++ case VKD3DSIH_BREAK: ++ case VKD3DSIH_RET: ++ case VKD3DSIH_CONTINUE: ++ if (dead) ++ { ++ vkd3d_shader_instruction_make_nop(ins); ++ } ++ else ++ { ++ dead = true; ++ depth = 0; ++ } ++ break; ++ ++ /* If `case' or `default' appears at zero depth, it means ++ * that they are a possible target for the corresponding ++ * switch, so the code is live again. */ ++ case VKD3DSIH_CASE: ++ case VKD3DSIH_DEFAULT: ++ if (dead) ++ { ++ if (depth == 0) ++ dead = false; ++ else ++ vkd3d_shader_instruction_make_nop(ins); ++ } ++ break; ++ ++ /* Phase instructions can only appear in hull shaders and ++ * outside of any block. When a phase returns, control is ++ * moved to the following phase, so they make code live ++ * again. */ ++ case VKD3DSIH_HS_CONTROL_POINT_PHASE: ++ case VKD3DSIH_HS_FORK_PHASE: ++ case VKD3DSIH_HS_JOIN_PHASE: ++ dead = false; ++ break; ++ ++ default: ++ if (dead) ++ vkd3d_shader_instruction_make_nop(ins); ++ break; ++ } ++ } ++} ++ + enum vkd3d_result vkd3d_shader_normalise(struct vkd3d_shader_parser *parser, + const struct vkd3d_shader_compile_info *compile_info) + { +@@ -1287,6 +1377,9 @@ enum vkd3d_result vkd3d_shader_normalise(struct vkd3d_shader_parser *parser, + if (result >= 0) + result = instruction_array_normalise_flat_constants(parser); + ++ if (result >= 0) ++ remove_dead_code(parser); ++ + if (result >= 0 && TRACE_ON()) + vkd3d_shader_trace(instructions, &parser->shader_version); + +diff --git a/libs/vkd3d/libs/vkd3d-shader/spirv.c b/libs/vkd3d/libs/vkd3d-shader/spirv.c +index 8285b56a17c..07e276c57da 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/spirv.c ++++ b/libs/vkd3d/libs/vkd3d-shader/spirv.c +@@ -1215,10 +1215,14 @@ static uint32_t vkd3d_spirv_build_op_function_call(struct vkd3d_spirv_builder *b + SpvOpFunctionCall, result_type, function_id, arguments, argument_count); + } + +-static uint32_t vkd3d_spirv_build_op_undef(struct vkd3d_spirv_builder *builder, +- struct vkd3d_spirv_stream *stream, uint32_t type_id) ++static uint32_t vkd3d_spirv_build_op_undef(struct vkd3d_spirv_builder *builder, uint32_t type_id) + { +- return vkd3d_spirv_build_op_tr(builder, stream, SpvOpUndef, type_id); ++ return vkd3d_spirv_build_op_tr(builder, &builder->global_stream, SpvOpUndef, type_id); ++} ++ ++static uint32_t vkd3d_spirv_get_op_undef(struct vkd3d_spirv_builder *builder, uint32_t type_id) ++{ ++ return vkd3d_spirv_build_once1(builder, SpvOpUndef, type_id, vkd3d_spirv_build_op_undef); + } + + static uint32_t vkd3d_spirv_build_op_access_chain(struct 
vkd3d_spirv_builder *builder, +@@ -1710,6 +1714,15 @@ static uint32_t vkd3d_spirv_build_op_glsl_std450_cos(struct vkd3d_spirv_builder + return vkd3d_spirv_build_op_glsl_std450_tr1(builder, GLSLstd450Cos, result_type, operand); + } + ++static uint32_t vkd3d_spirv_build_op_glsl_std450_max(struct vkd3d_spirv_builder *builder, ++ uint32_t result_type, uint32_t x, uint32_t y) ++{ ++ uint32_t glsl_std450_id = vkd3d_spirv_get_glsl_std450_instr_set(builder); ++ uint32_t operands[] = {x, y}; ++ return vkd3d_spirv_build_op_ext_inst(builder, result_type, glsl_std450_id, ++ GLSLstd450NMax, operands, ARRAY_SIZE(operands)); ++} ++ + static uint32_t vkd3d_spirv_build_op_glsl_std450_nclamp(struct vkd3d_spirv_builder *builder, + uint32_t result_type, uint32_t x, uint32_t min, uint32_t max) + { +@@ -2323,6 +2336,9 @@ struct spirv_compiler + bool write_tess_geom_point_size; + + struct vkd3d_string_buffer_cache string_buffers; ++ ++ uint32_t *ssa_register_ids; ++ unsigned int ssa_register_count; + }; + + static bool is_in_default_phase(const struct spirv_compiler *compiler) +@@ -2370,6 +2386,8 @@ static void spirv_compiler_destroy(struct spirv_compiler *compiler) + shader_signature_cleanup(&compiler->output_signature); + shader_signature_cleanup(&compiler->patch_constant_signature); + ++ vkd3d_free(compiler->ssa_register_ids); ++ + vkd3d_free(compiler); + } + +@@ -2850,7 +2868,7 @@ static uint32_t spirv_compiler_get_constant(struct spirv_compiler *compiler, + break; + default: + FIXME("Unhandled component_type %#x.\n", component_type); +- return vkd3d_spirv_build_op_undef(builder, &builder->global_stream, type_id); ++ return vkd3d_spirv_get_op_undef(builder, type_id); + } + + if (component_count == 1) +@@ -2879,7 +2897,7 @@ static uint32_t spirv_compiler_get_constant64(struct spirv_compiler *compiler, + if (component_type != VKD3D_SHADER_COMPONENT_DOUBLE) + { + FIXME("Unhandled component_type %#x.\n", component_type); +- return vkd3d_spirv_build_op_undef(builder, &builder->global_stream, type_id); ++ return vkd3d_spirv_get_op_undef(builder, type_id); + } + + if (component_count == 1) +@@ -2914,6 +2932,12 @@ static uint32_t spirv_compiler_get_constant_vector(struct spirv_compiler *compil + return spirv_compiler_get_constant(compiler, component_type, component_count, values); + } + ++static uint32_t spirv_compiler_get_constant_int_vector(struct spirv_compiler *compiler, ++ uint32_t value, unsigned int component_count) ++{ ++ return spirv_compiler_get_constant_vector(compiler, VKD3D_SHADER_COMPONENT_INT, component_count, value); ++} ++ + static uint32_t spirv_compiler_get_constant_uint_vector(struct spirv_compiler *compiler, + uint32_t value, unsigned int component_count) + { +@@ -3576,7 +3600,7 @@ static uint32_t spirv_compiler_emit_load_constant(struct spirv_compiler *compile + + assert(reg->type == VKD3DSPR_IMMCONST); + +- if (reg->immconst_type == VKD3D_IMMCONST_SCALAR) ++ if (reg->dimension == VSIR_DIMENSION_SCALAR) + { + for (i = 0; i < component_count; ++i) + values[i] = *reg->u.immconst_uint; +@@ -3603,7 +3627,7 @@ static uint32_t spirv_compiler_emit_load_constant64(struct spirv_compiler *compi + + assert(reg->type == VKD3DSPR_IMMCONST64); + +- if (reg->immconst_type == VKD3D_IMMCONST_SCALAR) ++ if (reg->dimension == VSIR_DIMENSION_SCALAR) + { + for (i = 0; i < component_count; ++i) + values[i] = *reg->u.immconst_uint64; +@@ -3631,7 +3655,7 @@ static uint32_t spirv_compiler_emit_load_undef(struct spirv_compiler *compiler, + assert(reg->type == VKD3DSPR_UNDEF); + + type_id = 
vkd3d_spirv_get_type_id_for_data_type(builder, reg->data_type, component_count);
+- return vkd3d_spirv_build_op_undef(builder, &builder->global_stream, type_id);
++ return vkd3d_spirv_get_op_undef(builder, type_id);
+ }
+
+ static uint32_t spirv_compiler_emit_load_scalar(struct spirv_compiler *compiler,
+@@ -3682,6 +3706,22 @@ static uint32_t spirv_compiler_emit_load_scalar(struct spirv_compiler *compiler,
+ return val_id;
+ }
+
++static uint32_t spirv_compiler_get_ssa_register_id(const struct spirv_compiler *compiler,
++ const struct vkd3d_shader_register *reg)
++{
++ assert(reg->idx[0].offset < compiler->ssa_register_count);
++ assert(reg->idx_count == 1);
++ return compiler->ssa_register_ids[reg->idx[0].offset];
++}
++
++static void spirv_compiler_set_ssa_register_id(const struct spirv_compiler *compiler,
++ const struct vkd3d_shader_register *reg, uint32_t val_id)
++{
++ unsigned int i = reg->idx[0].offset;
++ assert(i < compiler->ssa_register_count);
++ compiler->ssa_register_ids[i] = val_id;
++}
++
+ static uint32_t spirv_compiler_emit_load_reg(struct spirv_compiler *compiler,
+ const struct vkd3d_shader_register *reg, DWORD swizzle, DWORD write_mask)
+ {
+@@ -3701,10 +3741,14 @@ static uint32_t spirv_compiler_emit_load_reg(struct spirv_compiler *compiler,
+
+ component_count = vkd3d_write_mask_component_count(write_mask);
+ component_type = vkd3d_component_type_from_data_type(reg->data_type);
++
++ if (reg->type == VKD3DSPR_SSA)
++ return spirv_compiler_get_ssa_register_id(compiler, reg);
++
+ if (!spirv_compiler_get_register_info(compiler, reg, &reg_info))
+ {
+ type_id = vkd3d_spirv_get_type_id(builder, component_type, component_count);
+- return vkd3d_spirv_build_op_undef(builder, &builder->global_stream, type_id);
++ return vkd3d_spirv_get_op_undef(builder, type_id);
+ }
+ assert(reg_info.component_type != VKD3D_SHADER_COMPONENT_DOUBLE);
+ spirv_compiler_emit_dereference_register(compiler, reg, &reg_info);
+@@ -3912,6 +3956,12 @@ static void spirv_compiler_emit_store_reg(struct spirv_compiler *compiler,
+
+ assert(!register_is_constant_or_undef(reg));
+
++ if (reg->type == VKD3DSPR_SSA)
++ {
++ spirv_compiler_set_ssa_register_id(compiler, reg, val_id);
++ return;
++ }
++
+ if (!spirv_compiler_get_register_info(compiler, reg, &reg_info))
+ return;
+ spirv_compiler_emit_dereference_register(compiler, reg, &reg_info);
+@@ -4361,11 +4411,7 @@ static uint32_t spirv_compiler_get_invocation_id(struct spirv_compiler *compiler
+
+ assert(compiler->shader_type == VKD3D_SHADER_TYPE_HULL);
+
+- memset(&r, 0, sizeof(r));
+- r.type = VKD3DSPR_OUTPOINTID;
+- r.idx[0].offset = ~0u;
+- r.idx[1].offset = ~0u;
+- r.idx_count = 0;
++ vsir_register_init(&r, VKD3DSPR_OUTPOINTID, VKD3D_DATA_FLOAT, 0);
+ return spirv_compiler_get_register_id(compiler, &r);
+ }
+
+@@ -5262,10 +5308,7 @@ static void spirv_compiler_emit_hull_shader_builtins(struct spirv_compiler *comp
+ struct vkd3d_shader_dst_param dst;
+
+ memset(&dst, 0, sizeof(dst));
+- dst.reg.type = VKD3DSPR_OUTPOINTID;
+- dst.reg.idx[0].offset = ~0u;
+- dst.reg.idx[1].offset = ~0u;
+- dst.reg.idx_count = 0;
++ vsir_register_init(&dst.reg, VKD3DSPR_OUTPOINTID, VKD3D_DATA_FLOAT, 0);
+ dst.write_mask = VKD3DSP_WRITEMASK_0;
+ spirv_compiler_emit_input_register(compiler, &dst);
+ }
+@@ -5375,6 +5418,18 @@ static void spirv_compiler_emit_temps(struct spirv_compiler *compiler, uint32_t
+ vkd3d_spirv_end_function_stream_insertion(builder);
+ }
+
++static void spirv_compiler_allocate_ssa_register_ids(struct spirv_compiler *compiler, unsigned int count)
++{
++ assert(!compiler->ssa_register_ids);
++ if (!(compiler->ssa_register_ids = vkd3d_calloc(count, sizeof(*compiler->ssa_register_ids))))
++ {
++ ERR("Failed to allocate SSA register value id array, count %u.\n", count);
++ spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_OUT_OF_MEMORY,
++ "Failed to allocate SSA register value id array of count %u.", count);
++ }
++ compiler->ssa_register_count = count;
++}
++
+ static void spirv_compiler_emit_dcl_indexable_temp(struct spirv_compiler *compiler,
+ const struct vkd3d_shader_instruction *instruction)
+ {
+@@ -5388,11 +5443,8 @@ static void spirv_compiler_emit_dcl_indexable_temp(struct spirv_compil
+ if (temp->component_count != 4)
+ FIXME("Unhandled component count %u.\n", temp->component_count);
+
+- memset(&reg, 0, sizeof(reg));
+- reg.type = VKD3DSPR_IDXTEMP;
++ vsir_register_init(&reg, VKD3DSPR_IDXTEMP, VKD3D_DATA_FLOAT, 1);
+ reg.idx[0].offset = temp->register_idx;
+- reg.idx[1].offset = ~0u;
+- reg.idx_count = 1;
+
+ function_location = spirv_compiler_get_current_function_location(compiler);
+ vkd3d_spirv_begin_function_stream_insertion(builder, function_location);
+@@ -5571,15 +5623,12 @@ static void spirv_compiler_emit_cbv_declaration(struct spirv_compiler *compiler,
+ const SpvStorageClass storage_class = SpvStorageClassUniform;
+ struct vkd3d_push_constant_buffer_binding *push_cb;
+ struct vkd3d_descriptor_variable_info var_info;
++ struct vkd3d_shader_register reg;
+ struct vkd3d_symbol reg_symbol;
+ unsigned int size;
+
+- struct vkd3d_shader_register reg =
+- {
+- .type = VKD3DSPR_CONSTBUFFER,
+- .idx[0].offset = register_id,
+- .idx_count = 1,
+- };
++ vsir_register_init(&reg, VKD3DSPR_CONSTBUFFER, VKD3D_DATA_FLOAT, 1);
++ reg.idx[0].offset = register_id;
+
+ size = size_in_bytes / (VKD3D_VEC4_SIZE * sizeof(uint32_t));
+
+@@ -5644,8 +5693,7 @@ static void spirv_compiler_emit_dcl_immediate_constant_buffer(struct spirv_compi
+ vkd3d_spirv_build_op_name(builder, icb_id, "icb");
+ vkd3d_free(elements);
+
+- memset(&reg, 0, sizeof(reg));
+- reg.type = VKD3DSPR_IMMCONSTBUFFER;
++ vsir_register_init(&reg, VKD3DSPR_IMMCONSTBUFFER, VKD3D_DATA_FLOAT, 0);
+ vkd3d_symbol_make_register(&reg_symbol, &reg);
+ vkd3d_symbol_set_register_info(&reg_symbol, icb_id, SpvStorageClassPrivate,
+ VKD3D_SHADER_COMPONENT_FLOAT, VKD3DSP_WRITEMASK_ALL);
+@@ -5658,15 +5706,12 @@ static void spirv_compiler_emit_sampler_declaration(struct spirv_compi
+ const SpvStorageClass storage_class = SpvStorageClassUniformConstant;
+ struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
+ struct vkd3d_descriptor_variable_info var_info;
++ struct vkd3d_shader_register reg;
+ struct vkd3d_symbol reg_symbol;
+ uint32_t type_id, var_id;
+
+- const struct vkd3d_shader_register reg =
+- {
+- .type = VKD3DSPR_SAMPLER,
+- .idx[0].offset = register_id,
+- .idx_count = 1,
+- };
++ vsir_register_init(&reg, VKD3DSPR_SAMPLER, VKD3D_DATA_FLOAT, 1);
++ reg.idx[0].offset = register_id;
+
+ vkd3d_symbol_make_sampler(&reg_symbol, &reg);
+ reg_symbol.info.sampler.range = *range;
+@@ -5872,13 +5917,10 @@ static void spirv_compiler_emit_resource_declaration(struct spirv_compiler *comp
+ const struct vkd3d_spirv_resource_type *resource_type_info;
+ enum vkd3d_shader_component_type sampled_type;
+ struct vkd3d_symbol resource_symbol;
++ struct vkd3d_shader_register reg;
+
+- struct vkd3d_shader_register reg =
+- {
+- .type = is_uav ? VKD3DSPR_UAV : VKD3DSPR_RESOURCE,
+- .idx[0].offset = register_id,
+- .idx_count = 1,
+- };
++ vsir_register_init(&reg, is_uav ?
VKD3DSPR_UAV : VKD3DSPR_RESOURCE, VKD3D_DATA_FLOAT, 1); ++ reg.idx[0].offset = register_id; + + if (resource_type == VKD3D_SHADER_RESOURCE_TEXTURE_2DMS && sample_count == 1) + resource_type = VKD3D_SHADER_RESOURCE_TEXTURE_2D; +@@ -6361,20 +6403,13 @@ static void spirv_compiler_emit_default_control_point_phase(struct spirv_compile + invocation_id = spirv_compiler_emit_load_invocation_id(compiler); + + memset(&invocation, 0, sizeof(invocation)); +- invocation.reg.type = VKD3DSPR_OUTPOINTID; +- invocation.reg.data_type = VKD3D_DATA_INT; +- invocation.reg.idx[0].offset = ~0u; +- invocation.reg.idx[1].offset = ~0u; +- invocation.reg.idx[2].offset = ~0u; +- invocation.reg.idx_count = 0; ++ vsir_register_init(&invocation.reg, VKD3DSPR_OUTPOINTID, VKD3D_DATA_INT, 0); + invocation.swizzle = VKD3D_SHADER_NO_SWIZZLE; + +- memset(&input_reg, 0, sizeof(input_reg)); +- input_reg.type = VKD3DSPR_INPUT; +- input_reg.data_type = VKD3D_DATA_FLOAT; ++ vsir_register_init(&input_reg, VKD3DSPR_INPUT, VKD3D_DATA_FLOAT, 2); ++ input_reg.idx[0].offset = 0; + input_reg.idx[0].rel_addr = &invocation; +- input_reg.idx[2].offset = ~0u; +- input_reg.idx_count = 2; ++ input_reg.idx[1].offset = 0; + input_id = spirv_compiler_get_register_id(compiler, &input_reg); + + assert(input_signature->element_count == output_signature->element_count); +@@ -6521,8 +6556,6 @@ static SpvOp spirv_compiler_map_alu_instruction(const struct vkd3d_shader_instru + {VKD3DSIH_DTOI, SpvOpConvertFToS}, + {VKD3DSIH_DTOU, SpvOpConvertFToU}, + {VKD3DSIH_FTOD, SpvOpFConvert}, +- {VKD3DSIH_FTOI, SpvOpConvertFToS}, +- {VKD3DSIH_FTOU, SpvOpConvertFToU}, + {VKD3DSIH_IADD, SpvOpIAdd}, + {VKD3DSIH_INEG, SpvOpSNegate}, + {VKD3DSIH_ISHL, SpvOpShiftLeftLogical}, +@@ -6694,7 +6727,8 @@ static void spirv_compiler_emit_mov(struct spirv_compiler *compiler, + uint32_t components[VKD3D_VEC4_SIZE]; + unsigned int i, component_count; + +- if (register_is_constant_or_undef(&src->reg) || dst->modifiers || src->modifiers) ++ if (register_is_constant_or_undef(&src->reg) || src->reg.type == VKD3DSPR_SSA || dst->reg.type == VKD3DSPR_SSA ++ || dst->modifiers || src->modifiers) + goto general_implementation; + + spirv_compiler_get_register_info(compiler, &dst->reg, &dst_reg_info); +@@ -6988,6 +7022,84 @@ static void spirv_compiler_emit_udiv(struct spirv_compiler *compiler, + } + } + ++static void spirv_compiler_emit_ftoi(struct spirv_compiler *compiler, ++ const struct vkd3d_shader_instruction *instruction) ++{ ++ uint32_t src_id, int_min_id, int_max_id, zero_id, float_max_id, condition_id, val_id; ++ struct vkd3d_spirv_builder *builder = &compiler->spirv_builder; ++ const struct vkd3d_shader_dst_param *dst = instruction->dst; ++ const struct vkd3d_shader_src_param *src = instruction->src; ++ uint32_t src_type_id, dst_type_id, condition_type_id; ++ unsigned int component_count; ++ ++ assert(instruction->dst_count == 1); ++ assert(instruction->src_count == 1); ++ ++ /* OpConvertFToI has undefined results if the result cannot be represented ++ * as a signed integer, but Direct3D expects the result to saturate, ++ * and for NaN to yield zero. 
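++ * As a scalar sketch of the sequence emitted below:
++ *     v = max(src, -2147483648.0f);
++ *     i = (v >= 2147483648.0f) ? INT_MAX : (int32_t)v;
++ *     result = isnan(src) ? 0 : i;
++ * e.g. 3.0e9f saturates to 2147483647, -3.0e9f to -2147483648, and NaN becomes 0.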
*/ ++ ++ component_count = vkd3d_write_mask_component_count(dst->write_mask); ++ src_type_id = spirv_compiler_get_type_id_for_reg(compiler, &src->reg, dst->write_mask); ++ dst_type_id = spirv_compiler_get_type_id_for_dst(compiler, dst); ++ src_id = spirv_compiler_emit_load_src(compiler, src, dst->write_mask); ++ ++ int_min_id = spirv_compiler_get_constant_float_vector(compiler, -2147483648.0f, component_count); ++ val_id = vkd3d_spirv_build_op_glsl_std450_max(builder, src_type_id, src_id, int_min_id); ++ ++ float_max_id = spirv_compiler_get_constant_float_vector(compiler, 2147483648.0f, component_count); ++ int_max_id = spirv_compiler_get_constant_int_vector(compiler, INT_MAX, component_count); ++ condition_type_id = vkd3d_spirv_get_type_id(builder, VKD3D_SHADER_COMPONENT_BOOL, component_count); ++ condition_id = vkd3d_spirv_build_op_tr2(builder, &builder->function_stream, ++ SpvOpFOrdGreaterThanEqual, condition_type_id, val_id, float_max_id); ++ ++ val_id = vkd3d_spirv_build_op_tr1(builder, &builder->function_stream, SpvOpConvertFToS, dst_type_id, val_id); ++ val_id = vkd3d_spirv_build_op_select(builder, dst_type_id, condition_id, int_max_id, val_id); ++ ++ zero_id = spirv_compiler_get_constant_int_vector(compiler, 0, component_count); ++ condition_id = vkd3d_spirv_build_op_tr1(builder, &builder->function_stream, SpvOpIsNan, condition_type_id, src_id); ++ val_id = vkd3d_spirv_build_op_select(builder, dst_type_id, condition_id, zero_id, val_id); ++ ++ spirv_compiler_emit_store_dst(compiler, dst, val_id); ++} ++ ++static void spirv_compiler_emit_ftou(struct spirv_compiler *compiler, ++ const struct vkd3d_shader_instruction *instruction) ++{ ++ uint32_t src_id, zero_id, uint_max_id, float_max_id, condition_id, val_id; ++ struct vkd3d_spirv_builder *builder = &compiler->spirv_builder; ++ const struct vkd3d_shader_dst_param *dst = instruction->dst; ++ const struct vkd3d_shader_src_param *src = instruction->src; ++ uint32_t src_type_id, dst_type_id, condition_type_id; ++ unsigned int component_count; ++ ++ assert(instruction->dst_count == 1); ++ assert(instruction->src_count == 1); ++ ++ /* OpConvertFToU has undefined results if the result cannot be represented ++ * as an unsigned integer, but Direct3D expects the result to saturate, ++ * and for NaN to yield zero. 
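++ * As a scalar sketch of the sequence emitted below:
++ *     v = max(src, 0.0f);
++ *     u = (v >= 4294967296.0f) ? UINT_MAX : (uint32_t)v;
++ *     result = isnan(src) ? 0 : u;
++ * e.g. -1.0f clamps to 0, 5.0e9f saturates to 4294967295, and NaN becomes 0.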
*/ ++ ++ component_count = vkd3d_write_mask_component_count(dst->write_mask); ++ src_type_id = spirv_compiler_get_type_id_for_reg(compiler, &src->reg, dst->write_mask); ++ dst_type_id = spirv_compiler_get_type_id_for_dst(compiler, dst); ++ src_id = spirv_compiler_emit_load_src(compiler, src, dst->write_mask); ++ ++ zero_id = spirv_compiler_get_constant_float_vector(compiler, 0.0f, component_count); ++ val_id = vkd3d_spirv_build_op_glsl_std450_max(builder, src_type_id, src_id, zero_id); ++ ++ float_max_id = spirv_compiler_get_constant_float_vector(compiler, 4294967296.0f, component_count); ++ uint_max_id = spirv_compiler_get_constant_uint_vector(compiler, UINT_MAX, component_count); ++ condition_type_id = vkd3d_spirv_get_type_id(builder, VKD3D_SHADER_COMPONENT_BOOL, component_count); ++ condition_id = vkd3d_spirv_build_op_tr2(builder, &builder->function_stream, ++ SpvOpFOrdGreaterThanEqual, condition_type_id, val_id, float_max_id); ++ ++ val_id = vkd3d_spirv_build_op_tr1(builder, &builder->function_stream, SpvOpConvertFToU, dst_type_id, val_id); ++ val_id = vkd3d_spirv_build_op_select(builder, dst_type_id, condition_id, uint_max_id, val_id); ++ ++ spirv_compiler_emit_store_dst(compiler, dst, val_id); ++} ++ + static void spirv_compiler_emit_bitfield_instruction(struct spirv_compiler *compiler, + const struct vkd3d_shader_instruction *instruction) + { +@@ -9259,8 +9371,6 @@ static int spirv_compiler_handle_instruction(struct spirv_compiler *compiler, + case VKD3DSIH_DTOI: + case VKD3DSIH_DTOU: + case VKD3DSIH_FTOD: +- case VKD3DSIH_FTOI: +- case VKD3DSIH_FTOU: + case VKD3DSIH_IADD: + case VKD3DSIH_INEG: + case VKD3DSIH_ISHL: +@@ -9321,6 +9431,12 @@ static int spirv_compiler_handle_instruction(struct spirv_compiler *compiler, + case VKD3DSIH_UDIV: + spirv_compiler_emit_udiv(compiler, instruction); + break; ++ case VKD3DSIH_FTOI: ++ spirv_compiler_emit_ftoi(compiler, instruction); ++ break; ++ case VKD3DSIH_FTOU: ++ spirv_compiler_emit_ftou(compiler, instruction); ++ break; + case VKD3DSIH_DEQ: + case VKD3DSIH_DGE: + case VKD3DSIH_DLT: +@@ -9543,6 +9659,8 @@ static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler, + + if (parser->shader_desc.temp_count) + spirv_compiler_emit_temps(compiler, parser->shader_desc.temp_count); ++ if (parser->shader_desc.ssa_count) ++ spirv_compiler_allocate_ssa_register_ids(compiler, parser->shader_desc.ssa_count); + + spirv_compiler_emit_descriptor_declarations(compiler); + +diff --git a/libs/vkd3d/libs/vkd3d-shader/tpf.c b/libs/vkd3d/libs/vkd3d-shader/tpf.c +index 58b7f030dac..63771736eaa 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/tpf.c ++++ b/libs/vkd3d/libs/vkd3d-shader/tpf.c +@@ -519,6 +519,36 @@ enum vkd3d_sm4_dimension + VKD3D_SM4_DIMENSION_VEC4 = 0x2, + }; + ++static enum vsir_dimension vsir_dimension_from_sm4_dimension(enum vkd3d_sm4_dimension dim) ++{ ++ switch (dim) ++ { ++ case VKD3D_SM4_DIMENSION_NONE: ++ return VSIR_DIMENSION_NONE; ++ case VKD3D_SM4_DIMENSION_SCALAR: ++ return VSIR_DIMENSION_SCALAR; ++ case VKD3D_SM4_DIMENSION_VEC4: ++ return VSIR_DIMENSION_VEC4; ++ default: ++ FIXME("Unknown SM4 dimension %#x.\n", dim); ++ return VSIR_DIMENSION_NONE; ++ } ++} ++ ++static enum vkd3d_sm4_dimension sm4_dimension_from_vsir_dimension(enum vsir_dimension dim) ++{ ++ switch (dim) ++ { ++ case VSIR_DIMENSION_NONE: ++ return VKD3D_SM4_DIMENSION_NONE; ++ case VSIR_DIMENSION_SCALAR: ++ return VKD3D_SM4_DIMENSION_SCALAR; ++ case VSIR_DIMENSION_VEC4: ++ return VKD3D_SM4_DIMENSION_VEC4; ++ } ++ vkd3d_unreachable(); ++} ++ + enum 
vkd3d_sm4_resource_type
+ {
+ VKD3D_SM4_RESOURCE_BUFFER = 0x1,
+@@ -1713,10 +1743,12 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui
+ enum vkd3d_data_type data_type, struct vkd3d_shader_register *param, enum vkd3d_shader_src_modifier *modifier)
+ {
+ const struct vkd3d_sm4_register_type_info *register_type_info;
++ enum vkd3d_shader_register_type vsir_register_type;
+ enum vkd3d_sm4_register_precision precision;
+ enum vkd3d_sm4_register_type register_type;
+ enum vkd3d_sm4_extended_operand_type type;
+ enum vkd3d_sm4_register_modifier m;
++ enum vkd3d_sm4_dimension sm4_dimension;
+ uint32_t token, order, extended;
+
+ if (*ptr >= end)
+@@ -1731,15 +1763,18 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui
+ if (!register_type_info)
+ {
+ FIXME("Unhandled register type %#x.\n", register_type);
+- param->type = VKD3DSPR_TEMP;
++ vsir_register_type = VKD3DSPR_TEMP;
+ }
+ else
+ {
+- param->type = register_type_info->vkd3d_type;
++ vsir_register_type = register_type_info->vkd3d_type;
+ }
++
++ order = (token & VKD3D_SM4_REGISTER_ORDER_MASK) >> VKD3D_SM4_REGISTER_ORDER_SHIFT;
++
++ vsir_register_init(param, vsir_register_type, data_type, order);
+ param->precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT;
+ param->non_uniform = false;
+- param->data_type = data_type;
+
+ *modifier = VKD3DSPSM_NONE;
+ if (token & VKD3D_SM4_EXTENDED_OPERAND)
+@@ -1809,14 +1844,7 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui
+ }
+ }
+
+- order = (token & VKD3D_SM4_REGISTER_ORDER_MASK) >> VKD3D_SM4_REGISTER_ORDER_SHIFT;
+-
+- if (order < 1)
+- {
+- param->idx[0].offset = ~0u;
+- param->idx[0].rel_addr = NULL;
+- }
+- else
++ if (order >= 1)
+ {
+ DWORD addressing = (token & VKD3D_SM4_ADDRESSING_MASK0) >> VKD3D_SM4_ADDRESSING_SHIFT0;
+ if (!(shader_sm4_read_reg_idx(priv, ptr, end, addressing, &param->idx[0])))
+@@ -1826,12 +1854,7 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui
+ }
+ }
+
+- if (order < 2)
+- {
+- param->idx[1].offset = ~0u;
+- param->idx[1].rel_addr = NULL;
+- }
+- else
++ if (order >= 2)
+ {
+ DWORD addressing = (token & VKD3D_SM4_ADDRESSING_MASK1) >> VKD3D_SM4_ADDRESSING_SHIFT1;
+ if (!(shader_sm4_read_reg_idx(priv, ptr, end, addressing, &param->idx[1])))
+@@ -1841,12 +1864,7 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui
+ }
+ }
+
+- if (order < 3)
+- {
+- param->idx[2].offset = ~0u;
+- param->idx[2].rel_addr = NULL;
+- }
+- else
++ if (order >= 3)
+ {
+ DWORD addressing = (token & VKD3D_SM4_ADDRESSING_MASK2) >> VKD3D_SM4_ADDRESSING_SHIFT2;
+ if (!(shader_sm4_read_reg_idx(priv, ptr, end, addressing, &param->idx[2])))
+@@ -1862,17 +1880,16 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui
+ return false;
+ }
+
+- param->idx_count = order;
++ sm4_dimension = (token & VKD3D_SM4_DIMENSION_MASK) >> VKD3D_SM4_DIMENSION_SHIFT;
++ param->dimension = vsir_dimension_from_sm4_dimension(sm4_dimension);
+
+ if (register_type == VKD3D_SM4_RT_IMMCONST || register_type == VKD3D_SM4_RT_IMMCONST64)
+ {
+- enum vkd3d_sm4_dimension dimension = (token & VKD3D_SM4_DIMENSION_MASK) >> VKD3D_SM4_DIMENSION_SHIFT;
+ unsigned int dword_count;
+
+- switch (dimension)
++ switch (param->dimension)
+ {
+- case VKD3D_SM4_DIMENSION_SCALAR:
+- param->immconst_type = VKD3D_IMMCONST_SCALAR;
++ case VSIR_DIMENSION_SCALAR:
+ dword_count = 1 + (register_type == VKD3D_SM4_RT_IMMCONST64);
+ if (end - *ptr < dword_count)
+ {
+@@ -1883,8 
+1900,7 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui + *ptr += dword_count; + break; + +- case VKD3D_SM4_DIMENSION_VEC4: +- param->immconst_type = VKD3D_IMMCONST_VEC4; ++ case VSIR_DIMENSION_VEC4: + if (end - *ptr < VKD3D_VEC4_SIZE) + { + WARN("Invalid ptr %p, end %p.\n", *ptr, end); +@@ -1895,7 +1911,7 @@ static bool shader_sm4_read_param(struct vkd3d_shader_sm4_parser *priv, const ui + break; + + default: +- FIXME("Unhandled dimension %#x.\n", dimension); ++ FIXME("Unhandled dimension %#x.\n", param->dimension); + break; + } + } +@@ -1938,6 +1954,16 @@ static uint32_t swizzle_from_sm4(uint32_t s) + return vkd3d_shader_create_swizzle(s & 0x3, (s >> 2) & 0x3, (s >> 4) & 0x3, (s >> 6) & 0x3); + } + ++static uint32_t swizzle_to_sm4(uint32_t s) ++{ ++ uint32_t ret = 0; ++ ret |= ((vkd3d_swizzle_get_component(s, 0)) & 0x3); ++ ret |= ((vkd3d_swizzle_get_component(s, 1)) & 0x3) << 2; ++ ret |= ((vkd3d_swizzle_get_component(s, 2)) & 0x3) << 4; ++ ret |= ((vkd3d_swizzle_get_component(s, 3)) & 0x3) << 6; ++ return ret; ++} ++ + static bool register_is_input_output(const struct vkd3d_shader_register *reg) + { + switch (reg->type) +@@ -3587,16 +3613,6 @@ static uint32_t sm4_encode_instruction_modifier(const struct sm4_instruction_mod + return word; + } + +-struct sm4_register +-{ +- enum vkd3d_shader_register_type type; +- struct vkd3d_shader_register_index idx[2]; +- unsigned int idx_count; +- enum vkd3d_sm4_dimension dim; +- uint32_t immconst_uint[4]; +- unsigned int mod; +-}; +- + struct sm4_instruction + { + enum vkd3d_sm4_opcode opcode; +@@ -3604,18 +3620,15 @@ struct sm4_instruction + struct sm4_instruction_modifier modifiers[1]; + unsigned int modifier_count; + +- struct sm4_dst_register +- { +- struct sm4_register reg; +- unsigned int writemask; +- } dsts[2]; ++ struct vkd3d_shader_dst_param dsts[2]; + unsigned int dst_count; + + struct sm4_src_register + { +- struct sm4_register reg; ++ struct vkd3d_shader_register reg; + enum vkd3d_sm4_swizzle_type swizzle_type; +- unsigned int swizzle; ++ DWORD swizzle; ++ unsigned int mod; + } srcs[5]; + unsigned int src_count; + +@@ -3625,7 +3638,7 @@ struct sm4_instruction + unsigned int idx_count; + }; + +-static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *reg, ++static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct vkd3d_shader_register *reg, + unsigned int *writemask, enum vkd3d_sm4_swizzle_type *swizzle_type, + const struct hlsl_deref *deref) + { +@@ -3639,7 +3652,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + if (regset == HLSL_REGSET_TEXTURES) + { + reg->type = VKD3DSPR_RESOURCE; +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + if (swizzle_type) + *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4; + reg->idx[0].offset = var->regs[HLSL_REGSET_TEXTURES].id; +@@ -3651,7 +3664,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + else if (regset == HLSL_REGSET_UAVS) + { + reg->type = VKD3DSPR_UAV; +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + if (swizzle_type) + *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4; + reg->idx[0].offset = var->regs[HLSL_REGSET_UAVS].id; +@@ -3663,7 +3676,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + else if (regset == HLSL_REGSET_SAMPLERS) + { + reg->type = VKD3DSPR_SAMPLER; +- reg->dim = VKD3D_SM4_DIMENSION_NONE; ++ reg->dimension = VSIR_DIMENSION_NONE; + if (swizzle_type) + 
*swizzle_type = VKD3D_SM4_SWIZZLE_NONE; + reg->idx[0].offset = var->regs[HLSL_REGSET_SAMPLERS].id; +@@ -3678,7 +3691,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + + assert(data_type->class <= HLSL_CLASS_VECTOR); + reg->type = VKD3DSPR_CONSTBUFFER; +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + if (swizzle_type) + *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4; + reg->idx[0].offset = var->buffer->reg.id; +@@ -3701,7 +3714,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + reg->idx_count = 1; + } + +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + *writemask = ((1u << data_type->dimx) - 1) << (offset % 4); + } + else +@@ -3710,7 +3723,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + + assert(hlsl_reg.allocated); + reg->type = VKD3DSPR_INPUT; +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + if (swizzle_type) + *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4; + reg->idx[0].offset = hlsl_reg.id; +@@ -3733,9 +3746,9 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + } + + if (reg->type == VKD3DSPR_DEPTHOUT) +- reg->dim = VKD3D_SM4_DIMENSION_SCALAR; ++ reg->dimension = VSIR_DIMENSION_SCALAR; + else +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + *writemask = ((1u << data_type->dimx) - 1) << (offset % 4); + } + else +@@ -3744,7 +3757,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + + assert(hlsl_reg.allocated); + reg->type = VKD3DSPR_OUTPUT; +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + reg->idx[0].offset = hlsl_reg.id; + reg->idx_count = 1; + *writemask = hlsl_reg.writemask; +@@ -3756,7 +3769,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + + assert(hlsl_reg.allocated); + reg->type = VKD3DSPR_TEMP; +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + if (swizzle_type) + *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4; + reg->idx[0].offset = hlsl_reg.id; +@@ -3768,53 +3781,57 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r + static void sm4_src_from_deref(struct hlsl_ctx *ctx, struct sm4_src_register *src, + const struct hlsl_deref *deref, unsigned int map_writemask) + { +- unsigned int writemask; ++ unsigned int writemask, hlsl_swizzle; + + sm4_register_from_deref(ctx, &src->reg, &writemask, &src->swizzle_type, deref); + if (src->swizzle_type == VKD3D_SM4_SWIZZLE_VEC4) +- src->swizzle = hlsl_map_swizzle(hlsl_swizzle_from_writemask(writemask), map_writemask); ++ { ++ hlsl_swizzle = hlsl_map_swizzle(hlsl_swizzle_from_writemask(writemask), map_writemask); ++ src->swizzle = swizzle_from_sm4(hlsl_swizzle); ++ } + } + +-static void sm4_register_from_node(struct sm4_register *reg, unsigned int *writemask, ++static void sm4_register_from_node(struct vkd3d_shader_register *reg, unsigned int *writemask, + enum vkd3d_sm4_swizzle_type *swizzle_type, const struct hlsl_ir_node *instr) + { + assert(instr->reg.allocated); + reg->type = VKD3DSPR_TEMP; +- reg->dim = VKD3D_SM4_DIMENSION_VEC4; ++ reg->dimension = VSIR_DIMENSION_VEC4; + *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4; + reg->idx[0].offset = instr->reg.id; + reg->idx_count = 1; + *writemask = instr->reg.writemask; + } + +-static void sm4_dst_from_node(struct sm4_dst_register *dst, const struct hlsl_ir_node *instr) ++static 
void sm4_dst_from_node(struct vkd3d_shader_dst_param *dst, const struct hlsl_ir_node *instr) + { + unsigned int swizzle_type; + +- sm4_register_from_node(&dst->reg, &dst->writemask, &swizzle_type, instr); ++ sm4_register_from_node(&dst->reg, &dst->write_mask, &swizzle_type, instr); + } + + static void sm4_src_from_constant_value(struct sm4_src_register *src, + const struct hlsl_constant_value *value, unsigned int width, unsigned int map_writemask) + { ++ src->swizzle = VKD3D_SHADER_NO_SWIZZLE; + src->swizzle_type = VKD3D_SM4_SWIZZLE_NONE; + src->reg.type = VKD3DSPR_IMMCONST; + if (width == 1) + { +- src->reg.dim = VKD3D_SM4_DIMENSION_SCALAR; +- src->reg.immconst_uint[0] = value->u[0].u; ++ src->reg.dimension = VSIR_DIMENSION_SCALAR; ++ src->reg.u.immconst_uint[0] = value->u[0].u; + } + else + { + unsigned int i, j = 0; + +- src->reg.dim = VKD3D_SM4_DIMENSION_VEC4; ++ src->reg.dimension = VSIR_DIMENSION_VEC4; + for (i = 0; i < 4; ++i) + { + if ((map_writemask & (1u << i)) && (j < width)) +- src->reg.immconst_uint[i] = value->u[j++].u; ++ src->reg.u.immconst_uint[i] = value->u[j++].u; + else +- src->reg.immconst_uint[i] = 0; ++ src->reg.u.immconst_uint[i] = 0; + } + } + } +@@ -3822,7 +3839,7 @@ static void sm4_src_from_constant_value(struct sm4_src_register *src, + static void sm4_src_from_node(struct sm4_src_register *src, + const struct hlsl_ir_node *instr, unsigned int map_writemask) + { +- unsigned int writemask; ++ unsigned int writemask, hlsl_swizzle; + + if (instr->type == HLSL_IR_CONSTANT) + { +@@ -3834,10 +3851,13 @@ static void sm4_src_from_node(struct sm4_src_register *src, + + sm4_register_from_node(&src->reg, &writemask, &src->swizzle_type, instr); + if (src->swizzle_type == VKD3D_SM4_SWIZZLE_VEC4) +- src->swizzle = hlsl_map_swizzle(hlsl_swizzle_from_writemask(writemask), map_writemask); ++ { ++ hlsl_swizzle = hlsl_map_swizzle(hlsl_swizzle_from_writemask(writemask), map_writemask); ++ src->swizzle = swizzle_from_sm4(hlsl_swizzle); ++ } + } + +-static void sm4_write_dst_register(const struct tpf_writer *tpf, const struct sm4_dst_register *dst) ++static void sm4_write_dst_register(const struct tpf_writer *tpf, const struct vkd3d_shader_dst_param *dst) + { + const struct vkd3d_sm4_register_type_info *register_type_info; + struct vkd3d_bytecode_buffer *buffer = tpf->buffer; +@@ -3856,13 +3876,13 @@ static void sm4_write_dst_register(const struct tpf_writer *tpf, const struct sm + sm4_reg_type = register_type_info->sm4_type; + } + +- reg_dim = dst->reg.dim; ++ reg_dim = sm4_dimension_from_vsir_dimension(dst->reg.dimension); + + token |= sm4_reg_type << VKD3D_SM4_REGISTER_TYPE_SHIFT; + token |= dst->reg.idx_count << VKD3D_SM4_REGISTER_ORDER_SHIFT; + token |= reg_dim << VKD3D_SM4_DIMENSION_SHIFT; + if (reg_dim == VKD3D_SM4_DIMENSION_VEC4) +- token |= dst->writemask << VKD3D_SM4_WRITEMASK_SHIFT; ++ token |= dst->write_mask << VKD3D_SM4_WRITEMASK_SHIFT; + put_u32(buffer, token); + + for (j = 0; j < dst->reg.idx_count; ++j) +@@ -3891,7 +3911,7 @@ static void sm4_write_src_register(const struct tpf_writer *tpf, const struct sm + sm4_reg_type = register_type_info->sm4_type; + } + +- reg_dim = src->reg.dim; ++ reg_dim = sm4_dimension_from_vsir_dimension(src->reg.dimension); + + token |= sm4_reg_type << VKD3D_SM4_REGISTER_TYPE_SHIFT; + token |= src->reg.idx_count << VKD3D_SM4_REGISTER_ORDER_SHIFT; +@@ -3899,14 +3919,14 @@ static void sm4_write_src_register(const struct tpf_writer *tpf, const struct sm + if (reg_dim == VKD3D_SM4_DIMENSION_VEC4) + { + token |= (uint32_t)src->swizzle_type << 
VKD3D_SM4_SWIZZLE_TYPE_SHIFT; +- token |= src->swizzle << VKD3D_SM4_SWIZZLE_SHIFT; ++ token |= swizzle_to_sm4(src->swizzle) << VKD3D_SM4_SWIZZLE_SHIFT; + } +- if (src->reg.mod) ++ if (src->mod) + token |= VKD3D_SM4_EXTENDED_OPERAND; + put_u32(buffer, token); + +- if (src->reg.mod) +- put_u32(buffer, (src->reg.mod << VKD3D_SM4_REGISTER_MODIFIER_SHIFT) ++ if (src->mod) ++ put_u32(buffer, (src->mod << VKD3D_SM4_REGISTER_MODIFIER_SHIFT) + | VKD3D_SM4_EXTENDED_OPERAND_MODIFIER); + + for (j = 0; j < src->reg.idx_count; ++j) +@@ -3917,23 +3937,32 @@ static void sm4_write_src_register(const struct tpf_writer *tpf, const struct sm + + if (src->reg.type == VKD3DSPR_IMMCONST) + { +- put_u32(buffer, src->reg.immconst_uint[0]); ++ put_u32(buffer, src->reg.u.immconst_uint[0]); + if (reg_dim == VKD3D_SM4_DIMENSION_VEC4) + { +- put_u32(buffer, src->reg.immconst_uint[1]); +- put_u32(buffer, src->reg.immconst_uint[2]); +- put_u32(buffer, src->reg.immconst_uint[3]); ++ put_u32(buffer, src->reg.u.immconst_uint[1]); ++ put_u32(buffer, src->reg.u.immconst_uint[2]); ++ put_u32(buffer, src->reg.u.immconst_uint[3]); + } + } + } + +-static uint32_t sm4_register_order(const struct sm4_register *reg) ++static uint32_t vkd3d_shader_dst_param_order(const struct vkd3d_shader_dst_param *dst) ++{ ++ uint32_t order = 1; ++ if (dst->reg.type == VKD3DSPR_IMMCONST) ++ order += dst->reg.dimension == VSIR_DIMENSION_VEC4 ? 4 : 1; ++ order += dst->reg.idx_count; ++ return order; ++} ++ ++static uint32_t sm4_src_register_order(const struct sm4_src_register *src) + { + uint32_t order = 1; +- if (reg->type == VKD3DSPR_IMMCONST) +- order += reg->dim == VKD3D_SM4_DIMENSION_VEC4 ? 4 : 1; +- order += reg->idx_count; +- if (reg->mod) ++ if (src->reg.type == VKD3DSPR_IMMCONST) ++ order += src->reg.dimension == VSIR_DIMENSION_VEC4 ? 
4 : 1; ++ order += src->reg.idx_count; ++ if (src->mod) + ++order; + return order; + } +@@ -3946,9 +3975,9 @@ static void write_sm4_instruction(const struct tpf_writer *tpf, const struct sm4 + + size += instr->modifier_count; + for (i = 0; i < instr->dst_count; ++i) +- size += sm4_register_order(&instr->dsts[i].reg); ++ size += vkd3d_shader_dst_param_order(&instr->dsts[i]); + for (i = 0; i < instr->src_count; ++i) +- size += sm4_register_order(&instr->srcs[i].reg); ++ size += sm4_src_register_order(&instr->srcs[i]); + size += instr->idx_count; + if (instr->byte_stride) + ++size; +@@ -4013,13 +4042,13 @@ static void write_sm4_dcl_constant_buffer(const struct tpf_writer *tpf, const st + { + .opcode = VKD3D_SM4_OP_DCL_CONSTANT_BUFFER, + +- .srcs[0].reg.dim = VKD3D_SM4_DIMENSION_VEC4, ++ .srcs[0].reg.dimension = VSIR_DIMENSION_VEC4, + .srcs[0].reg.type = VKD3DSPR_CONSTBUFFER, + .srcs[0].reg.idx[0].offset = cbuffer->reg.id, + .srcs[0].reg.idx[1].offset = (cbuffer->used_size + 3) / 4, + .srcs[0].reg.idx_count = 2, + .srcs[0].swizzle_type = VKD3D_SM4_SWIZZLE_VEC4, +- .srcs[0].swizzle = HLSL_SWIZZLE(X, Y, Z, W), ++ .srcs[0].swizzle = VKD3D_SHADER_NO_SWIZZLE, + .src_count = 1, + }; + write_sm4_instruction(tpf, &instr); +@@ -4121,7 +4150,7 @@ static void write_sm4_dcl_semantic(const struct tpf_writer *tpf, const struct hl + + struct sm4_instruction instr = + { +- .dsts[0].reg.dim = VKD3D_SM4_DIMENSION_VEC4, ++ .dsts[0].reg.dimension = VSIR_DIMENSION_VEC4, + .dst_count = 1, + }; + +@@ -4136,18 +4165,18 @@ static void write_sm4_dcl_semantic(const struct tpf_writer *tpf, const struct hl + { + instr.dsts[0].reg.idx_count = 0; + } +- instr.dsts[0].writemask = (1 << var->data_type->dimx) - 1; ++ instr.dsts[0].write_mask = (1 << var->data_type->dimx) - 1; + } + else + { + instr.dsts[0].reg.type = output ? 
VKD3DSPR_OUTPUT : VKD3DSPR_INPUT; + instr.dsts[0].reg.idx[0].offset = var->regs[HLSL_REGSET_NUMERIC].id; + instr.dsts[0].reg.idx_count = 1; +- instr.dsts[0].writemask = var->regs[HLSL_REGSET_NUMERIC].writemask; ++ instr.dsts[0].write_mask = var->regs[HLSL_REGSET_NUMERIC].writemask; + } + + if (instr.dsts[0].reg.type == VKD3DSPR_DEPTHOUT) +- instr.dsts[0].reg.dim = VKD3D_SM4_DIMENSION_SCALAR; ++ instr.dsts[0].reg.dimension = VSIR_DIMENSION_SCALAR; + + hlsl_sm4_usage_from_semantic(tpf->ctx, &var->semantic, output, &usage); + if (usage == ~0u) +@@ -4261,8 +4290,8 @@ static void write_sm4_unary_op(const struct tpf_writer *tpf, enum vkd3d_sm4_opco + sm4_dst_from_node(&instr.dsts[0], dst); + instr.dst_count = 1; + +- sm4_src_from_node(&instr.srcs[0], src, instr.dsts[0].writemask); +- instr.srcs[0].reg.mod = src_mod; ++ sm4_src_from_node(&instr.srcs[0], src, instr.dsts[0].write_mask); ++ instr.srcs[0].mod = src_mod; + instr.src_count = 1; + + write_sm4_instruction(tpf, &instr); +@@ -4280,11 +4309,11 @@ static void write_sm4_unary_op_with_two_destinations(const struct tpf_writer *tp + sm4_dst_from_node(&instr.dsts[dst_idx], dst); + assert(1 - dst_idx >= 0); + instr.dsts[1 - dst_idx].reg.type = VKD3DSPR_NULL; +- instr.dsts[1 - dst_idx].reg.dim = VKD3D_SM4_DIMENSION_NONE; ++ instr.dsts[1 - dst_idx].reg.dimension = VSIR_DIMENSION_NONE; + instr.dsts[1 - dst_idx].reg.idx_count = 0; + instr.dst_count = 2; + +- sm4_src_from_node(&instr.srcs[0], src, instr.dsts[dst_idx].writemask); ++ sm4_src_from_node(&instr.srcs[0], src, instr.dsts[dst_idx].write_mask); + instr.src_count = 1; + + write_sm4_instruction(tpf, &instr); +@@ -4301,8 +4330,8 @@ static void write_sm4_binary_op(const struct tpf_writer *tpf, enum vkd3d_sm4_opc + sm4_dst_from_node(&instr.dsts[0], dst); + instr.dst_count = 1; + +- sm4_src_from_node(&instr.srcs[0], src1, instr.dsts[0].writemask); +- sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[0].writemask); ++ sm4_src_from_node(&instr.srcs[0], src1, instr.dsts[0].write_mask); ++ sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[0].write_mask); + instr.src_count = 2; + + write_sm4_instruction(tpf, &instr); +@@ -4340,12 +4369,12 @@ static void write_sm4_binary_op_with_two_destinations(const struct tpf_writer *t + sm4_dst_from_node(&instr.dsts[dst_idx], dst); + assert(1 - dst_idx >= 0); + instr.dsts[1 - dst_idx].reg.type = VKD3DSPR_NULL; +- instr.dsts[1 - dst_idx].reg.dim = VKD3D_SM4_DIMENSION_NONE; ++ instr.dsts[1 - dst_idx].reg.dimension = VSIR_DIMENSION_NONE; + instr.dsts[1 - dst_idx].reg.idx_count = 0; + instr.dst_count = 2; + +- sm4_src_from_node(&instr.srcs[0], src1, instr.dsts[dst_idx].writemask); +- sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[dst_idx].writemask); ++ sm4_src_from_node(&instr.srcs[0], src1, instr.dsts[dst_idx].write_mask); ++ sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[dst_idx].write_mask); + instr.src_count = 2; + + write_sm4_instruction(tpf, &instr); +@@ -4363,9 +4392,9 @@ static void write_sm4_ternary_op(const struct tpf_writer *tpf, enum vkd3d_sm4_op + sm4_dst_from_node(&instr.dsts[0], dst); + instr.dst_count = 1; + +- sm4_src_from_node(&instr.srcs[0], src1, instr.dsts[0].writemask); +- sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[0].writemask); +- sm4_src_from_node(&instr.srcs[2], src3, instr.dsts[0].writemask); ++ sm4_src_from_node(&instr.srcs[0], src1, instr.dsts[0].write_mask); ++ sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[0].write_mask); ++ sm4_src_from_node(&instr.srcs[2], src3, instr.dsts[0].write_mask); + instr.src_count = 3; + + 
write_sm4_instruction(tpf, &instr); +@@ -4416,7 +4445,7 @@ static void write_sm4_ld(const struct tpf_writer *tpf, const struct hlsl_ir_node + + sm4_src_from_node(&instr.srcs[0], coords, coords_writemask); + +- sm4_src_from_deref(tpf->ctx, &instr.srcs[1], resource, instr.dsts[0].writemask); ++ sm4_src_from_deref(tpf->ctx, &instr.srcs[1], resource, instr.dsts[0].write_mask); + + instr.src_count = 2; + +@@ -4424,7 +4453,7 @@ static void write_sm4_ld(const struct tpf_writer *tpf, const struct hlsl_ir_node + { + if (sample_index->type == HLSL_IR_CONSTANT) + { +- struct sm4_register *reg = &instr.srcs[2].reg; ++ struct vkd3d_shader_register *reg = &instr.srcs[2].reg; + struct hlsl_ir_constant *index; + + index = hlsl_ir_constant(sample_index); +@@ -4432,8 +4461,8 @@ static void write_sm4_ld(const struct tpf_writer *tpf, const struct hlsl_ir_node + memset(&instr.srcs[2], 0, sizeof(instr.srcs[2])); + instr.srcs[2].swizzle_type = VKD3D_SM4_SWIZZLE_NONE; + reg->type = VKD3DSPR_IMMCONST; +- reg->dim = VKD3D_SM4_DIMENSION_SCALAR; +- reg->immconst_uint[0] = index->value.u[0].u; ++ reg->dimension = VSIR_DIMENSION_SCALAR; ++ reg->u.immconst_uint[0] = index->value.u[0].u; + } + else if (tpf->ctx->profile->major_version == 4 && tpf->ctx->profile->minor_version == 0) + { +@@ -4504,7 +4533,7 @@ static void write_sm4_sample(const struct tpf_writer *tpf, const struct hlsl_ir_ + instr.dst_count = 1; + + sm4_src_from_node(&instr.srcs[0], coords, VKD3DSP_WRITEMASK_ALL); +- sm4_src_from_deref(tpf->ctx, &instr.srcs[1], resource, instr.dsts[0].writemask); ++ sm4_src_from_deref(tpf->ctx, &instr.srcs[1], resource, instr.dsts[0].write_mask); + sm4_src_from_deref(tpf->ctx, &instr.srcs[2], sampler, VKD3DSP_WRITEMASK_ALL); + instr.src_count = 3; + +@@ -4546,7 +4575,7 @@ static void write_sm4_sampleinfo(const struct tpf_writer *tpf, const struct hlsl + sm4_dst_from_node(&instr.dsts[0], dst); + instr.dst_count = 1; + +- sm4_src_from_deref(tpf->ctx, &instr.srcs[0], resource, instr.dsts[0].writemask); ++ sm4_src_from_deref(tpf->ctx, &instr.srcs[0], resource, instr.dsts[0].write_mask); + instr.src_count = 1; + + write_sm4_instruction(tpf, &instr); +@@ -4569,7 +4598,7 @@ static void write_sm4_resinfo(const struct tpf_writer *tpf, const struct hlsl_ir + instr.dst_count = 1; + + sm4_src_from_node(&instr.srcs[0], load->lod.node, VKD3DSP_WRITEMASK_ALL); +- sm4_src_from_deref(tpf->ctx, &instr.srcs[1], resource, instr.dsts[0].writemask); ++ sm4_src_from_deref(tpf->ctx, &instr.srcs[1], resource, instr.dsts[0].write_mask); + instr.src_count = 2; + + write_sm4_instruction(tpf, &instr); +@@ -4591,11 +4620,11 @@ static void write_sm4_cast_from_bool(const struct tpf_writer *tpf, const struct + sm4_dst_from_node(&instr.dsts[0], &expr->node); + instr.dst_count = 1; + +- sm4_src_from_node(&instr.srcs[0], arg, instr.dsts[0].writemask); ++ sm4_src_from_node(&instr.srcs[0], arg, instr.dsts[0].write_mask); + instr.srcs[1].swizzle_type = VKD3D_SM4_SWIZZLE_NONE; + instr.srcs[1].reg.type = VKD3DSPR_IMMCONST; +- instr.srcs[1].reg.dim = VKD3D_SM4_DIMENSION_SCALAR; +- instr.srcs[1].reg.immconst_uint[0] = mask; ++ instr.srcs[1].reg.dimension = VSIR_DIMENSION_SCALAR; ++ instr.srcs[1].reg.u.immconst_uint[0] = mask; + instr.src_count = 2; + + write_sm4_instruction(tpf, &instr); +@@ -4718,7 +4747,7 @@ static void write_sm4_store_uav_typed(const struct tpf_writer *tpf, const struct + memset(&instr, 0, sizeof(instr)); + instr.opcode = VKD3D_SM5_OP_STORE_UAV_TYPED; + +- sm4_register_from_deref(tpf->ctx, &instr.dsts[0].reg, &instr.dsts[0].writemask, NULL, dst); 
++ sm4_register_from_deref(tpf->ctx, &instr.dsts[0].reg, &instr.dsts[0].write_mask, NULL, dst); + instr.dst_count = 1; + + sm4_src_from_node(&instr.srcs[0], coords, VKD3DSP_WRITEMASK_ALL); +@@ -5260,19 +5289,19 @@ static void write_sm4_load(const struct tpf_writer *tpf, const struct hlsl_ir_lo + + instr.opcode = VKD3D_SM4_OP_MOVC; + +- sm4_src_from_deref(tpf->ctx, &instr.srcs[0], &load->src, instr.dsts[0].writemask); ++ sm4_src_from_deref(tpf->ctx, &instr.srcs[0], &load->src, instr.dsts[0].write_mask); + + memset(&value, 0xff, sizeof(value)); +- sm4_src_from_constant_value(&instr.srcs[1], &value, type->dimx, instr.dsts[0].writemask); ++ sm4_src_from_constant_value(&instr.srcs[1], &value, type->dimx, instr.dsts[0].write_mask); + memset(&value, 0, sizeof(value)); +- sm4_src_from_constant_value(&instr.srcs[2], &value, type->dimx, instr.dsts[0].writemask); ++ sm4_src_from_constant_value(&instr.srcs[2], &value, type->dimx, instr.dsts[0].write_mask); + instr.src_count = 3; + } + else + { + instr.opcode = VKD3D_SM4_OP_MOV; + +- sm4_src_from_deref(tpf->ctx, &instr.srcs[0], &load->src, instr.dsts[0].writemask); ++ sm4_src_from_deref(tpf->ctx, &instr.srcs[0], &load->src, instr.dsts[0].write_mask); + instr.src_count = 1; + } + +@@ -5295,8 +5324,8 @@ static void write_sm4_loop(const struct tpf_writer *tpf, const struct hlsl_ir_lo + } + + static void write_sm4_gather(const struct tpf_writer *tpf, const struct hlsl_ir_node *dst, +- const struct hlsl_deref *resource, const struct hlsl_deref *sampler, const struct hlsl_ir_node *coords, +- unsigned int swizzle, const struct hlsl_ir_node *texel_offset) ++ const struct hlsl_deref *resource, const struct hlsl_deref *sampler, ++ const struct hlsl_ir_node *coords, DWORD swizzle, const struct hlsl_ir_node *texel_offset) + { + struct sm4_src_register *src; + struct sm4_instruction instr; +@@ -5325,11 +5354,11 @@ static void write_sm4_gather(const struct tpf_writer *tpf, const struct hlsl_ir_ + } + } + +- sm4_src_from_deref(tpf->ctx, &instr.srcs[instr.src_count++], resource, instr.dsts[0].writemask); ++ sm4_src_from_deref(tpf->ctx, &instr.srcs[instr.src_count++], resource, instr.dsts[0].write_mask); + + src = &instr.srcs[instr.src_count++]; + sm4_src_from_deref(tpf->ctx, src, sampler, VKD3DSP_WRITEMASK_ALL); +- src->reg.dim = VKD3D_SM4_DIMENSION_VEC4; ++ src->reg.dimension = VSIR_DIMENSION_VEC4; + src->swizzle_type = VKD3D_SM4_SWIZZLE_SCALAR; + src->swizzle = swizzle; + +@@ -5374,22 +5403,22 @@ static void write_sm4_resource_load(const struct tpf_writer *tpf, const struct h + + case HLSL_RESOURCE_GATHER_RED: + write_sm4_gather(tpf, &load->node, &load->resource, &load->sampler, coords, +- HLSL_SWIZZLE(X, X, X, X), texel_offset); ++ VKD3D_SHADER_SWIZZLE(X, X, X, X), texel_offset); + break; + + case HLSL_RESOURCE_GATHER_GREEN: + write_sm4_gather(tpf, &load->node, &load->resource, &load->sampler, coords, +- HLSL_SWIZZLE(Y, Y, Y, Y), texel_offset); ++ VKD3D_SHADER_SWIZZLE(Y, Y, Y, Y), texel_offset); + break; + + case HLSL_RESOURCE_GATHER_BLUE: + write_sm4_gather(tpf, &load->node, &load->resource, &load->sampler, coords, +- HLSL_SWIZZLE(Z, Z, Z, Z), texel_offset); ++ VKD3D_SHADER_SWIZZLE(Z, Z, Z, Z), texel_offset); + break; + + case HLSL_RESOURCE_GATHER_ALPHA: + write_sm4_gather(tpf, &load->node, &load->resource, &load->sampler, coords, +- HLSL_SWIZZLE(W, W, W, W), texel_offset); ++ VKD3D_SHADER_SWIZZLE(W, W, W, W), texel_offset); + break; + + case HLSL_RESOURCE_SAMPLE_INFO: +@@ -5431,10 +5460,10 @@ static void write_sm4_store(const struct tpf_writer *tpf, const struct 
hlsl_ir_s + instr.opcode = VKD3D_SM4_OP_MOV; + + sm4_register_from_deref(tpf->ctx, &instr.dsts[0].reg, &writemask, NULL, &store->lhs); +- instr.dsts[0].writemask = hlsl_combine_writemasks(writemask, store->writemask); ++ instr.dsts[0].write_mask = hlsl_combine_writemasks(writemask, store->writemask); + instr.dst_count = 1; + +- sm4_src_from_node(&instr.srcs[0], rhs, instr.dsts[0].writemask); ++ sm4_src_from_node(&instr.srcs[0], rhs, instr.dsts[0].write_mask); + instr.src_count = 1; + + write_sm4_instruction(tpf, &instr); +@@ -5442,8 +5471,8 @@ static void write_sm4_store(const struct tpf_writer *tpf, const struct hlsl_ir_s + + static void write_sm4_swizzle(const struct tpf_writer *tpf, const struct hlsl_ir_swizzle *swizzle) + { ++ unsigned int writemask, hlsl_swizzle; + struct sm4_instruction instr; +- unsigned int writemask; + + memset(&instr, 0, sizeof(instr)); + instr.opcode = VKD3D_SM4_OP_MOV; +@@ -5452,8 +5481,9 @@ static void write_sm4_swizzle(const struct tpf_writer *tpf, const struct hlsl_ir + instr.dst_count = 1; + + sm4_register_from_node(&instr.srcs[0].reg, &writemask, &instr.srcs[0].swizzle_type, swizzle->val.node); +- instr.srcs[0].swizzle = hlsl_map_swizzle(hlsl_combine_swizzles(hlsl_swizzle_from_writemask(writemask), +- swizzle->swizzle, swizzle->node.data_type->dimx), instr.dsts[0].writemask); ++ hlsl_swizzle = hlsl_map_swizzle(hlsl_combine_swizzles(hlsl_swizzle_from_writemask(writemask), ++ swizzle->swizzle, swizzle->node.data_type->dimx), instr.dsts[0].write_mask); ++ instr.srcs[0].swizzle = swizzle_from_sm4(hlsl_swizzle); + instr.src_count = 1; + + write_sm4_instruction(tpf, &instr); +diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c +index 077d0144bc5..1bd61090139 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c ++++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c +@@ -1773,7 +1773,7 @@ static struct vkd3d_shader_param_node *shader_param_allocator_node_create( + static void shader_param_allocator_init(struct vkd3d_shader_param_allocator *allocator, + unsigned int count, unsigned int stride) + { +- allocator->count = max(count, 4); ++ allocator->count = max(count, MAX_REG_OUTPUT); + allocator->stride = stride; + allocator->head = NULL; + allocator->current = NULL; +diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h +index 5fd930918be..af75ef3bda8 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h ++++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h +@@ -92,6 +92,7 @@ enum vkd3d_shader_error + VKD3D_SHADER_ERROR_SPV_INVALID_DESCRIPTOR_BINDING = 2002, + VKD3D_SHADER_ERROR_SPV_DESCRIPTOR_IDX_UNSUPPORTED = 2003, + VKD3D_SHADER_ERROR_SPV_STENCIL_EXPORT_UNSUPPORTED = 2004, ++ VKD3D_SHADER_ERROR_SPV_OUT_OF_MEMORY = 2005, + + VKD3D_SHADER_WARNING_SPV_INVALID_SWIZZLE = 2300, + +@@ -174,13 +175,13 @@ enum vkd3d_shader_error + VKD3D_SHADER_ERROR_DXIL_INVALID_TYPE_ID = 8010, + VKD3D_SHADER_ERROR_DXIL_INVALID_MODULE = 8011, + VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND = 8012, ++ VKD3D_SHADER_ERROR_DXIL_UNHANDLED_INTRINSIC = 8013, + + VKD3D_SHADER_WARNING_DXIL_UNKNOWN_MAGIC_NUMBER = 8300, + VKD3D_SHADER_WARNING_DXIL_UNKNOWN_SHADER_TYPE = 8301, + VKD3D_SHADER_WARNING_DXIL_INVALID_BLOCK_LENGTH = 8302, + VKD3D_SHADER_WARNING_DXIL_INVALID_MODULE_LENGTH = 8303, + VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS = 8304, +- VKD3D_SHADER_WARNING_DXIL_UNHANDLED_INTRINSIC = 8305, + + VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED = 9000, + 
VKD3D_SHADER_ERROR_VSIR_INVALID_HANDLER = 9001, +@@ -529,6 +530,7 @@ enum vkd3d_shader_register_type + VKD3DSPR_RASTERIZER, + VKD3DSPR_OUTSTENCILREF, + VKD3DSPR_UNDEF, ++ VKD3DSPR_SSA, + + VKD3DSPR_COUNT, + +@@ -569,10 +571,11 @@ static inline bool data_type_is_integer(enum vkd3d_data_type data_type) + return data_type == VKD3D_DATA_INT || data_type == VKD3D_DATA_UINT8 || data_type == VKD3D_DATA_UINT; + } + +-enum vkd3d_immconst_type ++enum vsir_dimension + { +- VKD3D_IMMCONST_SCALAR, +- VKD3D_IMMCONST_VEC4, ++ VSIR_DIMENSION_NONE, ++ VSIR_DIMENSION_SCALAR, ++ VSIR_DIMENSION_VEC4, + }; + + enum vkd3d_shader_src_modifier +@@ -738,7 +741,7 @@ struct vkd3d_shader_register + enum vkd3d_data_type data_type; + struct vkd3d_shader_register_index idx[3]; + unsigned int idx_count; +- enum vkd3d_immconst_type immconst_type; ++ enum vsir_dimension dimension; + union + { + DWORD immconst_uint[VKD3D_VEC4_SIZE]; +@@ -749,7 +752,7 @@ struct vkd3d_shader_register + } u; + }; + +-void shader_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type, ++void vsir_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_register_type reg_type, + enum vkd3d_data_type data_type, unsigned int idx_count); + + struct vkd3d_shader_dst_param +@@ -880,6 +883,7 @@ struct vkd3d_shader_desc + struct shader_signature patch_constant_signature; + + uint32_t temp_count; ++ unsigned int ssa_count; + + struct + { +diff --git a/libs/vkd3d/libs/vkd3d/command.c b/libs/vkd3d/libs/vkd3d/command.c +index 3be45120d8c..4ac0329f7e7 100644 +--- a/libs/vkd3d/libs/vkd3d/command.c ++++ b/libs/vkd3d/libs/vkd3d/command.c +@@ -5462,6 +5462,7 @@ static void STDMETHODCALLTYPE d3d12_command_list_ClearUnorderedAccessViewUint(ID + view_desc.layer_idx = view->info.texture.layer_idx; + view_desc.layer_count = view->info.texture.layer_count; + view_desc.vk_image_aspect = VK_IMAGE_ASPECT_COLOR_BIT; ++ view_desc.usage = VK_IMAGE_USAGE_STORAGE_BIT; + + if (!vkd3d_create_texture_view(device, VKD3D_DESCRIPTOR_MAGIC_UAV, resource_impl->u.vk_image, &view_desc, + &uint_view)) +diff --git a/libs/vkd3d/libs/vkd3d/device.c b/libs/vkd3d/libs/vkd3d/device.c +index 69727e09cc7..d8c94fbfd94 100644 +--- a/libs/vkd3d/libs/vkd3d/device.c ++++ b/libs/vkd3d/libs/vkd3d/device.c +@@ -83,6 +83,7 @@ static const struct vkd3d_optional_extension_info optional_device_extensions[] = + VK_EXTENSION(KHR_DRAW_INDIRECT_COUNT, KHR_draw_indirect_count), + VK_EXTENSION(KHR_GET_MEMORY_REQUIREMENTS_2, KHR_get_memory_requirements2), + VK_EXTENSION(KHR_IMAGE_FORMAT_LIST, KHR_image_format_list), ++ VK_EXTENSION(KHR_MAINTENANCE2, KHR_maintenance2), + VK_EXTENSION(KHR_MAINTENANCE3, KHR_maintenance3), + VK_EXTENSION(KHR_PUSH_DESCRIPTOR, KHR_push_descriptor), + VK_EXTENSION(KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE, KHR_sampler_mirror_clamp_to_edge), +@@ -2456,17 +2457,20 @@ static void vkd3d_desc_object_cache_cleanup(struct vkd3d_desc_object_cache *cach + } + + /* ID3D12Device */ +-static inline struct d3d12_device *impl_from_ID3D12Device2(ID3D12Device2 *iface) ++static inline struct d3d12_device *impl_from_ID3D12Device5(ID3D12Device5 *iface) + { +- return CONTAINING_RECORD(iface, struct d3d12_device, ID3D12Device2_iface); ++ return CONTAINING_RECORD(iface, struct d3d12_device, ID3D12Device5_iface); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_QueryInterface(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_QueryInterface(ID3D12Device5 *iface, + REFIID riid, void **object) + { + TRACE("iface %p, riid %s, object %p.\n", iface, 
debugstr_guid(riid), object); + +- if (IsEqualGUID(riid, &IID_ID3D12Device2) ++ if (IsEqualGUID(riid, &IID_ID3D12Device5) ++ || IsEqualGUID(riid, &IID_ID3D12Device4) ++ || IsEqualGUID(riid, &IID_ID3D12Device3) ++ || IsEqualGUID(riid, &IID_ID3D12Device2) + || IsEqualGUID(riid, &IID_ID3D12Device1) + || IsEqualGUID(riid, &IID_ID3D12Device) + || IsEqualGUID(riid, &IID_ID3D12Object) +@@ -2483,9 +2487,9 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_QueryInterface(ID3D12Device2 *ifac + return E_NOINTERFACE; + } + +-static ULONG STDMETHODCALLTYPE d3d12_device_AddRef(ID3D12Device2 *iface) ++static ULONG STDMETHODCALLTYPE d3d12_device_AddRef(ID3D12Device5 *iface) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + ULONG refcount = InterlockedIncrement(&device->refcount); + + TRACE("%p increasing refcount to %u.\n", device, refcount); +@@ -2493,9 +2497,9 @@ static ULONG STDMETHODCALLTYPE d3d12_device_AddRef(ID3D12Device2 *iface) + return refcount; + } + +-static ULONG STDMETHODCALLTYPE d3d12_device_Release(ID3D12Device2 *iface) ++static ULONG STDMETHODCALLTYPE d3d12_device_Release(ID3D12Device5 *iface) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + ULONG refcount = InterlockedDecrement(&device->refcount); + + TRACE("%p decreasing refcount to %u.\n", device, refcount); +@@ -2529,10 +2533,10 @@ static ULONG STDMETHODCALLTYPE d3d12_device_Release(ID3D12Device2 *iface) + return refcount; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_GetPrivateData(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_GetPrivateData(ID3D12Device5 *iface, + REFGUID guid, UINT *data_size, void *data) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p, guid %s, data_size %p, data %p.\n", + iface, debugstr_guid(guid), data_size, data); +@@ -2540,10 +2544,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_GetPrivateData(ID3D12Device2 *ifac + return vkd3d_get_private_data(&device->private_store, guid, data_size, data); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_SetPrivateData(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_SetPrivateData(ID3D12Device5 *iface, + REFGUID guid, UINT data_size, const void *data) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p, guid %s, data_size %u, data %p.\n", + iface, debugstr_guid(guid), data_size, data); +@@ -2551,19 +2555,19 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_SetPrivateData(ID3D12Device2 *ifac + return vkd3d_set_private_data(&device->private_store, guid, data_size, data); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_SetPrivateDataInterface(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_SetPrivateDataInterface(ID3D12Device5 *iface, + REFGUID guid, const IUnknown *data) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p, guid %s, data %p.\n", iface, debugstr_guid(guid), data); + + return vkd3d_set_private_data_interface(&device->private_store, guid, data); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_SetName(ID3D12Device2 *iface, const WCHAR *name) ++static HRESULT STDMETHODCALLTYPE 
d3d12_device_SetName(ID3D12Device5 *iface, const WCHAR *name) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p, name %s.\n", iface, debugstr_w(name, device->wchar_size)); + +@@ -2571,17 +2575,17 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_SetName(ID3D12Device2 *iface, cons + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, name); + } + +-static UINT STDMETHODCALLTYPE d3d12_device_GetNodeCount(ID3D12Device2 *iface) ++static UINT STDMETHODCALLTYPE d3d12_device_GetNodeCount(ID3D12Device5 *iface) + { + TRACE("iface %p.\n", iface); + + return 1; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandQueue(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandQueue(ID3D12Device5 *iface, + const D3D12_COMMAND_QUEUE_DESC *desc, REFIID riid, void **command_queue) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_command_queue *object; + HRESULT hr; + +@@ -2595,10 +2599,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandQueue(ID3D12Device2 * + riid, command_queue); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandAllocator(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandAllocator(ID3D12Device5 *iface, + D3D12_COMMAND_LIST_TYPE type, REFIID riid, void **command_allocator) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_command_allocator *object; + HRESULT hr; + +@@ -2612,10 +2616,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandAllocator(ID3D12Devic + riid, command_allocator); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateGraphicsPipelineState(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateGraphicsPipelineState(ID3D12Device5 *iface, + const D3D12_GRAPHICS_PIPELINE_STATE_DESC *desc, REFIID riid, void **pipeline_state) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_pipeline_state *object; + HRESULT hr; + +@@ -2629,10 +2633,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateGraphicsPipelineState(ID3D12 + &IID_ID3D12PipelineState, riid, pipeline_state); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateComputePipelineState(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateComputePipelineState(ID3D12Device5 *iface, + const D3D12_COMPUTE_PIPELINE_STATE_DESC *desc, REFIID riid, void **pipeline_state) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_pipeline_state *object; + HRESULT hr; + +@@ -2646,11 +2650,11 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateComputePipelineState(ID3D12D + &IID_ID3D12PipelineState, riid, pipeline_state); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandList(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandList(ID3D12Device5 *iface, + UINT node_mask, D3D12_COMMAND_LIST_TYPE type, ID3D12CommandAllocator *command_allocator, + ID3D12PipelineState *initial_pipeline_state, REFIID riid, void **command_list) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = 
impl_from_ID3D12Device5(iface); + struct d3d12_command_list *object; + HRESULT hr; + +@@ -2773,10 +2777,10 @@ bool d3d12_device_is_uma(struct d3d12_device *device, bool *coherent) + return true; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CheckFeatureSupport(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CheckFeatureSupport(ID3D12Device5 *iface, + D3D12_FEATURE feature, void *feature_data, UINT feature_data_size) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p, feature %#x, feature_data %p, feature_data_size %u.\n", + iface, feature, feature_data, feature_data_size); +@@ -3275,10 +3279,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CheckFeatureSupport(ID3D12Device2 + } + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateDescriptorHeap(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateDescriptorHeap(ID3D12Device5 *iface, + const D3D12_DESCRIPTOR_HEAP_DESC *desc, REFIID riid, void **descriptor_heap) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_descriptor_heap *object; + HRESULT hr; + +@@ -3292,7 +3296,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateDescriptorHeap(ID3D12Device2 + &IID_ID3D12DescriptorHeap, riid, descriptor_heap); + } + +-static UINT STDMETHODCALLTYPE d3d12_device_GetDescriptorHandleIncrementSize(ID3D12Device2 *iface, ++static UINT STDMETHODCALLTYPE d3d12_device_GetDescriptorHandleIncrementSize(ID3D12Device5 *iface, + D3D12_DESCRIPTOR_HEAP_TYPE descriptor_heap_type) + { + TRACE("iface %p, descriptor_heap_type %#x.\n", iface, descriptor_heap_type); +@@ -3315,11 +3319,11 @@ static UINT STDMETHODCALLTYPE d3d12_device_GetDescriptorHandleIncrementSize(ID3D + } + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateRootSignature(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateRootSignature(ID3D12Device5 *iface, + UINT node_mask, const void *bytecode, SIZE_T bytecode_length, + REFIID riid, void **root_signature) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_root_signature *object; + HRESULT hr; + +@@ -3335,10 +3339,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateRootSignature(ID3D12Device2 + &IID_ID3D12RootSignature, riid, root_signature); + } + +-static void STDMETHODCALLTYPE d3d12_device_CreateConstantBufferView(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CreateConstantBufferView(ID3D12Device5 *iface, + const D3D12_CONSTANT_BUFFER_VIEW_DESC *desc, D3D12_CPU_DESCRIPTOR_HANDLE descriptor) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_desc tmp = {0}; + + TRACE("iface %p, desc %p, descriptor %#lx.\n", iface, desc, descriptor.ptr); +@@ -3347,11 +3351,11 @@ static void STDMETHODCALLTYPE d3d12_device_CreateConstantBufferView(ID3D12Device + d3d12_desc_write_atomic(d3d12_desc_from_cpu_handle(descriptor), &tmp, device); + } + +-static void STDMETHODCALLTYPE d3d12_device_CreateShaderResourceView(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CreateShaderResourceView(ID3D12Device5 *iface, + ID3D12Resource *resource, const D3D12_SHADER_RESOURCE_VIEW_DESC *desc, + D3D12_CPU_DESCRIPTOR_HANDLE descriptor) + { +- struct 
d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_desc tmp = {0}; + + TRACE("iface %p, resource %p, desc %p, descriptor %#lx.\n", +@@ -3361,11 +3365,11 @@ static void STDMETHODCALLTYPE d3d12_device_CreateShaderResourceView(ID3D12Device + d3d12_desc_write_atomic(d3d12_desc_from_cpu_handle(descriptor), &tmp, device); + } + +-static void STDMETHODCALLTYPE d3d12_device_CreateUnorderedAccessView(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CreateUnorderedAccessView(ID3D12Device5 *iface, + ID3D12Resource *resource, ID3D12Resource *counter_resource, + const D3D12_UNORDERED_ACCESS_VIEW_DESC *desc, D3D12_CPU_DESCRIPTOR_HANDLE descriptor) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_desc tmp = {0}; + + TRACE("iface %p, resource %p, counter_resource %p, desc %p, descriptor %#lx.\n", +@@ -3376,7 +3380,7 @@ static void STDMETHODCALLTYPE d3d12_device_CreateUnorderedAccessView(ID3D12Devic + d3d12_desc_write_atomic(d3d12_desc_from_cpu_handle(descriptor), &tmp, device); + } + +-static void STDMETHODCALLTYPE d3d12_device_CreateRenderTargetView(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CreateRenderTargetView(ID3D12Device5 *iface, + ID3D12Resource *resource, const D3D12_RENDER_TARGET_VIEW_DESC *desc, + D3D12_CPU_DESCRIPTOR_HANDLE descriptor) + { +@@ -3384,10 +3388,10 @@ static void STDMETHODCALLTYPE d3d12_device_CreateRenderTargetView(ID3D12Device2 + iface, resource, desc, descriptor.ptr); + + d3d12_rtv_desc_create_rtv(d3d12_rtv_desc_from_cpu_handle(descriptor), +- impl_from_ID3D12Device2(iface), unsafe_impl_from_ID3D12Resource(resource), desc); ++ impl_from_ID3D12Device5(iface), unsafe_impl_from_ID3D12Resource(resource), desc); + } + +-static void STDMETHODCALLTYPE d3d12_device_CreateDepthStencilView(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CreateDepthStencilView(ID3D12Device5 *iface, + ID3D12Resource *resource, const D3D12_DEPTH_STENCIL_VIEW_DESC *desc, + D3D12_CPU_DESCRIPTOR_HANDLE descriptor) + { +@@ -3395,13 +3399,13 @@ static void STDMETHODCALLTYPE d3d12_device_CreateDepthStencilView(ID3D12Device2 + iface, resource, desc, descriptor.ptr); + + d3d12_dsv_desc_create_dsv(d3d12_dsv_desc_from_cpu_handle(descriptor), +- impl_from_ID3D12Device2(iface), unsafe_impl_from_ID3D12Resource(resource), desc); ++ impl_from_ID3D12Device5(iface), unsafe_impl_from_ID3D12Resource(resource), desc); + } + +-static void STDMETHODCALLTYPE d3d12_device_CreateSampler(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CreateSampler(ID3D12Device5 *iface, + const D3D12_SAMPLER_DESC *desc, D3D12_CPU_DESCRIPTOR_HANDLE descriptor) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_desc tmp = {0}; + + TRACE("iface %p, desc %p, descriptor %#lx.\n", iface, desc, descriptor.ptr); +@@ -3410,14 +3414,14 @@ static void STDMETHODCALLTYPE d3d12_device_CreateSampler(ID3D12Device2 *iface, + d3d12_desc_write_atomic(d3d12_desc_from_cpu_handle(descriptor), &tmp, device); + } + +-static void STDMETHODCALLTYPE d3d12_device_CopyDescriptors(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CopyDescriptors(ID3D12Device5 *iface, + UINT dst_descriptor_range_count, const D3D12_CPU_DESCRIPTOR_HANDLE *dst_descriptor_range_offsets, + const UINT *dst_descriptor_range_sizes, 
+ UINT src_descriptor_range_count, const D3D12_CPU_DESCRIPTOR_HANDLE *src_descriptor_range_offsets, + const UINT *src_descriptor_range_sizes, + D3D12_DESCRIPTOR_HEAP_TYPE descriptor_heap_type) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + unsigned int dst_range_idx, dst_idx, src_range_idx, src_idx; + unsigned int dst_range_size, src_range_size; + struct d3d12_descriptor_heap *dst_heap; +@@ -3473,7 +3477,7 @@ static void STDMETHODCALLTYPE d3d12_device_CopyDescriptors(ID3D12Device2 *iface, + } + } + +-static void STDMETHODCALLTYPE d3d12_device_CopyDescriptorsSimple(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_CopyDescriptorsSimple(ID3D12Device5 *iface, + UINT descriptor_count, const D3D12_CPU_DESCRIPTOR_HANDLE dst_descriptor_range_offset, + const D3D12_CPU_DESCRIPTOR_HANDLE src_descriptor_range_offset, + D3D12_DESCRIPTOR_HEAP_TYPE descriptor_heap_type) +@@ -3488,10 +3492,10 @@ static void STDMETHODCALLTYPE d3d12_device_CopyDescriptorsSimple(ID3D12Device2 * + } + + static D3D12_RESOURCE_ALLOCATION_INFO * STDMETHODCALLTYPE d3d12_device_GetResourceAllocationInfo( +- ID3D12Device2 *iface, D3D12_RESOURCE_ALLOCATION_INFO *info, UINT visible_mask, ++ ID3D12Device5 *iface, D3D12_RESOURCE_ALLOCATION_INFO *info, UINT visible_mask, + UINT count, const D3D12_RESOURCE_DESC *resource_descs) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + const D3D12_RESOURCE_DESC *desc; + uint64_t requested_alignment; + +@@ -3564,10 +3568,10 @@ invalid: + return info; + } + +-static D3D12_HEAP_PROPERTIES * STDMETHODCALLTYPE d3d12_device_GetCustomHeapProperties(ID3D12Device2 *iface, ++static D3D12_HEAP_PROPERTIES * STDMETHODCALLTYPE d3d12_device_GetCustomHeapProperties(ID3D12Device5 *iface, + D3D12_HEAP_PROPERTIES *heap_properties, UINT node_mask, D3D12_HEAP_TYPE heap_type) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + bool coherent; + + TRACE("iface %p, heap_properties %p, node_mask 0x%08x, heap_type %#x.\n", +@@ -3607,12 +3611,12 @@ static D3D12_HEAP_PROPERTIES * STDMETHODCALLTYPE d3d12_device_GetCustomHeapPrope + return heap_properties; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommittedResource(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommittedResource(ID3D12Device5 *iface, + const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags, + const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state, + const D3D12_CLEAR_VALUE *optimized_clear_value, REFIID iid, void **resource) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_resource *object; + HRESULT hr; + +@@ -3631,10 +3635,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommittedResource(ID3D12Devi + return return_interface(&object->ID3D12Resource_iface, &IID_ID3D12Resource, iid, resource); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateHeap(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateHeap(ID3D12Device5 *iface, + const D3D12_HEAP_DESC *desc, REFIID iid, void **heap) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_heap *object; + HRESULT hr; + +@@ -3650,12 
+3654,12 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateHeap(ID3D12Device2 *iface, + return return_interface(&object->ID3D12Heap_iface, &IID_ID3D12Heap, iid, heap); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePlacedResource(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePlacedResource(ID3D12Device5 *iface, + ID3D12Heap *heap, UINT64 heap_offset, + const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state, + const D3D12_CLEAR_VALUE *optimized_clear_value, REFIID iid, void **resource) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_heap *heap_object; + struct d3d12_resource *object; + HRESULT hr; +@@ -3674,11 +3678,11 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePlacedResource(ID3D12Device2 + return return_interface(&object->ID3D12Resource_iface, &IID_ID3D12Resource, iid, resource); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateReservedResource(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateReservedResource(ID3D12Device5 *iface, + const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state, + const D3D12_CLEAR_VALUE *optimized_clear_value, REFIID iid, void **resource) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_resource *object; + HRESULT hr; + +@@ -3692,11 +3696,11 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateReservedResource(ID3D12Devic + return return_interface(&object->ID3D12Resource_iface, &IID_ID3D12Resource, iid, resource); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateSharedHandle(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateSharedHandle(ID3D12Device5 *iface, + ID3D12DeviceChild *object, const SECURITY_ATTRIBUTES *attributes, DWORD access, + const WCHAR *name, HANDLE *handle) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + FIXME("iface %p, object %p, attributes %p, access %#x, name %s, handle %p stub!\n", + iface, object, attributes, access, debugstr_w(name, device->wchar_size), handle); +@@ -3704,7 +3708,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateSharedHandle(ID3D12Device2 * + return E_NOTIMPL; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandle(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandle(ID3D12Device5 *iface, + HANDLE handle, REFIID riid, void **object) + { + FIXME("iface %p, handle %p, riid %s, object %p stub!\n", +@@ -3713,10 +3717,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandle(ID3D12Device2 *if + return E_NOTIMPL; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandleByName(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandleByName(ID3D12Device5 *iface, + const WCHAR *name, DWORD access, HANDLE *handle) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + FIXME("iface %p, name %s, access %#x, handle %p stub!\n", + iface, debugstr_w(name, device->wchar_size), access, handle); +@@ -3724,7 +3728,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_OpenSharedHandleByName(ID3D12Devic + return E_NOTIMPL; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_MakeResident(ID3D12Device2 
*iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_MakeResident(ID3D12Device5 *iface, + UINT object_count, ID3D12Pageable * const *objects) + { + FIXME_ONCE("iface %p, object_count %u, objects %p stub!\n", +@@ -3733,7 +3737,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_MakeResident(ID3D12Device2 *iface, + return S_OK; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_Evict(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_Evict(ID3D12Device5 *iface, + UINT object_count, ID3D12Pageable * const *objects) + { + FIXME_ONCE("iface %p, object_count %u, objects %p stub!\n", +@@ -3742,10 +3746,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_Evict(ID3D12Device2 *iface, + return S_OK; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateFence(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateFence(ID3D12Device5 *iface, + UINT64 initial_value, D3D12_FENCE_FLAGS flags, REFIID riid, void **fence) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_fence *object; + HRESULT hr; + +@@ -3758,21 +3762,21 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateFence(ID3D12Device2 *iface, + return return_interface(&object->ID3D12Fence1_iface, &IID_ID3D12Fence1, riid, fence); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_GetDeviceRemovedReason(ID3D12Device2 *iface) ++static HRESULT STDMETHODCALLTYPE d3d12_device_GetDeviceRemovedReason(ID3D12Device5 *iface) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p.\n", iface); + + return device->removed_reason; + } + +-static void STDMETHODCALLTYPE d3d12_device_GetCopyableFootprints(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_GetCopyableFootprints(ID3D12Device5 *iface, + const D3D12_RESOURCE_DESC *desc, UINT first_sub_resource, UINT sub_resource_count, + UINT64 base_offset, D3D12_PLACED_SUBRESOURCE_FOOTPRINT *layouts, + UINT *row_counts, UINT64 *row_sizes, UINT64 *total_bytes) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + unsigned int i, sub_resource_idx, miplevel_idx, row_count, row_size, row_pitch; + unsigned int width, height, depth, plane_count, sub_resources_per_plane; +@@ -3852,10 +3856,10 @@ static void STDMETHODCALLTYPE d3d12_device_GetCopyableFootprints(ID3D12Device2 * + *total_bytes = total; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreateQueryHeap(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateQueryHeap(ID3D12Device5 *iface, + const D3D12_QUERY_HEAP_DESC *desc, REFIID iid, void **heap) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_query_heap *object; + HRESULT hr; + +@@ -3868,18 +3872,18 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateQueryHeap(ID3D12Device2 *ifa + return return_interface(&object->ID3D12QueryHeap_iface, &IID_ID3D12QueryHeap, iid, heap); + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_SetStablePowerState(ID3D12Device2 *iface, BOOL enable) ++static HRESULT STDMETHODCALLTYPE d3d12_device_SetStablePowerState(ID3D12Device5 *iface, BOOL enable) + { + FIXME("iface %p, enable %#x stub!\n", iface, enable); + + return E_NOTIMPL; + } + +-static HRESULT STDMETHODCALLTYPE 
d3d12_device_CreateCommandSignature(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandSignature(ID3D12Device5 *iface, + const D3D12_COMMAND_SIGNATURE_DESC *desc, ID3D12RootSignature *root_signature, + REFIID iid, void **command_signature) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + struct d3d12_command_signature *object; + HRESULT hr; + +@@ -3893,14 +3897,14 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandSignature(ID3D12Devic + &IID_ID3D12CommandSignature, iid, command_signature); + } + +-static void STDMETHODCALLTYPE d3d12_device_GetResourceTiling(ID3D12Device2 *iface, ++static void STDMETHODCALLTYPE d3d12_device_GetResourceTiling(ID3D12Device5 *iface, + ID3D12Resource *resource, UINT *total_tile_count, + D3D12_PACKED_MIP_INFO *packed_mip_info, D3D12_TILE_SHAPE *standard_tile_shape, + UINT *sub_resource_tiling_count, UINT first_sub_resource_tiling, + D3D12_SUBRESOURCE_TILING *sub_resource_tilings) + { + const struct d3d12_resource *resource_impl = impl_from_ID3D12Resource(resource); +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p, resource %p, total_tile_count %p, packed_mip_info %p, " + "standard_title_shape %p, sub_resource_tiling_count %p, " +@@ -3913,9 +3917,9 @@ static void STDMETHODCALLTYPE d3d12_device_GetResourceTiling(ID3D12Device2 *ifac + sub_resource_tiling_count, first_sub_resource_tiling, sub_resource_tilings); + } + +-static LUID * STDMETHODCALLTYPE d3d12_device_GetAdapterLuid(ID3D12Device2 *iface, LUID *luid) ++static LUID * STDMETHODCALLTYPE d3d12_device_GetAdapterLuid(ID3D12Device5 *iface, LUID *luid) + { +- struct d3d12_device *device = impl_from_ID3D12Device2(iface); ++ struct d3d12_device *device = impl_from_ID3D12Device5(iface); + + TRACE("iface %p, luid %p.\n", iface, luid); + +@@ -3924,7 +3928,7 @@ static LUID * STDMETHODCALLTYPE d3d12_device_GetAdapterLuid(ID3D12Device2 *iface + return luid; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePipelineLibrary(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePipelineLibrary(ID3D12Device5 *iface, + const void *blob, SIZE_T blob_size, REFIID iid, void **lib) + { + FIXME("iface %p, blob %p, blob_size %lu, iid %s, lib %p stub!\n", iface, blob, blob_size, debugstr_guid(iid), lib); +@@ -3932,7 +3936,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePipelineLibrary(ID3D12Device + return DXGI_ERROR_UNSUPPORTED; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_SetEventOnMultipleFenceCompletion(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_SetEventOnMultipleFenceCompletion(ID3D12Device5 *iface, + ID3D12Fence *const *fences, const UINT64 *values, UINT fence_count, + D3D12_MULTIPLE_FENCE_WAIT_FLAGS flags, HANDLE event) + { +@@ -3942,7 +3946,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_SetEventOnMultipleFenceCompletion( + return E_NOTIMPL; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_SetResidencyPriority(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_SetResidencyPriority(ID3D12Device5 *iface, + UINT object_count, ID3D12Pageable *const *objects, const D3D12_RESIDENCY_PRIORITY *priorities) + { + FIXME_ONCE("iface %p, object_count %u, objects %p, priorities %p stub!\n", iface, object_count, objects, priorities); +@@ -3950,7 +3954,7 @@ static HRESULT STDMETHODCALLTYPE 
d3d12_device_SetResidencyPriority(ID3D12Device2 + return S_OK; + } + +-static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePipelineState(ID3D12Device2 *iface, ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePipelineState(ID3D12Device5 *iface, + const D3D12_PIPELINE_STATE_STREAM_DESC *desc, REFIID iid, void **pipeline_state) + { + FIXME("iface %p, desc %p, iid %s, pipeline_state %p stub!\n", iface, desc, debugstr_guid(iid), pipeline_state); +@@ -3958,7 +3962,168 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CreatePipelineState(ID3D12Device2 + return E_NOTIMPL; + } + +-static const struct ID3D12Device2Vtbl d3d12_device_vtbl = ++static HRESULT STDMETHODCALLTYPE d3d12_device_OpenExistingHeapFromAddress(ID3D12Device5 *iface, ++ const void *address, REFIID iid, void **heap) ++{ ++ FIXME("iface %p, address %p, iid %s, heap %p stub!\n", iface, address, debugstr_guid(iid), heap); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_OpenExistingHeapFromFileMapping(ID3D12Device5 *iface, ++ HANDLE file_mapping, REFIID iid, void **heap) ++{ ++ FIXME("iface %p, file_mapping %p, iid %s, heap %p stub!\n", iface, file_mapping, debugstr_guid(iid), heap); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_EnqueueMakeResident(ID3D12Device5 *iface, ++ D3D12_RESIDENCY_FLAGS flags, UINT num_objects, ID3D12Pageable *const *objects, ++ ID3D12Fence *fence, UINT64 fence_value) ++{ ++ FIXME("iface %p, flags %#x, num_objects %u, objects %p, fence %p, fence_value %#"PRIx64" stub!\n", ++ iface, flags, num_objects, objects, fence, fence_value); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommandList1(ID3D12Device5 *iface, ++ UINT node_mask, D3D12_COMMAND_LIST_TYPE type, D3D12_COMMAND_LIST_FLAGS flags, ++ REFIID iid, void **command_list) ++{ ++ FIXME("iface %p, node_mask 0x%08x, type %#x, flags %#x, iid %s, command_list %p stub!\n", ++ iface, node_mask, type, flags, debugstr_guid(iid), command_list); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateProtectedResourceSession(ID3D12Device5 *iface, ++ const D3D12_PROTECTED_RESOURCE_SESSION_DESC *desc, REFIID iid, void **session) ++{ ++ FIXME("iface %p, desc %p, iid %s, session %p stub!\n", iface, desc, debugstr_guid(iid), session); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateCommittedResource1(ID3D12Device5 *iface, ++ const D3D12_HEAP_PROPERTIES *heap_properties, D3D12_HEAP_FLAGS heap_flags, ++ const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state, ++ const D3D12_CLEAR_VALUE *optimized_clear_value, ++ ID3D12ProtectedResourceSession *protected_session, REFIID iid, void **resource) ++{ ++ FIXME("iface %p, heap_properties %p, heap_flags %#x, desc %p, initial_state %#x, " ++ "optimized_clear_value %p, protected_session %p, iid %s, resource %p stub!\n", ++ iface, heap_properties, heap_flags, desc, initial_state, ++ optimized_clear_value, protected_session, debugstr_guid(iid), resource); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateHeap1(ID3D12Device5 *iface, ++ const D3D12_HEAP_DESC *desc, ID3D12ProtectedResourceSession *protected_session, ++ REFIID iid, void **heap) ++{ ++ FIXME("iface %p, desc %p, protected_session %p, iid %s, heap %p stub!\n", ++ iface, desc, protected_session, debugstr_guid(iid), heap); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateReservedResource1(ID3D12Device5 *iface, ++ const 
D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES initial_state, ++ const D3D12_CLEAR_VALUE *optimized_clear_value, ++ ID3D12ProtectedResourceSession *protected_session, REFIID iid, void **resource) ++{ ++ FIXME("iface %p, desc %p, initial_state %#x, optimized_clear_value %p, " ++ "protected_session %p, iid %s, resource %p stub!\n", ++ iface, desc, initial_state, optimized_clear_value, protected_session, ++ debugstr_guid(iid), resource); ++ ++ return E_NOTIMPL; ++} ++ ++static D3D12_RESOURCE_ALLOCATION_INFO * STDMETHODCALLTYPE d3d12_device_GetResourceAllocationInfo1( ++ ID3D12Device5 *iface, D3D12_RESOURCE_ALLOCATION_INFO *info, UINT visible_mask, ++ UINT count, const D3D12_RESOURCE_DESC *resource_descs, ++ D3D12_RESOURCE_ALLOCATION_INFO1 *info1) ++{ ++ FIXME("iface %p, info %p, visible_mask 0x%08x, count %u, resource_descs %p, info1 %p stub!\n", ++ iface, info, visible_mask, count, resource_descs, info1); ++ ++ return info; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateLifetimeTracker(ID3D12Device5 *iface, ++ ID3D12LifetimeOwner *owner, REFIID iid, void **tracker) ++{ ++ FIXME("iface %p, owner %p, iid %s, tracker %p stub!\n", iface, owner, debugstr_guid(iid), tracker); ++ ++ return E_NOTIMPL; ++} ++ ++static void STDMETHODCALLTYPE d3d12_device_RemoveDevice(ID3D12Device5 *iface) ++{ ++ FIXME("iface %p stub!\n", iface); ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_EnumerateMetaCommands(ID3D12Device5 *iface, ++ UINT *num_meta_commands, D3D12_META_COMMAND_DESC *command_desc) ++{ ++ FIXME("iface %p, num_meta_commands %p, command_desc %p stub!\n", iface, ++ num_meta_commands, command_desc); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_EnumerateMetaCommandParameters(ID3D12Device5 *iface, ++ REFGUID command_id, D3D12_META_COMMAND_PARAMETER_STAGE stage, ++ UINT *size_in_bytes, UINT *parameter_count, ++ D3D12_META_COMMAND_PARAMETER_DESC *parameter_desc) ++{ ++ FIXME("iface %p, command_id %s, stage %u, size_in_bytes %p, " ++ "parameter_count %p, parameter_desc %p stub!\n", iface, ++ debugstr_guid(command_id), stage, size_in_bytes, parameter_count, parameter_desc); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateMetaCommand(ID3D12Device5 *iface, ++ REFGUID command_id, UINT node_mask, const void *parameters_data, ++ SIZE_T data_size_in_bytes, REFIID iid, void **meta_command) ++{ ++ FIXME("iface %p, command_id %s, node_mask %#x, parameters_data %p, " ++ "data_size_in_bytes %lu, iid %s, meta_command %p stub!\n", iface, ++ debugstr_guid(command_id), node_mask, parameters_data, ++ data_size_in_bytes, debugstr_guid(iid), meta_command); ++ ++ return E_NOTIMPL; ++} ++ ++static HRESULT STDMETHODCALLTYPE d3d12_device_CreateStateObject(ID3D12Device5 *iface, ++ const D3D12_STATE_OBJECT_DESC *desc, REFIID iid, void **state_object) ++{ ++ FIXME("iface %p, desc %p, iid %s, state_object %p stub!\n", iface, desc, debugstr_guid(iid), state_object); ++ ++ return E_NOTIMPL; ++} ++ ++static void STDMETHODCALLTYPE d3d12_device_GetRaytracingAccelerationStructurePrebuildInfo(ID3D12Device5 *iface, ++ const D3D12_BUILD_RAYTRACING_ACCELERATION_STRUCTURE_INPUTS *desc, ++ D3D12_RAYTRACING_ACCELERATION_STRUCTURE_PREBUILD_INFO *info) ++{ ++ FIXME("iface %p, desc %p, info %p stub!\n", iface, desc, info); ++} ++ ++static D3D12_DRIVER_MATCHING_IDENTIFIER_STATUS STDMETHODCALLTYPE d3d12_device_CheckDriverMatchingIdentifier(ID3D12Device5 *iface, ++ D3D12_SERIALIZED_DATA_TYPE data_type, const D3D12_SERIALIZED_DATA_DRIVER_MATCHING_IDENTIFIER 
*identifier) ++{ ++ FIXME("iface %p, data_type %u, identifier %p stub!\n", iface, data_type, identifier); ++ ++ return D3D12_DRIVER_MATCHING_IDENTIFIER_UNRECOGNIZED; ++} ++ ++static const struct ID3D12Device5Vtbl d3d12_device_vtbl = + { + /* IUnknown methods */ + d3d12_device_QueryInterface, +@@ -4013,14 +4178,34 @@ static const struct ID3D12Device2Vtbl d3d12_device_vtbl = + d3d12_device_SetResidencyPriority, + /* ID3D12Device2 methods */ + d3d12_device_CreatePipelineState, ++ /* ID3D12Device3 methods */ ++ d3d12_device_OpenExistingHeapFromAddress, ++ d3d12_device_OpenExistingHeapFromFileMapping, ++ d3d12_device_EnqueueMakeResident, ++ /* ID3D12Device4 methods */ ++ d3d12_device_CreateCommandList1, ++ d3d12_device_CreateProtectedResourceSession, ++ d3d12_device_CreateCommittedResource1, ++ d3d12_device_CreateHeap1, ++ d3d12_device_CreateReservedResource1, ++ d3d12_device_GetResourceAllocationInfo1, ++ /* ID3D12Device5 methods */ ++ d3d12_device_CreateLifetimeTracker, ++ d3d12_device_RemoveDevice, ++ d3d12_device_EnumerateMetaCommands, ++ d3d12_device_EnumerateMetaCommandParameters, ++ d3d12_device_CreateMetaCommand, ++ d3d12_device_CreateStateObject, ++ d3d12_device_GetRaytracingAccelerationStructurePrebuildInfo, ++ d3d12_device_CheckDriverMatchingIdentifier, + }; + +-struct d3d12_device *unsafe_impl_from_ID3D12Device2(ID3D12Device2 *iface) ++struct d3d12_device *unsafe_impl_from_ID3D12Device5(ID3D12Device5 *iface) + { + if (!iface) + return NULL; + assert(iface->lpVtbl == &d3d12_device_vtbl); +- return impl_from_ID3D12Device2(iface); ++ return impl_from_ID3D12Device5(iface); + } + + static HRESULT d3d12_device_init(struct d3d12_device *device, +@@ -4029,7 +4214,7 @@ static HRESULT d3d12_device_init(struct d3d12_device *device, + const struct vkd3d_vk_device_procs *vk_procs; + HRESULT hr; + +- device->ID3D12Device2_iface.lpVtbl = &d3d12_device_vtbl; ++ device->ID3D12Device5_iface.lpVtbl = &d3d12_device_vtbl; + device->refcount = 1; + + vkd3d_instance_incref(device->vkd3d_instance = instance); +@@ -4226,28 +4411,28 @@ HRESULT vkd3d_join_thread(struct vkd3d_instance *instance, union vkd3d_thread_ha + + IUnknown *vkd3d_get_device_parent(ID3D12Device *device) + { +- struct d3d12_device *d3d12_device = impl_from_ID3D12Device2((ID3D12Device2 *)device); ++ struct d3d12_device *d3d12_device = impl_from_ID3D12Device5((ID3D12Device5 *)device); + + return d3d12_device->parent; + } + + VkDevice vkd3d_get_vk_device(ID3D12Device *device) + { +- struct d3d12_device *d3d12_device = impl_from_ID3D12Device2((ID3D12Device2 *)device); ++ struct d3d12_device *d3d12_device = impl_from_ID3D12Device5((ID3D12Device5 *)device); + + return d3d12_device->vk_device; + } + + VkPhysicalDevice vkd3d_get_vk_physical_device(ID3D12Device *device) + { +- struct d3d12_device *d3d12_device = impl_from_ID3D12Device2((ID3D12Device2 *)device); ++ struct d3d12_device *d3d12_device = impl_from_ID3D12Device5((ID3D12Device5 *)device); + + return d3d12_device->vk_physical_device; + } + + struct vkd3d_instance *vkd3d_instance_from_device(ID3D12Device *device) + { +- struct d3d12_device *d3d12_device = impl_from_ID3D12Device2((ID3D12Device2 *)device); ++ struct d3d12_device *d3d12_device = impl_from_ID3D12Device5((ID3D12Device5 *)device); + + return d3d12_device->vkd3d_instance; + } +diff --git a/libs/vkd3d/libs/vkd3d/resource.c b/libs/vkd3d/libs/vkd3d/resource.c +index 0dfb4d379ca..14fb24a9c94 100644 +--- a/libs/vkd3d/libs/vkd3d/resource.c ++++ b/libs/vkd3d/libs/vkd3d/resource.c +@@ -2220,7 +2220,7 @@ HRESULT 
d3d12_reserved_resource_create(struct d3d12_device *device, + HRESULT vkd3d_create_image_resource(ID3D12Device *device, + const struct vkd3d_image_resource_create_info *create_info, ID3D12Resource **resource) + { +- struct d3d12_device *d3d12_device = unsafe_impl_from_ID3D12Device2((ID3D12Device2 *)device); ++ struct d3d12_device *d3d12_device = unsafe_impl_from_ID3D12Device5((ID3D12Device5 *)device); + struct d3d12_resource *object; + HRESULT hr; + +@@ -2998,6 +2998,7 @@ static bool init_default_texture_view_desc(struct vkd3d_texture_view_desc *desc, + desc->components.b = VK_COMPONENT_SWIZZLE_IDENTITY; + desc->components.a = VK_COMPONENT_SWIZZLE_IDENTITY; + desc->allowed_swizzle = false; ++ desc->usage = 0; + return true; + } + +@@ -3039,6 +3040,7 @@ bool vkd3d_create_texture_view(struct d3d12_device *device, uint32_t magic, VkIm + { + const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs; + const struct vkd3d_format *format = desc->format; ++ VkImageViewUsageCreateInfoKHR usage_desc; + struct VkImageViewCreateInfo view_desc; + VkImageView vk_view = VK_NULL_HANDLE; + struct vkd3d_view *object; +@@ -3060,6 +3062,13 @@ bool vkd3d_create_texture_view(struct d3d12_device *device, uint32_t magic, VkIm + view_desc.subresourceRange.levelCount = desc->miplevel_count; + view_desc.subresourceRange.baseArrayLayer = desc->layer_idx; + view_desc.subresourceRange.layerCount = desc->layer_count; ++ if (device->vk_info.KHR_maintenance2) ++ { ++ usage_desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO; ++ usage_desc.pNext = NULL; ++ usage_desc.usage = desc->usage; ++ view_desc.pNext = &usage_desc; ++ } + if ((vr = VK_CALL(vkCreateImageView(device->vk_device, &view_desc, NULL, &vk_view))) < 0) + { + WARN("Failed to create Vulkan image view, vr %d.\n", vr); +@@ -3196,6 +3205,7 @@ static void vkd3d_create_null_srv(struct d3d12_desc *descriptor, + vkd3d_desc.components.b = VK_COMPONENT_SWIZZLE_ZERO; + vkd3d_desc.components.a = VK_COMPONENT_SWIZZLE_ZERO; + vkd3d_desc.allowed_swizzle = true; ++ vkd3d_desc.usage = VK_IMAGE_USAGE_SAMPLED_BIT; + + vkd3d_create_texture_view(device, VKD3D_DESCRIPTOR_MAGIC_SRV, vk_image, &vkd3d_desc, &descriptor->s.u.view); + } +@@ -3268,6 +3278,7 @@ void d3d12_desc_create_srv(struct d3d12_desc *descriptor, + + vkd3d_desc.miplevel_count = VK_REMAINING_MIP_LEVELS; + vkd3d_desc.allowed_swizzle = true; ++ vkd3d_desc.usage = VK_IMAGE_USAGE_SAMPLED_BIT; + + if (desc) + { +@@ -3421,6 +3432,7 @@ static void vkd3d_create_null_uav(struct d3d12_desc *descriptor, + vkd3d_desc.components.b = VK_COMPONENT_SWIZZLE_B; + vkd3d_desc.components.a = VK_COMPONENT_SWIZZLE_A; + vkd3d_desc.allowed_swizzle = false; ++ vkd3d_desc.usage = VK_IMAGE_USAGE_STORAGE_BIT; + + vkd3d_create_texture_view(device, VKD3D_DESCRIPTOR_MAGIC_UAV, vk_image, &vkd3d_desc, &descriptor->s.u.view); + } +@@ -3480,6 +3492,8 @@ static void vkd3d_create_texture_uav(struct d3d12_desc *descriptor, + if (!init_default_texture_view_desc(&vkd3d_desc, resource, desc ? desc->Format : 0)) + return; + ++ vkd3d_desc.usage = VK_IMAGE_USAGE_STORAGE_BIT; ++ + if (vkd3d_format_is_compressed(vkd3d_desc.format)) + { + WARN("UAVs cannot be created for compressed formats.\n"); +@@ -3747,6 +3761,8 @@ void d3d12_rtv_desc_create_rtv(struct d3d12_rtv_desc *rtv_desc, struct d3d12_dev + if (!init_default_texture_view_desc(&vkd3d_desc, resource, desc ? 
desc->Format : 0)) + return; + ++ vkd3d_desc.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; ++ + if (vkd3d_desc.format->vk_aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) + { + WARN("Trying to create RTV for depth/stencil format %#x.\n", vkd3d_desc.format->dxgi_format); +@@ -3847,6 +3863,8 @@ void d3d12_dsv_desc_create_dsv(struct d3d12_dsv_desc *dsv_desc, struct d3d12_dev + if (!init_default_texture_view_desc(&vkd3d_desc, resource, desc ? desc->Format : 0)) + return; + ++ vkd3d_desc.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; ++ + if (!(vkd3d_desc.format->vk_aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) + { + WARN("Trying to create DSV for format %#x.\n", vkd3d_desc.format->dxgi_format); +diff --git a/libs/vkd3d/libs/vkd3d/vkd3d_main.c b/libs/vkd3d/libs/vkd3d/vkd3d_main.c +index 7651acc3820..245edb5aeac 100644 +--- a/libs/vkd3d/libs/vkd3d/vkd3d_main.c ++++ b/libs/vkd3d/libs/vkd3d/vkd3d_main.c +@@ -71,11 +71,11 @@ HRESULT vkd3d_create_device(const struct vkd3d_device_create_info *create_info, + + if (!device) + { +- ID3D12Device_Release(&object->ID3D12Device2_iface); ++ ID3D12Device_Release(&object->ID3D12Device5_iface); + return S_FALSE; + } + +- return return_interface(&object->ID3D12Device2_iface, &IID_ID3D12Device, iid, device); ++ return return_interface(&object->ID3D12Device5_iface, &IID_ID3D12Device, iid, device); + } + + /* ID3D12RootSignatureDeserializer */ +diff --git a/libs/vkd3d/libs/vkd3d/vkd3d_private.h b/libs/vkd3d/libs/vkd3d/vkd3d_private.h +index fceb06fc05a..89f8b15ee14 100644 +--- a/libs/vkd3d/libs/vkd3d/vkd3d_private.h ++++ b/libs/vkd3d/libs/vkd3d/vkd3d_private.h +@@ -121,6 +121,7 @@ struct vkd3d_vulkan_info + bool KHR_draw_indirect_count; + bool KHR_get_memory_requirements2; + bool KHR_image_format_list; ++ bool KHR_maintenance2; + bool KHR_maintenance3; + bool KHR_push_descriptor; + bool KHR_sampler_mirror_clamp_to_edge; +@@ -835,6 +836,7 @@ struct vkd3d_texture_view_desc + VkImageAspectFlags vk_image_aspect; + VkComponentMapping components; + bool allowed_swizzle; ++ VkImageUsageFlags usage; + }; + + struct vkd3d_desc_header +@@ -1709,7 +1711,7 @@ struct vkd3d_desc_object_cache + /* ID3D12Device */ + struct d3d12_device + { +- ID3D12Device2 ID3D12Device2_iface; ++ ID3D12Device5 ID3D12Device5_iface; + LONG refcount; + + VkDevice vk_device; +@@ -1775,27 +1777,27 @@ struct vkd3d_queue *d3d12_device_get_vkd3d_queue(struct d3d12_device *device, D3 + bool d3d12_device_is_uma(struct d3d12_device *device, bool *coherent); + void d3d12_device_mark_as_removed(struct d3d12_device *device, HRESULT reason, + const char *message, ...) 
VKD3D_PRINTF_FUNC(3, 4); +-struct d3d12_device *unsafe_impl_from_ID3D12Device2(ID3D12Device2 *iface); ++struct d3d12_device *unsafe_impl_from_ID3D12Device5(ID3D12Device5 *iface); + + static inline HRESULT d3d12_device_query_interface(struct d3d12_device *device, REFIID iid, void **object) + { +- return ID3D12Device2_QueryInterface(&device->ID3D12Device2_iface, iid, object); ++ return ID3D12Device5_QueryInterface(&device->ID3D12Device5_iface, iid, object); + } + + static inline ULONG d3d12_device_add_ref(struct d3d12_device *device) + { +- return ID3D12Device2_AddRef(&device->ID3D12Device2_iface); ++ return ID3D12Device5_AddRef(&device->ID3D12Device5_iface); + } + + static inline ULONG d3d12_device_release(struct d3d12_device *device) + { +- return ID3D12Device2_Release(&device->ID3D12Device2_iface); ++ return ID3D12Device5_Release(&device->ID3D12Device5_iface); + } + + static inline unsigned int d3d12_device_get_descriptor_handle_increment_size(struct d3d12_device *device, + D3D12_DESCRIPTOR_HEAP_TYPE descriptor_type) + { +- return ID3D12Device2_GetDescriptorHandleIncrementSize(&device->ID3D12Device2_iface, descriptor_type); ++ return ID3D12Device5_GetDescriptorHandleIncrementSize(&device->ID3D12Device5_iface, descriptor_type); + } + + /* utils */ +-- +2.42.0 + diff --git a/patches/vkd3d-latest/0003-vkd3d-shader-Fix-compiler-warning.patch b/patches/vkd3d-latest/0003-vkd3d-shader-Fix-compiler-warning.patch new file mode 100644 index 00000000..8cc333dc --- /dev/null +++ b/patches/vkd3d-latest/0003-vkd3d-shader-Fix-compiler-warning.patch @@ -0,0 +1,30 @@ +From f624fad55b83330c84a883efa46a1541325db9aa Mon Sep 17 00:00:00 2001 +From: Alistair Leslie-Hughes +Date: Fri, 29 Sep 2023 17:22:13 +1000 +Subject: [PATCH 3/3] vkd3d-shader: Fix compiler warning. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +vkd3d-shader/tpf.c:3810:39: warning: passing argument 2 of ‘sm4_register_from_node’ from incompatible pointer type [-Wincompatible-pointer-types] +vkd3d-shader/tpf.c:4750:59: warning: passing argument 3 of ‘sm4_register_from_deref’ from incompatible pointer type [-Wincompatible-pointer-types] +--- + libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h +index af75ef3bda8..2e9a98a6fc3 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h ++++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h +@@ -758,7 +758,7 @@ void vsir_register_init(struct vkd3d_shader_register *reg, enum vkd3d_shader_reg + struct vkd3d_shader_dst_param + { + struct vkd3d_shader_register reg; +- DWORD write_mask; ++ unsigned int write_mask; + DWORD modifiers; + DWORD shift; + }; +-- +2.42.0 +