From ab9df8f9326906dd359425bc7efd5bb34b3b0c1a Mon Sep 17 00:00:00 2001 From: Alistair Leslie-Hughes Date: Sat, 7 Sep 2024 08:48:33 +1000 Subject: [PATCH] Updated vkd3d-latest patchset --- ...-6d28cc131b0cad61c681aed6b9f6611a12b.patch | 2 +- ...-0a6bcf5da78863cc6402756a429b21b6234.patch | 2749 +++++++++++++++++ ...-bfd1fc9cd6cf9cf4e9c23b4ffad2ba8a328.patch | 619 ++++ 3 files changed, 3369 insertions(+), 1 deletion(-) create mode 100644 patches/vkd3d-latest/0002-Updated-vkd3d-to-0a6bcf5da78863cc6402756a429b21b6234.patch create mode 100644 patches/vkd3d-latest/0003-Updated-vkd3d-to-bfd1fc9cd6cf9cf4e9c23b4ffad2ba8a328.patch diff --git a/patches/vkd3d-latest/0001-Updated-vkd3d-to-6d28cc131b0cad61c681aed6b9f6611a12b.patch b/patches/vkd3d-latest/0001-Updated-vkd3d-to-6d28cc131b0cad61c681aed6b9f6611a12b.patch index 74f2018c..e19fd8d1 100644 --- a/patches/vkd3d-latest/0001-Updated-vkd3d-to-6d28cc131b0cad61c681aed6b9f6611a12b.patch +++ b/patches/vkd3d-latest/0001-Updated-vkd3d-to-6d28cc131b0cad61c681aed6b9f6611a12b.patch @@ -1,4 +1,4 @@ -From b95b0da9efaa79a639d9425769ab01aae98b475d Mon Sep 17 00:00:00 2001 +From 075859526b7d11f65c195a4cc0f2c7b954fec6af Mon Sep 17 00:00:00 2001 From: Alistair Leslie-Hughes Date: Tue, 3 Sep 2024 07:18:49 +1000 Subject: [PATCH] Updated vkd3d to 6d28cc131b0cad61c681aed6b9f6611a12b352d1. diff --git a/patches/vkd3d-latest/0002-Updated-vkd3d-to-0a6bcf5da78863cc6402756a429b21b6234.patch b/patches/vkd3d-latest/0002-Updated-vkd3d-to-0a6bcf5da78863cc6402756a429b21b6234.patch new file mode 100644 index 00000000..e7112e05 --- /dev/null +++ b/patches/vkd3d-latest/0002-Updated-vkd3d-to-0a6bcf5da78863cc6402756a429b21b6234.patch @@ -0,0 +1,2749 @@ +From 9ef5dd9c786b5614acff15554de4b1ba2e5874c7 Mon Sep 17 00:00:00 2001 +From: Alistair Leslie-Hughes +Date: Thu, 5 Sep 2024 06:59:11 +1000 +Subject: [PATCH] Updated vkd3d to 0a6bcf5da78863cc6402756a429b21b623400790. + +--- + libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 495 ++++++----- + libs/vkd3d/libs/vkd3d-shader/hlsl.c | 205 ++++- + libs/vkd3d/libs/vkd3d-shader/hlsl.h | 69 ++ + libs/vkd3d/libs/vkd3d-shader/hlsl.l | 1 + + libs/vkd3d/libs/vkd3d-shader/hlsl.y | 274 +++++-- + libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c | 767 +++++++++++++++++- + .../libs/vkd3d-shader/hlsl_constant_ops.c | 20 +- + libs/vkd3d/libs/vkd3d-shader/ir.c | 2 +- + libs/vkd3d/libs/vkd3d-shader/spirv.c | 36 +- + libs/vkd3d/libs/vkd3d-shader/tpf.c | 24 +- + .../libs/vkd3d-shader/vkd3d_shader_private.h | 13 + + 11 files changed, 1555 insertions(+), 351 deletions(-) + +diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c +index 1145a91f3e6..3b9ec98448d 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c ++++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c +@@ -1468,9 +1468,11 @@ bool hlsl_sm1_usage_from_semantic(const char *semantic_name, + + struct d3dbc_compiler + { ++ const struct vkd3d_sm1_opcode_info *opcode_table; + struct vsir_program *program; + struct vkd3d_bytecode_buffer buffer; + struct vkd3d_shader_message_context *message_context; ++ bool failed; + + /* OBJECTIVE: Store all the required information in the other fields so + * that this hlsl_ctx is no longer necessary. 
*/ +@@ -2169,31 +2171,221 @@ static void d3dbc_write_cast(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + } + } + +-static void d3dbc_write_constant_defs(struct d3dbc_compiler *d3dbc) ++static const struct vkd3d_sm1_opcode_info *shader_sm1_get_opcode_info_from_vsir( ++ struct d3dbc_compiler *d3dbc, enum vkd3d_shader_opcode vkd3d_opcode) ++{ ++ const struct vkd3d_shader_version *version = &d3dbc->program->shader_version; ++ const struct vkd3d_sm1_opcode_info *info; ++ unsigned int i = 0; ++ ++ for (;;) ++ { ++ info = &d3dbc->opcode_table[i++]; ++ if (info->vkd3d_opcode == VKD3DSIH_INVALID) ++ return NULL; ++ ++ if (vkd3d_opcode == info->vkd3d_opcode ++ && vkd3d_shader_ver_ge(version, info->min_version.major, info->min_version.minor) ++ && (vkd3d_shader_ver_le(version, info->max_version.major, info->max_version.minor) ++ || !info->max_version.major)) ++ return info; ++ } ++} ++ ++static uint32_t swizzle_from_vsir(uint32_t swizzle) ++{ ++ uint32_t x = vsir_swizzle_get_component(swizzle, 0); ++ uint32_t y = vsir_swizzle_get_component(swizzle, 1); ++ uint32_t z = vsir_swizzle_get_component(swizzle, 2); ++ uint32_t w = vsir_swizzle_get_component(swizzle, 3); ++ ++ if (x & ~0x3u || y & ~0x3u || z & ~0x3u || w & ~0x3u) ++ ERR("Unexpected vsir swizzle: 0x%08x.\n", swizzle); ++ ++ return ((x & 0x3u) << VKD3D_SM1_SWIZZLE_COMPONENT_SHIFT(0)) ++ | ((y & 0x3) << VKD3D_SM1_SWIZZLE_COMPONENT_SHIFT(1)) ++ | ((z & 0x3) << VKD3D_SM1_SWIZZLE_COMPONENT_SHIFT(2)) ++ | ((w & 0x3) << VKD3D_SM1_SWIZZLE_COMPONENT_SHIFT(3)); ++} ++ ++static void sm1_src_reg_from_vsir(struct d3dbc_compiler *d3dbc, const struct vkd3d_shader_src_param *param, ++ struct sm1_src_register *src, const struct vkd3d_shader_location *loc) ++{ ++ src->mod = (uint32_t)param->modifiers << VKD3D_SM1_SRC_MODIFIER_SHIFT; ++ src->reg = param->reg.idx[0].offset; ++ src->type = param->reg.type; ++ src->swizzle = swizzle_from_vsir(param->swizzle); ++ ++ if (param->reg.idx[0].rel_addr) ++ { ++ vkd3d_shader_error(d3dbc->message_context, loc, VKD3D_SHADER_ERROR_D3DBC_NOT_IMPLEMENTED, ++ "Unhandled relative addressing on source register."); ++ d3dbc->failed = true; ++ } ++} ++ ++static void sm1_dst_reg_from_vsir(struct d3dbc_compiler *d3dbc, const struct vkd3d_shader_dst_param *param, ++ struct sm1_dst_register *dst, const struct vkd3d_shader_location *loc) ++{ ++ dst->mod = (uint32_t)param->modifiers << VKD3D_SM1_DST_MODIFIER_SHIFT; ++ dst->reg = param->reg.idx[0].offset; ++ dst->type = param->reg.type; ++ dst->writemask = param->write_mask; ++ ++ if (param->reg.idx[0].rel_addr) ++ { ++ vkd3d_shader_error(d3dbc->message_context, loc, VKD3D_SHADER_ERROR_D3DBC_NOT_IMPLEMENTED, ++ "Unhandled relative addressing on destination register."); ++ d3dbc->failed = true; ++ } ++} ++ ++static void d3dbc_write_vsir_def(struct d3dbc_compiler *d3dbc, const struct vkd3d_shader_instruction *ins) + { + const struct vkd3d_shader_version *version = &d3dbc->program->shader_version; + struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer; +- struct hlsl_ctx *ctx = d3dbc->ctx; +- unsigned int i, x; ++ uint32_t token; + +- for (i = 0; i < ctx->constant_defs.count; ++i) ++ const struct sm1_dst_register reg = + { +- const struct hlsl_constant_register *constant_reg = &ctx->constant_defs.regs[i]; +- uint32_t token = D3DSIO_DEF; +- const struct sm1_dst_register reg = +- { +- .type = VKD3DSPR_CONST, +- .writemask = VKD3DSP_WRITEMASK_ALL, +- .reg = constant_reg->index, +- }; ++ .type = VKD3DSPR_CONST, ++ .writemask = VKD3DSP_WRITEMASK_ALL, ++ .reg = 
ins->dst[0].reg.idx[0].offset, ++ }; ++ ++ token = VKD3D_SM1_OP_DEF; ++ if (version->major > 1) ++ token |= 5 << VKD3D_SM1_INSTRUCTION_LENGTH_SHIFT; ++ put_u32(buffer, token); + +- if (version->major > 1) +- token |= 5 << D3DSI_INSTLENGTH_SHIFT; +- put_u32(buffer, token); ++ write_sm1_dst_register(buffer, ®); ++ for (unsigned int x = 0; x < 4; ++x) ++ put_f32(buffer, ins->src[0].reg.u.immconst_f32[x]); ++} ++ ++static void d3dbc_write_vsir_sampler_dcl(struct d3dbc_compiler *d3dbc, ++ unsigned int reg_id, enum vkd3d_sm1_resource_type res_type) ++{ ++ const struct vkd3d_shader_version *version = &d3dbc->program->shader_version; ++ struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer; ++ struct sm1_dst_register reg = {0}; ++ uint32_t token; ++ ++ token = VKD3D_SM1_OP_DCL; ++ if (version->major > 1) ++ token |= 2 << VKD3D_SM1_INSTRUCTION_LENGTH_SHIFT; ++ put_u32(buffer, token); ++ ++ token = VKD3D_SM1_INSTRUCTION_PARAMETER; ++ token |= res_type << VKD3D_SM1_RESOURCE_TYPE_SHIFT; ++ put_u32(buffer, token); ++ ++ reg.type = VKD3DSPR_COMBINED_SAMPLER; ++ reg.writemask = VKD3DSP_WRITEMASK_ALL; ++ reg.reg = reg_id; ++ ++ write_sm1_dst_register(buffer, ®); ++} ++ ++static void d3dbc_write_vsir_dcl(struct d3dbc_compiler *d3dbc, const struct vkd3d_shader_instruction *ins) ++{ ++ const struct vkd3d_shader_version *version = &d3dbc->program->shader_version; ++ const struct vkd3d_shader_semantic *semantic = &ins->declaration.semantic; ++ unsigned int reg_id; + +- write_sm1_dst_register(buffer, ®); +- for (x = 0; x < 4; ++x) +- put_f32(buffer, constant_reg->value.f[x]); ++ if (version->major < 2) ++ return; ++ ++ reg_id = semantic->resource.reg.reg.idx[0].offset; ++ ++ if (semantic->resource.reg.reg.type != VKD3DSPR_SAMPLER) ++ { ++ vkd3d_shader_error(d3dbc->message_context, &ins->location, VKD3D_SHADER_ERROR_D3DBC_INVALID_REGISTER_TYPE, ++ "dcl instruction with register type %u.", semantic->resource.reg.reg.type); ++ d3dbc->failed = true; ++ return; ++ } ++ ++ switch (semantic->resource_type) ++ { ++ case VKD3D_SHADER_RESOURCE_TEXTURE_2D: ++ d3dbc_write_vsir_sampler_dcl(d3dbc, reg_id, VKD3D_SM1_RESOURCE_TEXTURE_2D); ++ break; ++ ++ case VKD3D_SHADER_RESOURCE_TEXTURE_CUBE: ++ d3dbc_write_vsir_sampler_dcl(d3dbc, reg_id, VKD3D_SM1_RESOURCE_TEXTURE_CUBE); ++ break; ++ ++ case VKD3D_SHADER_RESOURCE_TEXTURE_3D: ++ d3dbc_write_vsir_sampler_dcl(d3dbc, reg_id, VKD3D_SM1_RESOURCE_TEXTURE_3D); ++ break; ++ ++ default: ++ vkd3d_shader_error(d3dbc->message_context, &ins->location, VKD3D_SHADER_ERROR_D3DBC_INVALID_RESOURCE_TYPE, ++ "dcl instruction with resource_type %u.", semantic->resource_type); ++ d3dbc->failed = true; ++ return; ++ } ++} ++ ++static void d3dbc_write_vsir_simple_instruction(struct d3dbc_compiler *d3dbc, ++ const struct vkd3d_shader_instruction *ins) ++{ ++ const struct vkd3d_sm1_opcode_info *info; ++ struct sm1_instruction instr = {0}; ++ ++ info = shader_sm1_get_opcode_info_from_vsir(d3dbc, ins->opcode); ++ ++ if (ins->dst_count != info->dst_count) ++ { ++ vkd3d_shader_error(d3dbc->message_context, &ins->location, VKD3D_SHADER_ERROR_D3DBC_INVALID_REGISTER_COUNT, ++ "Invalid destination count %u for vsir instruction %#x (expected %u).", ++ ins->dst_count, ins->opcode, info->dst_count); ++ d3dbc->failed = true; ++ return; ++ } ++ if (ins->src_count != info->src_count) ++ { ++ vkd3d_shader_error(d3dbc->message_context, &ins->location, VKD3D_SHADER_ERROR_D3DBC_INVALID_REGISTER_COUNT, ++ "Invalid source count %u for vsir instruction %#x (expected %u).", ++ ins->src_count, ins->opcode, 
info->src_count); ++ d3dbc->failed = true; ++ return; ++ } ++ ++ instr.opcode = (D3DSHADER_INSTRUCTION_OPCODE_TYPE)info->sm1_opcode; ++ instr.has_dst = info->dst_count; ++ instr.src_count = info->src_count; ++ ++ if (instr.has_dst) ++ sm1_dst_reg_from_vsir(d3dbc, &ins->dst[0], &instr.dst, &ins->location); ++ for (unsigned int i = 0; i < instr.src_count; ++i) ++ sm1_src_reg_from_vsir(d3dbc, &ins->src[i], &instr.srcs[i], &ins->location); ++ ++ d3dbc_write_instruction(d3dbc, &instr); ++} ++ ++static void d3dbc_write_vsir_instruction(struct d3dbc_compiler *d3dbc, const struct vkd3d_shader_instruction *ins) ++{ ++ switch (ins->opcode) ++ { ++ case VKD3DSIH_DEF: ++ d3dbc_write_vsir_def(d3dbc, ins); ++ break; ++ ++ case VKD3DSIH_DCL: ++ d3dbc_write_vsir_dcl(d3dbc, ins); ++ break; ++ ++ case VKD3DSIH_MOV: ++ d3dbc_write_vsir_simple_instruction(d3dbc, ins); ++ break; ++ ++ default: ++ vkd3d_shader_error(d3dbc->message_context, &ins->location, VKD3D_SHADER_ERROR_D3DBC_INVALID_OPCODE, ++ "vsir instruction with opcode %#x.", ins->opcode); ++ d3dbc->failed = true; ++ break; + } + } + +@@ -2262,110 +2454,6 @@ static void d3dbc_write_semantic_dcls(struct d3dbc_compiler *d3dbc) + } + } + +-static void d3dbc_write_sampler_dcl(struct d3dbc_compiler *d3dbc, +- unsigned int reg_id, enum hlsl_sampler_dim sampler_dim) +-{ +- const struct vkd3d_shader_version *version = &d3dbc->program->shader_version; +- struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer; +- struct sm1_dst_register reg = {0}; +- uint32_t token, res_type = 0; +- +- token = D3DSIO_DCL; +- if (version->major > 1) +- token |= 2 << D3DSI_INSTLENGTH_SHIFT; +- put_u32(buffer, token); +- +- switch (sampler_dim) +- { +- case HLSL_SAMPLER_DIM_2D: +- res_type = VKD3D_SM1_RESOURCE_TEXTURE_2D; +- break; +- +- case HLSL_SAMPLER_DIM_CUBE: +- res_type = VKD3D_SM1_RESOURCE_TEXTURE_CUBE; +- break; +- +- case HLSL_SAMPLER_DIM_3D: +- res_type = VKD3D_SM1_RESOURCE_TEXTURE_3D; +- break; +- +- default: +- vkd3d_unreachable(); +- break; +- } +- +- token = (1u << 31); +- token |= res_type << VKD3D_SM1_RESOURCE_TYPE_SHIFT; +- put_u32(buffer, token); +- +- reg.type = VKD3DSPR_COMBINED_SAMPLER; +- reg.writemask = VKD3DSP_WRITEMASK_ALL; +- reg.reg = reg_id; +- +- write_sm1_dst_register(buffer, ®); +-} +- +-static void d3dbc_write_sampler_dcls(struct d3dbc_compiler *d3dbc) +-{ +- const struct vkd3d_shader_version *version = &d3dbc->program->shader_version; +- struct hlsl_ctx *ctx = d3dbc->ctx; +- enum hlsl_sampler_dim sampler_dim; +- unsigned int i, count, reg_id; +- struct hlsl_ir_var *var; +- +- if (version->major < 2) +- return; +- +- LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry) +- { +- if (!var->regs[HLSL_REGSET_SAMPLERS].allocated) +- continue; +- +- count = var->bind_count[HLSL_REGSET_SAMPLERS]; +- +- for (i = 0; i < count; ++i) +- { +- if (var->objects_usage[HLSL_REGSET_SAMPLERS][i].used) +- { +- sampler_dim = var->objects_usage[HLSL_REGSET_SAMPLERS][i].sampler_dim; +- if (sampler_dim == HLSL_SAMPLER_DIM_GENERIC) +- { +- /* These can appear in sm4-style combined sample instructions. 
*/ +- hlsl_fixme(ctx, &var->loc, "Generic samplers need to be lowered."); +- continue; +- } +- +- reg_id = var->regs[HLSL_REGSET_SAMPLERS].index + i; +- d3dbc_write_sampler_dcl(d3dbc, reg_id, sampler_dim); +- } +- } +- } +-} +- +-static void d3dbc_write_constant(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_node *instr) +-{ +- const struct hlsl_ir_constant *constant = hlsl_ir_constant(instr); +- struct sm1_instruction sm1_instr = +- { +- .opcode = D3DSIO_MOV, +- +- .dst.type = VKD3DSPR_TEMP, +- .dst.reg = instr->reg.id, +- .dst.writemask = instr->reg.writemask, +- .has_dst = 1, +- +- .srcs[0].type = VKD3DSPR_CONST, +- .srcs[0].reg = constant->reg.id, +- .srcs[0].swizzle = hlsl_swizzle_from_writemask(constant->reg.writemask), +- .src_count = 1, +- }; +- +- VKD3D_ASSERT(instr->reg.allocated); +- VKD3D_ASSERT(constant->reg.allocated); +- sm1_map_src_swizzle(&sm1_instr.srcs[0], sm1_instr.dst.writemask); +- d3dbc_write_instruction(d3dbc, &sm1_instr); +-} +- + static void d3dbc_write_per_component_unary_op(struct d3dbc_compiler *d3dbc, + const struct hlsl_ir_node *instr, D3DSHADER_INSTRUCTION_OPCODE_TYPE opcode) + { +@@ -2636,50 +2724,6 @@ static void d3dbc_write_jump(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + } + } + +-static void d3dbc_write_load(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_node *instr) +-{ +- const struct hlsl_ir_load *load = hlsl_ir_load(instr); +- struct hlsl_ctx *ctx = d3dbc->ctx; +- const struct hlsl_reg reg = hlsl_reg_from_deref(ctx, &load->src); +- struct sm1_instruction sm1_instr = +- { +- .opcode = D3DSIO_MOV, +- +- .dst.type = VKD3DSPR_TEMP, +- .dst.reg = instr->reg.id, +- .dst.writemask = instr->reg.writemask, +- .has_dst = 1, +- +- .srcs[0].type = VKD3DSPR_TEMP, +- .srcs[0].reg = reg.id, +- .srcs[0].swizzle = hlsl_swizzle_from_writemask(reg.writemask), +- .src_count = 1, +- }; +- +- VKD3D_ASSERT(instr->reg.allocated); +- +- if (load->src.var->is_uniform) +- { +- VKD3D_ASSERT(reg.allocated); +- sm1_instr.srcs[0].type = VKD3DSPR_CONST; +- } +- else if (load->src.var->is_input_semantic) +- { +- if (!hlsl_sm1_register_from_semantic(&d3dbc->program->shader_version, load->src.var->semantic.name, +- load->src.var->semantic.index, false, &sm1_instr.srcs[0].type, &sm1_instr.srcs[0].reg)) +- { +- VKD3D_ASSERT(reg.allocated); +- sm1_instr.srcs[0].type = VKD3DSPR_INPUT; +- sm1_instr.srcs[0].reg = reg.id; +- } +- else +- sm1_instr.srcs[0].swizzle = hlsl_swizzle_from_writemask((1 << load->src.var->data_type->dimx) - 1); +- } +- +- sm1_map_src_swizzle(&sm1_instr.srcs[0], sm1_instr.dst.writemask); +- d3dbc_write_instruction(d3dbc, &sm1_instr); +-} +- + static void d3dbc_write_resource_load(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_node *instr) + { + const struct hlsl_ir_resource_load *load = hlsl_ir_resource_load(instr); +@@ -2751,82 +2795,12 @@ static void d3dbc_write_resource_load(struct d3dbc_compiler *d3dbc, const struct + d3dbc_write_instruction(d3dbc, &sm1_instr); + } + +-static void d3dbc_write_store(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_node *instr) +-{ +- const struct vkd3d_shader_version *version = &d3dbc->program->shader_version; +- const struct hlsl_ir_store *store = hlsl_ir_store(instr); +- struct hlsl_ctx *ctx = d3dbc->ctx; +- const struct hlsl_reg reg = hlsl_reg_from_deref(ctx, &store->lhs); +- const struct hlsl_ir_node *rhs = store->rhs.node; +- struct sm1_instruction sm1_instr = +- { +- .opcode = D3DSIO_MOV, +- +- .dst.type = VKD3DSPR_TEMP, +- .dst.reg = reg.id, +- .dst.writemask = hlsl_combine_writemasks(reg.writemask, 
store->writemask), +- .has_dst = 1, +- +- .srcs[0].type = VKD3DSPR_TEMP, +- .srcs[0].reg = rhs->reg.id, +- .srcs[0].swizzle = hlsl_swizzle_from_writemask(rhs->reg.writemask), +- .src_count = 1, +- }; +- +- if (store->lhs.var->is_output_semantic) +- { +- if (version->type == VKD3D_SHADER_TYPE_PIXEL && version->major == 1) +- { +- sm1_instr.dst.type = VKD3DSPR_TEMP; +- sm1_instr.dst.reg = 0; +- } +- else if (!hlsl_sm1_register_from_semantic(&d3dbc->program->shader_version, store->lhs.var->semantic.name, +- store->lhs.var->semantic.index, true, &sm1_instr.dst.type, &sm1_instr.dst.reg)) +- { +- VKD3D_ASSERT(reg.allocated); +- sm1_instr.dst.type = VKD3DSPR_OUTPUT; +- sm1_instr.dst.reg = reg.id; +- } +- else +- sm1_instr.dst.writemask = (1u << store->lhs.var->data_type->dimx) - 1; +- } +- else +- VKD3D_ASSERT(reg.allocated); +- +- sm1_map_src_swizzle(&sm1_instr.srcs[0], sm1_instr.dst.writemask); +- d3dbc_write_instruction(d3dbc, &sm1_instr); +-} +- +-static void d3dbc_write_swizzle(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_node *instr) +-{ +- const struct hlsl_ir_swizzle *swizzle = hlsl_ir_swizzle(instr); +- const struct hlsl_ir_node *val = swizzle->val.node; +- struct sm1_instruction sm1_instr = +- { +- .opcode = D3DSIO_MOV, +- +- .dst.type = VKD3DSPR_TEMP, +- .dst.reg = instr->reg.id, +- .dst.writemask = instr->reg.writemask, +- .has_dst = 1, +- +- .srcs[0].type = VKD3DSPR_TEMP, +- .srcs[0].reg = val->reg.id, +- .srcs[0].swizzle = hlsl_combine_swizzles(hlsl_swizzle_from_writemask(val->reg.writemask), +- swizzle->swizzle, instr->data_type->dimx), +- .src_count = 1, +- }; +- +- VKD3D_ASSERT(instr->reg.allocated); +- VKD3D_ASSERT(val->reg.allocated); +- sm1_map_src_swizzle(&sm1_instr.srcs[0], sm1_instr.dst.writemask); +- d3dbc_write_instruction(d3dbc, &sm1_instr); +-} +- + static void d3dbc_write_block(struct d3dbc_compiler *d3dbc, const struct hlsl_block *block) + { ++ struct vkd3d_shader_instruction *vsir_instr; + struct hlsl_ctx *ctx = d3dbc->ctx; + const struct hlsl_ir_node *instr; ++ unsigned int vsir_instr_idx; + + LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry) + { +@@ -2844,10 +2818,6 @@ static void d3dbc_write_block(struct d3dbc_compiler *d3dbc, const struct hlsl_bl + case HLSL_IR_CALL: + vkd3d_unreachable(); + +- case HLSL_IR_CONSTANT: +- d3dbc_write_constant(d3dbc, instr); +- break; +- + case HLSL_IR_EXPR: + d3dbc_write_expr(d3dbc, instr); + break; +@@ -2863,20 +2833,14 @@ static void d3dbc_write_block(struct d3dbc_compiler *d3dbc, const struct hlsl_bl + d3dbc_write_jump(d3dbc, instr); + break; + +- case HLSL_IR_LOAD: +- d3dbc_write_load(d3dbc, instr); +- break; +- + case HLSL_IR_RESOURCE_LOAD: + d3dbc_write_resource_load(d3dbc, instr); + break; + +- case HLSL_IR_STORE: +- d3dbc_write_store(d3dbc, instr); +- break; +- +- case HLSL_IR_SWIZZLE: +- d3dbc_write_swizzle(d3dbc, instr); ++ case HLSL_IR_VSIR_INSTRUCTION_REF: ++ vsir_instr_idx = hlsl_ir_vsir_instruction_ref(instr)->vsir_instr_idx; ++ vsir_instr = &d3dbc->program->instructions.elements[vsir_instr_idx]; ++ d3dbc_write_vsir_instruction(d3dbc, vsir_instr); + break; + + default: +@@ -2897,26 +2861,43 @@ int d3dbc_compile(struct vsir_program *program, uint64_t config_flags, + const struct vkd3d_shader_version *version = &program->shader_version; + struct d3dbc_compiler d3dbc = {0}; + struct vkd3d_bytecode_buffer *buffer = &d3dbc.buffer; ++ int result; + + d3dbc.ctx = ctx; + d3dbc.program = program; + d3dbc.message_context = message_context; ++ switch (version->type) ++ { ++ case 
VKD3D_SHADER_TYPE_VERTEX: ++ d3dbc.opcode_table = vs_opcode_table; ++ break; ++ ++ case VKD3D_SHADER_TYPE_PIXEL: ++ d3dbc.opcode_table = ps_opcode_table; ++ break; ++ ++ default: ++ vkd3d_shader_error(message_context, NULL, VKD3D_SHADER_ERROR_D3DBC_INVALID_PROFILE, ++ "Invalid shader type %u.", version->type); ++ return VKD3D_ERROR_INVALID_SHADER; ++ } + + put_u32(buffer, sm1_version(version->type, version->major, version->minor)); + + bytecode_put_bytes(buffer, ctab->code, ctab->size); + +- d3dbc_write_constant_defs(&d3dbc); + d3dbc_write_semantic_dcls(&d3dbc); +- d3dbc_write_sampler_dcls(&d3dbc); + d3dbc_write_block(&d3dbc, &entry_func->body); + + put_u32(buffer, D3DSIO_END); + ++ result = ctx->result; + if (buffer->status) +- ctx->result = buffer->status; ++ result = buffer->status; ++ if (d3dbc.failed) ++ result = VKD3D_ERROR_INVALID_SHADER; + +- if (!ctx->result) ++ if (!result) + { + out->code = buffer->data; + out->size = buffer->size; +@@ -2925,5 +2906,5 @@ int d3dbc_compile(struct vsir_program *program, uint64_t config_flags, + { + vkd3d_free(buffer->data); + } +- return ctx->result; ++ return result; + } +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.c b/libs/vkd3d/libs/vkd3d-shader/hlsl.c +index bd5baacd83d..6f737be2e2a 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.c ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.c +@@ -254,6 +254,45 @@ bool hlsl_type_is_resource(const struct hlsl_type *type) + } + } + ++bool hlsl_type_is_shader(const struct hlsl_type *type) ++{ ++ switch (type->class) ++ { ++ case HLSL_CLASS_ARRAY: ++ return hlsl_type_is_shader(type->e.array.type); ++ ++ case HLSL_CLASS_COMPUTE_SHADER: ++ case HLSL_CLASS_DOMAIN_SHADER: ++ case HLSL_CLASS_GEOMETRY_SHADER: ++ case HLSL_CLASS_HULL_SHADER: ++ case HLSL_CLASS_PIXEL_SHADER: ++ case HLSL_CLASS_VERTEX_SHADER: ++ return true; ++ ++ case HLSL_CLASS_SCALAR: ++ case HLSL_CLASS_VECTOR: ++ case HLSL_CLASS_MATRIX: ++ case HLSL_CLASS_STRUCT: ++ case HLSL_CLASS_DEPTH_STENCIL_STATE: ++ case HLSL_CLASS_DEPTH_STENCIL_VIEW: ++ case HLSL_CLASS_EFFECT_GROUP: ++ case HLSL_CLASS_PASS: ++ case HLSL_CLASS_RASTERIZER_STATE: ++ case HLSL_CLASS_RENDER_TARGET_VIEW: ++ case HLSL_CLASS_SAMPLER: ++ case HLSL_CLASS_STRING: ++ case HLSL_CLASS_TECHNIQUE: ++ case HLSL_CLASS_TEXTURE: ++ case HLSL_CLASS_UAV: ++ case HLSL_CLASS_CONSTANT_BUFFER: ++ case HLSL_CLASS_BLEND_STATE: ++ case HLSL_CLASS_VOID: ++ case HLSL_CLASS_NULL: ++ return false; ++ } ++ return false; ++} ++ + /* Only intended to be used for derefs (after copies have been lowered to components or vectors) or + * resources, since for both their data types span across a single regset. 
*/ + static enum hlsl_regset type_get_regset(const struct hlsl_type *type) +@@ -1640,6 +1679,22 @@ struct hlsl_ir_node *hlsl_new_switch(struct hlsl_ctx *ctx, struct hlsl_ir_node * + return &s->node; + } + ++struct hlsl_ir_node *hlsl_new_vsir_instruction_ref(struct hlsl_ctx *ctx, unsigned int vsir_instr_idx, ++ struct hlsl_type *type, const struct hlsl_reg *reg, const struct vkd3d_shader_location *loc) ++{ ++ struct hlsl_ir_vsir_instruction_ref *vsir_instr; ++ ++ if (!(vsir_instr = hlsl_alloc(ctx, sizeof(*vsir_instr)))) ++ return NULL; ++ init_node(&vsir_instr->node, HLSL_IR_VSIR_INSTRUCTION_REF, type, loc); ++ vsir_instr->vsir_instr_idx = vsir_instr_idx; ++ ++ if (reg) ++ vsir_instr->node.reg = *reg; ++ ++ return &vsir_instr->node; ++} ++ + struct hlsl_ir_load *hlsl_new_load_index(struct hlsl_ctx *ctx, const struct hlsl_deref *deref, + struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc) + { +@@ -1792,6 +1847,54 @@ struct hlsl_ir_node *hlsl_new_swizzle(struct hlsl_ctx *ctx, uint32_t s, unsigned + return &swizzle->node; + } + ++struct hlsl_ir_node *hlsl_new_compile(struct hlsl_ctx *ctx, const char *profile_name, ++ struct hlsl_ir_node **args, unsigned int args_count, struct hlsl_block *args_instrs, ++ const struct vkd3d_shader_location *loc) ++{ ++ const struct hlsl_profile_info *profile_info = NULL; ++ struct hlsl_ir_compile *compile; ++ struct hlsl_type *type = NULL; ++ unsigned int i; ++ ++ if (!(profile_info = hlsl_get_target_info(profile_name))) ++ { ++ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_PROFILE, "Unknown profile \"%s\".", profile_name); ++ return NULL; ++ } ++ ++ if (profile_info->type == VKD3D_SHADER_TYPE_PIXEL) ++ type = hlsl_get_type(ctx->cur_scope, "PixelShader", true, true); ++ else if (profile_info->type == VKD3D_SHADER_TYPE_VERTEX) ++ type = hlsl_get_type(ctx->cur_scope, "VertexShader", true, true); ++ ++ if (!type) ++ { ++ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_PROFILE, "Invalid profile \"%s\".", profile_name); ++ return NULL; ++ } ++ ++ if (!(compile = hlsl_alloc(ctx, sizeof(*compile)))) ++ return NULL; ++ ++ init_node(&compile->node, HLSL_IR_COMPILE, type, loc); ++ ++ compile->profile = profile_info; ++ ++ hlsl_block_init(&compile->instrs); ++ hlsl_block_add_block(&compile->instrs, args_instrs); ++ ++ compile->args_count = args_count; ++ if (!(compile->args = hlsl_alloc(ctx, sizeof(*compile->args) * args_count))) ++ { ++ vkd3d_free(compile); ++ return NULL; ++ } ++ for (i = 0; i < compile->args_count; ++i) ++ hlsl_src_from_node(&compile->args[i], args[i]); ++ ++ return &compile->node; ++} ++ + struct hlsl_ir_node *hlsl_new_stateblock_constant(struct hlsl_ctx *ctx, const char *name, + struct vkd3d_shader_location *loc) + { +@@ -2142,6 +2245,43 @@ static struct hlsl_ir_node *clone_index(struct hlsl_ctx *ctx, struct clone_instr + return dst; + } + ++static struct hlsl_ir_node *clone_compile(struct hlsl_ctx *ctx, ++ struct clone_instr_map *map, struct hlsl_ir_compile *compile) ++{ ++ const char *profile_name = NULL; ++ struct hlsl_ir_node **args; ++ struct hlsl_ir_node *node; ++ struct hlsl_block block; ++ unsigned int i; ++ ++ if (!(clone_block(ctx, &block, &compile->instrs, map))) ++ return NULL; ++ ++ if (!(args = hlsl_alloc(ctx, sizeof(*args) * compile->args_count))) ++ { ++ hlsl_block_cleanup(&block); ++ return NULL; ++ } ++ for (i = 0; i < compile->args_count; ++i) ++ { ++ args[i] = map_instr(map, compile->args[i].node); ++ VKD3D_ASSERT(args[i]); ++ } ++ ++ if (compile->profile) ++ profile_name = compile->profile->name; ++ ++ 
if (!(node = hlsl_new_compile(ctx, profile_name, args, compile->args_count, &block, &compile->node.loc))) ++ { ++ hlsl_block_cleanup(&block); ++ vkd3d_free(args); ++ return NULL; ++ } ++ ++ vkd3d_free(args); ++ return node; ++} ++ + static struct hlsl_ir_node *clone_stateblock_constant(struct hlsl_ctx *ctx, + struct clone_instr_map *map, struct hlsl_ir_stateblock_constant *constant) + { +@@ -2284,8 +2424,14 @@ static struct hlsl_ir_node *clone_instr(struct hlsl_ctx *ctx, + case HLSL_IR_SWIZZLE: + return clone_swizzle(ctx, map, hlsl_ir_swizzle(instr)); + ++ case HLSL_IR_COMPILE: ++ return clone_compile(ctx, map, hlsl_ir_compile(instr)); ++ + case HLSL_IR_STATEBLOCK_CONSTANT: + return clone_stateblock_constant(ctx, map, hlsl_ir_stateblock_constant(instr)); ++ ++ case HLSL_IR_VSIR_INSTRUCTION_REF: ++ vkd3d_unreachable(); + } + + vkd3d_unreachable(); +@@ -2698,7 +2844,10 @@ const char *hlsl_node_type_to_string(enum hlsl_ir_node_type type) + [HLSL_IR_STORE ] = "HLSL_IR_STORE", + [HLSL_IR_SWITCH ] = "HLSL_IR_SWITCH", + [HLSL_IR_SWIZZLE ] = "HLSL_IR_SWIZZLE", ++ ++ [HLSL_IR_COMPILE] = "HLSL_IR_COMPILE", + [HLSL_IR_STATEBLOCK_CONSTANT] = "HLSL_IR_STATEBLOCK_CONSTANT", ++ [HLSL_IR_VSIR_INSTRUCTION_REF] = "HLSL_IR_VSIR_INSTRUCTION_REF", + }; + + if (type >= ARRAY_SIZE(names)) +@@ -3146,6 +3295,25 @@ static void dump_ir_index(struct vkd3d_string_buffer *buffer, const struct hlsl_ + vkd3d_string_buffer_printf(buffer, "]"); + } + ++static void dump_ir_compile(struct hlsl_ctx *ctx, struct vkd3d_string_buffer *buffer, ++ const struct hlsl_ir_compile *compile) ++{ ++ unsigned int i; ++ ++ vkd3d_string_buffer_printf(buffer, "compile %s {\n", compile->profile->name); ++ ++ dump_block(ctx, buffer, &compile->instrs); ++ ++ vkd3d_string_buffer_printf(buffer, " %10s } (", ""); ++ for (i = 0; i < compile->args_count; ++i) ++ { ++ dump_src(buffer, &compile->args[i]); ++ if (i + 1 < compile->args_count) ++ vkd3d_string_buffer_printf(buffer, ", "); ++ } ++ vkd3d_string_buffer_printf(buffer, ")"); ++} ++ + static void dump_ir_stateblock_constant(struct vkd3d_string_buffer *buffer, + const struct hlsl_ir_stateblock_constant *constant) + { +@@ -3245,9 +3413,18 @@ static void dump_instr(struct hlsl_ctx *ctx, struct vkd3d_string_buffer *buffer, + dump_ir_swizzle(buffer, hlsl_ir_swizzle(instr)); + break; + ++ case HLSL_IR_COMPILE: ++ dump_ir_compile(ctx, buffer, hlsl_ir_compile(instr)); ++ break; ++ + case HLSL_IR_STATEBLOCK_CONSTANT: + dump_ir_stateblock_constant(buffer, hlsl_ir_stateblock_constant(instr)); + break; ++ ++ case HLSL_IR_VSIR_INSTRUCTION_REF: ++ vkd3d_string_buffer_printf(buffer, "vsir_program instruction %u", ++ hlsl_ir_vsir_instruction_ref(instr)->vsir_instr_idx); ++ break; + } + } + +@@ -3308,8 +3485,8 @@ void hlsl_replace_node(struct hlsl_ir_node *old, struct hlsl_ir_node *new) + { + struct hlsl_src *src, *next; + +- VKD3D_ASSERT(old->data_type->dimx == new->data_type->dimx); +- VKD3D_ASSERT(old->data_type->dimy == new->data_type->dimy); ++ VKD3D_ASSERT(old->data_type == new->data_type || old->data_type->dimx == new->data_type->dimx); ++ VKD3D_ASSERT(old->data_type == new->data_type || old->data_type->dimy == new->data_type->dimy); + + LIST_FOR_EACH_ENTRY_SAFE(src, next, &old->uses, struct hlsl_src, entry) + { +@@ -3459,6 +3636,17 @@ static void free_ir_index(struct hlsl_ir_index *index) + vkd3d_free(index); + } + ++static void free_ir_compile(struct hlsl_ir_compile *compile) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < compile->args_count; ++i) ++ hlsl_src_remove(&compile->args[i]); ++ ++ 
hlsl_block_cleanup(&compile->instrs); ++ vkd3d_free(compile); ++} ++ + static void free_ir_stateblock_constant(struct hlsl_ir_stateblock_constant *constant) + { + vkd3d_free(constant->name); +@@ -3527,9 +3715,17 @@ void hlsl_free_instr(struct hlsl_ir_node *node) + free_ir_switch(hlsl_ir_switch(node)); + break; + ++ case HLSL_IR_COMPILE: ++ free_ir_compile(hlsl_ir_compile(node)); ++ break; ++ + case HLSL_IR_STATEBLOCK_CONSTANT: + free_ir_stateblock_constant(hlsl_ir_stateblock_constant(node)); + break; ++ ++ case HLSL_IR_VSIR_INSTRUCTION_REF: ++ vkd3d_free(hlsl_ir_vsir_instruction_ref(node)); ++ break; + } + } + +@@ -4078,6 +4274,11 @@ static bool hlsl_ctx_init(struct hlsl_ctx *ctx, const struct vkd3d_shader_compil + } + } + ++ ctx->domain = VKD3D_TESSELLATOR_DOMAIN_INVALID; ++ ctx->output_control_point_count = UINT_MAX; ++ ctx->output_primitive = 0; ++ ctx->partitioning = 0; ++ + return true; + } + +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.h b/libs/vkd3d/libs/vkd3d-shader/hlsl.h +index bf38c0cd945..9bcba99efff 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.h ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.h +@@ -70,6 +70,14 @@ static inline unsigned int hlsl_swizzle_get_component(uint32_t swizzle, unsigned + return (swizzle >> HLSL_SWIZZLE_SHIFT(idx)) & HLSL_SWIZZLE_MASK; + } + ++static inline uint32_t vsir_swizzle_from_hlsl(uint32_t swizzle) ++{ ++ return vkd3d_shader_create_swizzle(hlsl_swizzle_get_component(swizzle, 0), ++ hlsl_swizzle_get_component(swizzle, 1), ++ hlsl_swizzle_get_component(swizzle, 2), ++ hlsl_swizzle_get_component(swizzle, 3)); ++} ++ + enum hlsl_type_class + { + HLSL_CLASS_SCALAR, +@@ -316,7 +324,11 @@ enum hlsl_ir_node_type + HLSL_IR_STORE, + HLSL_IR_SWIZZLE, + HLSL_IR_SWITCH, ++ ++ HLSL_IR_COMPILE, + HLSL_IR_STATEBLOCK_CONSTANT, ++ ++ HLSL_IR_VSIR_INSTRUCTION_REF, + }; + + /* Common data for every type of IR instruction node. */ +@@ -854,6 +866,27 @@ struct hlsl_ir_string_constant + char *string; + }; + ++/* Represents shader compilation call for effects, such as "CompileShader()". ++ * ++ * Unlike hlsl_ir_call, it is not flattened, thus, it keeps track of its ++ * arguments and maintains its own instruction block. */ ++struct hlsl_ir_compile ++{ ++ struct hlsl_ir_node node; ++ ++ /* Special field to store the profile argument. */ ++ const struct hlsl_profile_info *profile; ++ ++ /* Block containing the instructions required by the arguments of the ++ * compilation call. */ ++ struct hlsl_block instrs; ++ ++ /* Arguments to the compilation call. For a "compile" or "CompileShader()" ++ * args[0] is an hlsl_ir_call to the specified function. */ ++ struct hlsl_src *args; ++ unsigned int args_count; ++}; ++ + /* Stateblock constants are undeclared values found on state blocks or technique passes descriptions, + * that do not concern regular pixel, vertex, or compute shaders, except for parsing. */ + struct hlsl_ir_stateblock_constant +@@ -862,6 +895,16 @@ struct hlsl_ir_stateblock_constant + char *name; + }; + ++/* A vkd3d_shader_instruction that can be inserted in a hlsl_block. ++ * Only used for the HLSL IR to vsir translation, might be removed once this translation is complete. */ ++struct hlsl_ir_vsir_instruction_ref ++{ ++ struct hlsl_ir_node node; ++ ++ /* Index to a vkd3d_shader_instruction within a vkd3d_shader_instruction_array in a vsir_program. */ ++ unsigned int vsir_instr_idx; ++}; ++ + struct hlsl_scope + { + /* Item entry for hlsl_ctx.scopes. 
*/ +@@ -1016,6 +1059,7 @@ struct hlsl_ctx + { + uint32_t index; + struct hlsl_vec4 value; ++ struct vkd3d_shader_location loc; + } *regs; + size_t count, size; + } constant_defs; +@@ -1029,6 +1073,12 @@ struct hlsl_ctx + * compute shader profiles. It is set using the numthreads() attribute in the entry point. */ + uint32_t thread_count[3]; + ++ enum vkd3d_tessellator_domain domain; ++ unsigned int output_control_point_count; ++ enum vkd3d_shader_tessellator_output_primitive output_primitive; ++ enum vkd3d_shader_tessellator_partitioning partitioning; ++ struct hlsl_ir_function_decl *patch_constant_func; ++ + /* In some cases we generate opcodes by parsing an HLSL function and then + * invoking it. If not NULL, this field is the name of the function that we + * are currently parsing, "mangled" with an internal prefix to avoid +@@ -1149,12 +1199,24 @@ static inline struct hlsl_ir_switch *hlsl_ir_switch(const struct hlsl_ir_node *n + return CONTAINING_RECORD(node, struct hlsl_ir_switch, node); + } + ++static inline struct hlsl_ir_compile *hlsl_ir_compile(const struct hlsl_ir_node *node) ++{ ++ VKD3D_ASSERT(node->type == HLSL_IR_COMPILE); ++ return CONTAINING_RECORD(node, struct hlsl_ir_compile, node); ++} ++ + static inline struct hlsl_ir_stateblock_constant *hlsl_ir_stateblock_constant(const struct hlsl_ir_node *node) + { + VKD3D_ASSERT(node->type == HLSL_IR_STATEBLOCK_CONSTANT); + return CONTAINING_RECORD(node, struct hlsl_ir_stateblock_constant, node); + } + ++static inline struct hlsl_ir_vsir_instruction_ref *hlsl_ir_vsir_instruction_ref(const struct hlsl_ir_node *node) ++{ ++ VKD3D_ASSERT(node->type == HLSL_IR_VSIR_INSTRUCTION_REF); ++ return CONTAINING_RECORD(node, struct hlsl_ir_vsir_instruction_ref, node); ++} ++ + static inline void hlsl_block_init(struct hlsl_block *block) + { + list_init(&block->instrs); +@@ -1428,6 +1490,9 @@ bool hlsl_index_is_noncontiguous(struct hlsl_ir_index *index); + bool hlsl_index_is_resource_access(struct hlsl_ir_index *index); + bool hlsl_index_chain_has_resource_access(struct hlsl_ir_index *index); + ++struct hlsl_ir_node *hlsl_new_compile(struct hlsl_ctx *ctx, const char *profile_name, ++ struct hlsl_ir_node **args, unsigned int args_count, struct hlsl_block *args_instrs, ++ const struct vkd3d_shader_location *loc); + struct hlsl_ir_node *hlsl_new_index(struct hlsl_ctx *ctx, struct hlsl_ir_node *val, + struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc); + struct hlsl_ir_node *hlsl_new_loop(struct hlsl_ctx *ctx, +@@ -1466,6 +1531,9 @@ struct hlsl_ir_switch_case *hlsl_new_switch_case(struct hlsl_ctx *ctx, unsigned + struct hlsl_ir_node *hlsl_new_switch(struct hlsl_ctx *ctx, struct hlsl_ir_node *selector, + struct list *cases, const struct vkd3d_shader_location *loc); + ++struct hlsl_ir_node *hlsl_new_vsir_instruction_ref(struct hlsl_ctx *ctx, unsigned int vsir_instr_idx, ++ struct hlsl_type *type, const struct hlsl_reg *reg, const struct vkd3d_shader_location *loc); ++ + void hlsl_error(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, + enum vkd3d_shader_error error, const char *fmt, ...) 
VKD3D_PRINTF_FUNC(4, 5); + void hlsl_fixme(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, +@@ -1493,6 +1561,7 @@ unsigned int hlsl_type_minor_size(const struct hlsl_type *type); + unsigned int hlsl_type_major_size(const struct hlsl_type *type); + unsigned int hlsl_type_element_count(const struct hlsl_type *type); + bool hlsl_type_is_resource(const struct hlsl_type *type); ++bool hlsl_type_is_shader(const struct hlsl_type *type); + unsigned int hlsl_type_get_sm4_offset(const struct hlsl_type *type, unsigned int offset); + bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2); + +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.l b/libs/vkd3d/libs/vkd3d-shader/hlsl.l +index 0c02b27817e..e5472709a8c 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.l ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.l +@@ -80,6 +80,7 @@ centroid {return KW_CENTROID; } + column_major {return KW_COLUMN_MAJOR; } + ComputeShader {return KW_COMPUTESHADER; } + compile {return KW_COMPILE; } ++CompileShader {return KW_COMPILESHADER; } + const {return KW_CONST; } + continue {return KW_CONTINUE; } + DepthStencilState {return KW_DEPTHSTENCILSTATE; } +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.y b/libs/vkd3d/libs/vkd3d-shader/hlsl.y +index 3f319dea0d8..816d992afa8 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.y ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.y +@@ -516,7 +516,7 @@ enum loop_type + LOOP_DO_WHILE + }; + +-static bool attribute_list_has_duplicates(const struct parse_attribute_list *attrs) ++static void check_attribute_list_for_duplicates(struct hlsl_ctx *ctx, const struct parse_attribute_list *attrs) + { + unsigned int i, j; + +@@ -525,11 +525,10 @@ static bool attribute_list_has_duplicates(const struct parse_attribute_list *att + for (j = i + 1; j < attrs->count; ++j) + { + if (!strcmp(attrs->attrs[i]->name, attrs->attrs[j]->name)) +- return true; ++ hlsl_error(ctx, &attrs->attrs[j]->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, ++ "Found duplicate attribute \"%s\".", attrs->attrs[j]->name); + } + } +- +- return false; + } + + static void resolve_loop_continue(struct hlsl_ctx *ctx, struct hlsl_block *block, enum loop_type type, +@@ -628,10 +627,13 @@ static struct hlsl_default_value evaluate_static_expression(struct hlsl_ctx *ctx + case HLSL_IR_RESOURCE_LOAD: + case HLSL_IR_RESOURCE_STORE: + case HLSL_IR_SWITCH: ++ case HLSL_IR_COMPILE: + case HLSL_IR_STATEBLOCK_CONSTANT: + hlsl_error(ctx, &node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, + "Expected literal expression."); + break; ++ case HLSL_IR_VSIR_INSTRUCTION_REF: ++ vkd3d_unreachable(); + } + } + +@@ -697,9 +699,7 @@ static struct hlsl_block *create_loop(struct hlsl_ctx *ctx, enum loop_type type, + unsigned int i, unroll_limit = 0; + struct hlsl_ir_node *loop; + +- if (attribute_list_has_duplicates(attributes)) +- hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "Found duplicate attribute."); +- ++ check_attribute_list_for_duplicates(ctx, attributes); + check_loop_attributes(ctx, attributes, loc); + + /* Ignore unroll(0) attribute, and any invalid attribute. 
*/ +@@ -2733,13 +2733,15 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var + + if (v->initializer.args_count) + { +- unsigned int store_index = 0; + bool is_default_values_initializer; ++ unsigned int store_index = 0; + unsigned int size, k; + + is_default_values_initializer = (ctx->cur_buffer != ctx->globals_buffer) + || (var->storage_modifiers & HLSL_STORAGE_UNIFORM) + || ctx->cur_scope->annotations; ++ if (hlsl_type_is_shader(type)) ++ is_default_values_initializer = false; + + if (is_default_values_initializer) + { +@@ -2835,28 +2837,36 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var + return initializers; + } + +-static bool func_is_compatible_match(struct hlsl_ctx *ctx, +- const struct hlsl_ir_function_decl *decl, const struct parse_initializer *args) ++static bool func_is_compatible_match(struct hlsl_ctx *ctx, const struct hlsl_ir_function_decl *decl, ++ bool is_compile, const struct parse_initializer *args) + { +- unsigned int i; +- +- if (decl->parameters.count < args->args_count) +- return false; ++ unsigned int i, k; + +- for (i = 0; i < args->args_count; ++i) ++ k = 0; ++ for (i = 0; i < decl->parameters.count; ++i) + { +- if (!implicit_compatible_data_types(ctx, args->args[i]->data_type, decl->parameters.vars[i]->data_type)) ++ if (is_compile && !(decl->parameters.vars[i]->storage_modifiers & HLSL_STORAGE_UNIFORM)) ++ continue; ++ ++ if (k >= args->args_count) ++ { ++ if (!decl->parameters.vars[i]->default_values) ++ return false; ++ return true; ++ } ++ ++ if (!implicit_compatible_data_types(ctx, args->args[k]->data_type, decl->parameters.vars[i]->data_type)) + return false; +- } + +- if (args->args_count < decl->parameters.count && !decl->parameters.vars[args->args_count]->default_values) ++ ++k; ++ } ++ if (k < args->args_count) + return false; +- + return true; + } + + static struct hlsl_ir_function_decl *find_function_call(struct hlsl_ctx *ctx, +- const char *name, const struct parse_initializer *args, ++ const char *name, const struct parse_initializer *args, bool is_compile, + const struct vkd3d_shader_location *loc) + { + struct hlsl_ir_function_decl *decl, *compatible_match = NULL; +@@ -2869,7 +2879,7 @@ static struct hlsl_ir_function_decl *find_function_call(struct hlsl_ctx *ctx, + + LIST_FOR_EACH_ENTRY(decl, &func->overloads, struct hlsl_ir_function_decl, entry) + { +- if (func_is_compatible_match(ctx, decl, args)) ++ if (func_is_compatible_match(ctx, decl, is_compile, args)) + { + if (compatible_match) + { +@@ -2890,26 +2900,35 @@ static struct hlsl_ir_node *hlsl_new_void_expr(struct hlsl_ctx *ctx, const struc + return hlsl_new_expr(ctx, HLSL_OP0_VOID, operands, ctx->builtin_types.Void, loc); + } + +-static bool add_user_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *func, +- const struct parse_initializer *args, const struct vkd3d_shader_location *loc) ++static struct hlsl_ir_node *add_user_call(struct hlsl_ctx *ctx, ++ struct hlsl_ir_function_decl *func, const struct parse_initializer *args, ++ bool is_compile, const struct vkd3d_shader_location *loc) + { + struct hlsl_ir_node *call; +- unsigned int i, j; ++ unsigned int i, j, k; + + VKD3D_ASSERT(args->args_count <= func->parameters.count); + +- for (i = 0; i < args->args_count; ++i) ++ k = 0; ++ for (i = 0; i < func->parameters.count; ++i) + { + struct hlsl_ir_var *param = func->parameters.vars[i]; +- struct hlsl_ir_node *arg = args->args[i]; ++ struct hlsl_ir_node *arg; ++ ++ if (is_compile && !(param->storage_modifiers & 
HLSL_STORAGE_UNIFORM)) ++ continue; ++ ++ if (k >= args->args_count) ++ break; ++ arg = args->args[k]; + + if (!hlsl_types_are_equal(arg->data_type, param->data_type)) + { + struct hlsl_ir_node *cast; + + if (!(cast = add_cast(ctx, args->instrs, arg, param->data_type, &arg->loc))) +- return false; +- args->args[i] = cast; ++ return NULL; ++ args->args[k] = cast; + arg = cast; + } + +@@ -2918,13 +2937,15 @@ static bool add_user_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fu + struct hlsl_ir_node *store; + + if (!(store = hlsl_new_simple_store(ctx, param, arg))) +- return false; ++ return NULL; + hlsl_block_add_instr(args->instrs, store); + } ++ ++ ++k; + } + + /* Add default values for the remaining parameters. */ +- for (i = args->args_count; i < func->parameters.count; ++i) ++ for (; i < func->parameters.count; ++i) + { + struct hlsl_ir_var *param = func->parameters.vars[i]; + unsigned int comp_count = hlsl_type_component_count(param->data_type); +@@ -2932,6 +2953,9 @@ static bool add_user_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fu + + VKD3D_ASSERT(param->default_values); + ++ if (is_compile && !(param->storage_modifiers & HLSL_STORAGE_UNIFORM)) ++ continue; ++ + hlsl_init_simple_deref_from_var(¶m_deref, param); + + for (j = 0; j < comp_count; ++j) +@@ -2945,20 +2969,23 @@ static bool add_user_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fu + { + value.u[0] = param->default_values[j].number; + if (!(comp = hlsl_new_constant(ctx, type, &value, loc))) +- return false; ++ return NULL; + hlsl_block_add_instr(args->instrs, comp); + + if (!hlsl_new_store_component(ctx, &store_block, ¶m_deref, j, comp)) +- return false; ++ return NULL; + hlsl_block_add_block(args->instrs, &store_block); + } + } + } + + if (!(call = hlsl_new_call(ctx, func, loc))) +- return false; ++ return NULL; + hlsl_block_add_instr(args->instrs, call); + ++ if (is_compile) ++ return call; ++ + for (i = 0; i < args->args_count; ++i) + { + struct hlsl_ir_var *param = func->parameters.vars[i]; +@@ -2973,11 +3000,11 @@ static bool add_user_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fu + "Output argument to \"%s\" is const.", func->func->name); + + if (!(load = hlsl_new_var_load(ctx, param, &arg->loc))) +- return false; ++ return NULL; + hlsl_block_add_instr(args->instrs, &load->node); + + if (!add_assignment(ctx, args->instrs, arg, ASSIGN_OP_ASSIGN, &load->node)) +- return false; ++ return NULL; + } + } + +@@ -2998,7 +3025,7 @@ static bool add_user_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fu + hlsl_block_add_instr(args->instrs, expr); + } + +- return true; ++ return call; + } + + static struct hlsl_ir_node *intrinsic_float_convert_arg(struct hlsl_ctx *ctx, +@@ -3165,7 +3192,7 @@ static bool write_acos_or_asin(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_acos(struct hlsl_ctx *ctx, +@@ -3314,7 +3341,7 @@ static bool write_atan_or_atan2(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_atan(struct hlsl_ctx *ctx, +@@ -3507,7 +3534,7 @@ static bool write_cosh_or_sinh(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_cosh(struct hlsl_ctx *ctx, +@@ 
-3734,7 +3761,7 @@ static bool intrinsic_determinant(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_distance(struct hlsl_ctx *ctx, +@@ -3766,6 +3793,50 @@ static bool intrinsic_dot(struct hlsl_ctx *ctx, + return !!add_binary_dot_expr(ctx, params->instrs, params->args[0], params->args[1], loc); + } + ++static bool intrinsic_dst(struct hlsl_ctx *ctx, const struct parse_initializer *params, ++ const struct vkd3d_shader_location *loc) ++{ ++ struct hlsl_ir_function_decl *func; ++ struct hlsl_type *type, *vec4_type; ++ char *body; ++ ++ static const char template[] = ++ "%s dst(%s i0, %s i1)\n" ++ "{\n" ++ /* Scalars and vector-4s are both valid inputs, so promote scalars ++ * if necessary. */ ++ " %s src0 = i0, src1 = i1;\n" ++ " return %s(1, src0.y * src1.y, src0.z, src1.w);\n" ++ "}"; ++ ++ if (!elementwise_intrinsic_float_convert_args(ctx, params, loc)) ++ return false; ++ type = params->args[0]->data_type; ++ if (!(type->class == HLSL_CLASS_SCALAR ++ || (type->class == HLSL_CLASS_VECTOR && type->dimx == 4))) ++ { ++ struct vkd3d_string_buffer *string; ++ if ((string = hlsl_type_to_string(ctx, type))) ++ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE, ++ "Wrong dimension for dst(): expected scalar or 4-dimensional vector, but got %s.", ++ string->buffer); ++ hlsl_release_string_buffer(ctx, string); ++ } ++ vec4_type = hlsl_get_vector_type(ctx, type->e.numeric.type, 4); ++ ++ if (!(body = hlsl_sprintf_alloc(ctx, template, ++ vec4_type->name, type->name, type->name, ++ vec4_type->name, ++ vec4_type->name))) ++ return false; ++ func = hlsl_compile_internal_function(ctx, "dst", body); ++ vkd3d_free(body); ++ if (!func) ++ return false; ++ ++ return !!add_user_call(ctx, func, params, false, loc); ++} ++ + static bool intrinsic_exp(struct hlsl_ctx *ctx, + const struct parse_initializer *params, const struct vkd3d_shader_location *loc) + { +@@ -3821,7 +3892,7 @@ static bool intrinsic_faceforward(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_f16tof32(struct hlsl_ctx *ctx, +@@ -3926,7 +3997,7 @@ static bool intrinsic_fwidth(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_ldexp(struct hlsl_ctx *ctx, +@@ -4029,7 +4100,7 @@ static bool intrinsic_lit(struct hlsl_ctx *ctx, + if (!(func = hlsl_compile_internal_function(ctx, "lit", body))) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_log(struct hlsl_ctx *ctx, +@@ -4332,7 +4403,7 @@ static bool intrinsic_refract(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_round(struct hlsl_ctx *ctx, +@@ -4415,6 +4486,35 @@ static bool intrinsic_sin(struct hlsl_ctx *ctx, + return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_SIN, arg, loc); + } + ++static bool intrinsic_sincos(struct hlsl_ctx *ctx, ++ const struct parse_initializer *params, const struct vkd3d_shader_location *loc) ++{ ++ struct hlsl_ir_function_decl *func; ++ struct hlsl_type *type; ++ char *body; ++ ++ static 
const char template[] = ++ "void sincos(%s f, out %s s, out %s c)\n" ++ "{\n" ++ " s = sin(f);\n" ++ " c = cos(f);\n" ++ "}"; ++ ++ if (!elementwise_intrinsic_float_convert_args(ctx, params, loc)) ++ return false; ++ type = params->args[0]->data_type; ++ ++ if (!(body = hlsl_sprintf_alloc(ctx, template, ++ type->name, type->name, type->name))) ++ return false; ++ func = hlsl_compile_internal_function(ctx, "sincos", body); ++ vkd3d_free(body); ++ if (!func) ++ return false; ++ ++ return !!add_user_call(ctx, func, params, false, loc); ++} ++ + static bool intrinsic_sinh(struct hlsl_ctx *ctx, + const struct parse_initializer *params, const struct vkd3d_shader_location *loc) + { +@@ -4447,7 +4547,7 @@ static bool intrinsic_smoothstep(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_sqrt(struct hlsl_ctx *ctx, +@@ -4523,7 +4623,7 @@ static bool intrinsic_tanh(struct hlsl_ctx *ctx, + if (!func) + return false; + +- return add_user_call(ctx, func, params, loc); ++ return !!add_user_call(ctx, func, params, false, loc); + } + + static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *params, +@@ -4937,6 +5037,7 @@ intrinsic_functions[] = + {"determinant", 1, true, intrinsic_determinant}, + {"distance", 2, true, intrinsic_distance}, + {"dot", 2, true, intrinsic_dot}, ++ {"dst", 2, true, intrinsic_dst}, + {"exp", 1, true, intrinsic_exp}, + {"exp2", 1, true, intrinsic_exp2}, + {"f16tof32", 1, true, intrinsic_f16tof32}, +@@ -4966,6 +5067,7 @@ intrinsic_functions[] = + {"saturate", 1, true, intrinsic_saturate}, + {"sign", 1, true, intrinsic_sign}, + {"sin", 1, true, intrinsic_sin}, ++ {"sincos", 3, true, intrinsic_sincos}, + {"sinh", 1, true, intrinsic_sinh}, + {"smoothstep", 3, true, intrinsic_smoothstep}, + {"sqrt", 1, true, intrinsic_sqrt}, +@@ -5002,9 +5104,9 @@ static struct hlsl_block *add_call(struct hlsl_ctx *ctx, const char *name, + struct intrinsic_function *intrinsic; + struct hlsl_ir_function_decl *decl; + +- if ((decl = find_function_call(ctx, name, args, loc))) ++ if ((decl = find_function_call(ctx, name, args, false, loc))) + { +- if (!add_user_call(ctx, decl, args, loc)) ++ if (!add_user_call(ctx, decl, args, false, loc)) + goto fail; + } + else if ((intrinsic = bsearch(name, intrinsic_functions, ARRAY_SIZE(intrinsic_functions), +@@ -5060,6 +5162,53 @@ fail: + return NULL; + } + ++static struct hlsl_block *add_shader_compilation(struct hlsl_ctx *ctx, const char *profile_name, ++ const char *function_name, struct parse_initializer *args, const struct vkd3d_shader_location *loc) ++{ ++ struct hlsl_ir_node *compile, *call_to_compile = NULL; ++ struct hlsl_ir_function_decl *decl; ++ ++ if (!ctx->in_state_block && ctx->cur_scope != ctx->globals) ++ { ++ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_MISPLACED_COMPILE, ++ "Shader compilation statements must be in global scope or a state block."); ++ free_parse_initializer(args); ++ return NULL; ++ } ++ ++ if (!(decl = find_function_call(ctx, function_name, args, true, loc))) ++ { ++ if (rb_get(&ctx->functions, function_name)) ++ { ++ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED, ++ "No compatible \"%s\" declaration with %u uniform parameters found.", ++ function_name, args->args_count); ++ } ++ else ++ { ++ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED, ++ "Function \"%s\" is not defined.", function_name); ++ } ++ free_parse_initializer(args); ++ return NULL; ++ 
} ++ ++ if (!(call_to_compile = add_user_call(ctx, decl, args, true, loc))) ++ { ++ free_parse_initializer(args); ++ return NULL; ++ } ++ ++ if (!(compile = hlsl_new_compile(ctx, profile_name, &call_to_compile, 1, args->instrs, loc))) ++ { ++ free_parse_initializer(args); ++ return NULL; ++ } ++ ++ free_parse_initializer(args); ++ return make_block(ctx, compile); ++} ++ + static struct hlsl_block *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type, + struct parse_initializer *params, const struct vkd3d_shader_location *loc) + { +@@ -6058,6 +6207,7 @@ static bool state_block_add_entry(struct hlsl_state_block *state_block, struct h + %token KW_CENTROID + %token KW_COLUMN_MAJOR + %token KW_COMPILE ++%token KW_COMPILESHADER + %token KW_COMPUTESHADER + %token KW_CONST + %token KW_CONTINUE +@@ -6827,6 +6977,8 @@ func_prototype: + func_prototype_no_attrs + | attribute_list func_prototype_no_attrs + { ++ check_attribute_list_for_duplicates(ctx, &$1); ++ + if ($2.first) + { + $2.decl->attr_count = $1.count; +@@ -8092,8 +8244,7 @@ selection_statement: + struct hlsl_ir_node *instr; + unsigned int i; + +- if (attribute_list_has_duplicates(attributes)) +- hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "Found duplicate attribute."); ++ check_attribute_list_for_duplicates(ctx, attributes); + + for (i = 0; i < attributes->count; ++i) + { +@@ -8391,6 +8542,29 @@ primary_expr: + { + $$ = $2; + } ++ ++ | KW_COMPILE any_identifier var_identifier '(' func_arguments ')' ++ { ++ if (!($$ = add_shader_compilation(ctx, $2, $3, &$5, &@1))) ++ { ++ vkd3d_free($2); ++ vkd3d_free($3); ++ YYABORT; ++ } ++ vkd3d_free($2); ++ vkd3d_free($3); ++ } ++ | KW_COMPILESHADER '(' any_identifier ',' var_identifier '(' func_arguments ')' ')' ++ { ++ if (!($$ = add_shader_compilation(ctx, $3, $5, &$7, &@1))) ++ { ++ vkd3d_free($3); ++ vkd3d_free($5); ++ YYABORT; ++ } ++ vkd3d_free($3); ++ vkd3d_free($5); ++ } + | var_identifier '(' func_arguments ')' + { + if (!($$ = add_call(ctx, $1, &$3, &@1))) +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c +index 154328a64c3..e470115f191 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c +@@ -4050,6 +4050,7 @@ static bool dce(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) + switch (instr->type) + { + case HLSL_IR_CONSTANT: ++ case HLSL_IR_COMPILE: + case HLSL_IR_EXPR: + case HLSL_IR_INDEX: + case HLSL_IR_LOAD: +@@ -4088,6 +4089,9 @@ static bool dce(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) + case HLSL_IR_STATEBLOCK_CONSTANT: + /* Stateblock constants should not appear in the shader program. */ + vkd3d_unreachable(); ++ case HLSL_IR_VSIR_INSTRUCTION_REF: ++ /* HLSL IR nodes are not translated to hlsl_ir_vsir_instruction_ref at this point. */ ++ vkd3d_unreachable(); + } + + return false; +@@ -4213,6 +4217,9 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop + case HLSL_IR_STATEBLOCK_CONSTANT: + /* Stateblock constants should not appear in the shader program. */ + vkd3d_unreachable(); ++ case HLSL_IR_VSIR_INSTRUCTION_REF: ++ /* HLSL IR nodes are not translated to hlsl_ir_vsir_instruction_ref at this point. 
*/ ++ vkd3d_unreachable(); + + case HLSL_IR_STORE: + { +@@ -4337,6 +4344,9 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop + case HLSL_IR_CONSTANT: + case HLSL_IR_STRING_CONSTANT: + break; ++ case HLSL_IR_COMPILE: ++ /* Compile calls are skipped as they are only relevent to effects. */ ++ break; + } + } + } +@@ -4816,7 +4826,8 @@ static void allocate_temp_registers_recurse(struct hlsl_ctx *ctx, + } + } + +-static void record_constant(struct hlsl_ctx *ctx, unsigned int component_index, float f) ++static void record_constant(struct hlsl_ctx *ctx, unsigned int component_index, float f, ++ const struct vkd3d_shader_location *loc) + { + struct hlsl_constant_defs *defs = &ctx->constant_defs; + struct hlsl_constant_register *reg; +@@ -4838,6 +4849,7 @@ static void record_constant(struct hlsl_ctx *ctx, unsigned int component_index, + memset(reg, 0, sizeof(*reg)); + reg->index = component_index / 4; + reg->value.f[component_index % 4] = f; ++ reg->loc = *loc; + } + + static void allocate_const_registers_recurse(struct hlsl_ctx *ctx, +@@ -4898,7 +4910,7 @@ static void allocate_const_registers_recurse(struct hlsl_ctx *ctx, + vkd3d_unreachable(); + } + +- record_constant(ctx, constant->reg.id * 4 + x, f); ++ record_constant(ctx, constant->reg.id * 4 + x, f, &constant->node.loc); + } + + break; +@@ -4991,17 +5003,17 @@ static void allocate_sincos_const_registers(struct hlsl_ctx *ctx, struct hlsl_bl + + ctx->d3dsincosconst1 = allocate_numeric_registers_for_type(ctx, allocator, 1, UINT_MAX, type); + TRACE("Allocated D3DSINCOSCONST1 to %s.\n", debug_register('c', ctx->d3dsincosconst1, type)); +- record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 0, -1.55009923e-06f); +- record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 1, -2.17013894e-05f); +- record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 2, 2.60416674e-03f); +- record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 3, 2.60416680e-04f); ++ record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 0, -1.55009923e-06f, &instr->loc); ++ record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 1, -2.17013894e-05f, &instr->loc); ++ record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 2, 2.60416674e-03f, &instr->loc); ++ record_constant(ctx, ctx->d3dsincosconst1.id * 4 + 3, 2.60416680e-04f, &instr->loc); + + ctx->d3dsincosconst2 = allocate_numeric_registers_for_type(ctx, allocator, 1, UINT_MAX, type); + TRACE("Allocated D3DSINCOSCONST2 to %s.\n", debug_register('c', ctx->d3dsincosconst2, type)); +- record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 0, -2.08333340e-02f); +- record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 1, -1.25000000e-01f); +- record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 2, 1.00000000e+00f); +- record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 3, 5.00000000e-01f); ++ record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 0, -2.08333340e-02f, &instr->loc); ++ record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 1, -1.25000000e-01f, &instr->loc); ++ record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 2, 1.00000000e+00f, &instr->loc); ++ record_constant(ctx, ctx->d3dsincosconst2.id * 4 + 3, 5.00000000e-01f, &instr->loc); + + return; + } +@@ -5786,6 +5798,26 @@ struct hlsl_reg hlsl_reg_from_deref(struct hlsl_ctx *ctx, const struct hlsl_dere + return ret; + } + ++static const char *get_string_argument_value(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr, unsigned int i) ++{ ++ const struct hlsl_ir_node *instr = attr->args[i].node; ++ const struct hlsl_type *type = instr->data_type; ++ ++ if (type->class != 
HLSL_CLASS_STRING) ++ { ++ struct vkd3d_string_buffer *string; ++ ++ if ((string = hlsl_type_to_string(ctx, type))) ++ hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE, ++ "Wrong type for the argument %u of [%s]: expected string, but got %s.", ++ i, attr->name, string->buffer); ++ hlsl_release_string_buffer(ctx, string); ++ return NULL; ++ } ++ ++ return hlsl_ir_string_constant(instr)->string; ++} ++ + static void parse_numthreads_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr) + { + unsigned int i; +@@ -5834,6 +5866,261 @@ static void parse_numthreads_attribute(struct hlsl_ctx *ctx, const struct hlsl_a + } + } + ++static void parse_domain_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr) ++{ ++ const char *value; ++ ++ if (attr->args_count != 1) ++ { ++ hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT, ++ "Expected 1 parameter for [domain] attribute, but got %u.", attr->args_count); ++ return; ++ } ++ ++ if (!(value = get_string_argument_value(ctx, attr, 0))) ++ return; ++ ++ if (!strcmp(value, "isoline")) ++ ctx->domain = VKD3D_TESSELLATOR_DOMAIN_LINE; ++ else if (!strcmp(value, "tri")) ++ ctx->domain = VKD3D_TESSELLATOR_DOMAIN_TRIANGLE; ++ else if (!strcmp(value, "quad")) ++ ctx->domain = VKD3D_TESSELLATOR_DOMAIN_QUAD; ++ else ++ hlsl_error(ctx, &attr->args[0].node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_DOMAIN, ++ "Invalid tessellator domain \"%s\": expected \"isoline\", \"tri\", or \"quad\".", ++ value); ++} ++ ++static void parse_outputcontrolpoints_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr) ++{ ++ const struct hlsl_ir_node *instr; ++ const struct hlsl_type *type; ++ const struct hlsl_ir_constant *constant; ++ ++ if (attr->args_count != 1) ++ { ++ hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT, ++ "Expected 1 parameter for [outputcontrolpoints] attribute, but got %u.", attr->args_count); ++ return; ++ } ++ ++ instr = attr->args[0].node; ++ type = instr->data_type; ++ ++ if (type->class != HLSL_CLASS_SCALAR ++ || (type->e.numeric.type != HLSL_TYPE_INT && type->e.numeric.type != HLSL_TYPE_UINT)) ++ { ++ struct vkd3d_string_buffer *string; ++ ++ if ((string = hlsl_type_to_string(ctx, type))) ++ hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE, ++ "Wrong type for argument 0 of [outputcontrolpoints]: expected int or uint, but got %s.", ++ string->buffer); ++ hlsl_release_string_buffer(ctx, string); ++ return; ++ } ++ ++ if (instr->type != HLSL_IR_CONSTANT) ++ { ++ hlsl_fixme(ctx, &instr->loc, "Non-constant expression in [outputcontrolpoints] initializer."); ++ return; ++ } ++ constant = hlsl_ir_constant(instr); ++ ++ if ((type->e.numeric.type == HLSL_TYPE_INT && constant->value.u[0].i < 0) ++ || constant->value.u[0].u > 32) ++ hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT, ++ "Output control point count must be between 0 and 32."); ++ ++ ctx->output_control_point_count = constant->value.u[0].u; ++} ++ ++static void parse_outputtopology_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr) ++{ ++ const char *value; ++ ++ if (attr->args_count != 1) ++ { ++ hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT, ++ "Expected 1 parameter for [outputtopology] attribute, but got %u.", attr->args_count); ++ return; ++ } ++ ++ if (!(value = get_string_argument_value(ctx, attr, 0))) ++ return; ++ ++ if (!strcmp(value, "point")) ++ ctx->output_primitive = 
VKD3D_SHADER_TESSELLATOR_OUTPUT_POINT; ++ else if (!strcmp(value, "line")) ++ ctx->output_primitive = VKD3D_SHADER_TESSELLATOR_OUTPUT_LINE; ++ else if (!strcmp(value, "triangle_cw")) ++ ctx->output_primitive = VKD3D_SHADER_TESSELLATOR_OUTPUT_TRIANGLE_CW; ++ else if (!strcmp(value, "triangle_ccw")) ++ ctx->output_primitive = VKD3D_SHADER_TESSELLATOR_OUTPUT_TRIANGLE_CCW; ++ else ++ hlsl_error(ctx, &attr->args[0].node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_OUTPUT_PRIMITIVE, ++ "Invalid tessellator output topology \"%s\": " ++ "expected \"point\", \"line\", \"triangle_cw\", or \"triangle_ccw\".", value); ++} ++ ++static void parse_partitioning_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr) ++{ ++ const char *value; ++ ++ if (attr->args_count != 1) ++ { ++ hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT, ++ "Expected 1 parameter for [partitioning] attribute, but got %u.", attr->args_count); ++ return; ++ } ++ ++ if (!(value = get_string_argument_value(ctx, attr, 0))) ++ return; ++ ++ if (!strcmp(value, "integer")) ++ ctx->partitioning = VKD3D_SHADER_TESSELLATOR_PARTITIONING_INTEGER; ++ else if (!strcmp(value, "pow2")) ++ ctx->partitioning = VKD3D_SHADER_TESSELLATOR_PARTITIONING_POW2; ++ else if (!strcmp(value, "fractional_even")) ++ ctx->partitioning = VKD3D_SHADER_TESSELLATOR_PARTITIONING_FRACTIONAL_EVEN; ++ else if (!strcmp(value, "fractional_odd")) ++ ctx->partitioning = VKD3D_SHADER_TESSELLATOR_PARTITIONING_FRACTIONAL_ODD; ++ else ++ hlsl_error(ctx, &attr->args[0].node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_PARTITIONING, ++ "Invalid tessellator partitioning \"%s\": " ++ "expected \"integer\", \"pow2\", \"fractional_even\", or \"fractional_odd\".", value); ++} ++ ++static void parse_patchconstantfunc_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr) ++{ ++ const char *name; ++ struct hlsl_ir_function *func; ++ struct hlsl_ir_function_decl *decl; ++ ++ if (attr->args_count != 1) ++ { ++ hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT, ++ "Expected 1 parameter for [patchconstantfunc] attribute, but got %u.", attr->args_count); ++ return; ++ } ++ ++ if (!(name = get_string_argument_value(ctx, attr, 0))) ++ return; ++ ++ ctx->patch_constant_func = NULL; ++ if ((func = hlsl_get_function(ctx, name))) ++ { ++ /* Pick the last overload with a body. 
*/ ++ LIST_FOR_EACH_ENTRY_REV(decl, &func->overloads, struct hlsl_ir_function_decl, entry) ++ { ++ if (decl->has_body) ++ { ++ ctx->patch_constant_func = decl; ++ break; ++ } ++ } ++ } ++ ++ if (!ctx->patch_constant_func) ++ hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED, ++ "Patch constant function \"%s\" is not defined.", name); ++} ++ ++static void parse_entry_function_attributes(struct hlsl_ctx *ctx, const struct hlsl_ir_function_decl *entry_func) ++{ ++ const struct hlsl_profile_info *profile = ctx->profile; ++ unsigned int i; ++ ++ for (i = 0; i < entry_func->attr_count; ++i) ++ { ++ const struct hlsl_attribute *attr = entry_func->attrs[i]; ++ ++ if (!strcmp(attr->name, "numthreads") && profile->type == VKD3D_SHADER_TYPE_COMPUTE) ++ parse_numthreads_attribute(ctx, attr); ++ else if (!strcmp(attr->name, "domain") ++ && (profile->type == VKD3D_SHADER_TYPE_HULL || profile->type == VKD3D_SHADER_TYPE_DOMAIN)) ++ parse_domain_attribute(ctx, attr); ++ else if (!strcmp(attr->name, "outputcontrolpoints") && profile->type == VKD3D_SHADER_TYPE_HULL) ++ parse_outputcontrolpoints_attribute(ctx, attr); ++ else if (!strcmp(attr->name, "outputtopology") && profile->type == VKD3D_SHADER_TYPE_HULL) ++ parse_outputtopology_attribute(ctx, attr); ++ else if (!strcmp(attr->name, "partitioning") && profile->type == VKD3D_SHADER_TYPE_HULL) ++ parse_partitioning_attribute(ctx, attr); ++ else if (!strcmp(attr->name, "patchconstantfunc") && profile->type == VKD3D_SHADER_TYPE_HULL) ++ parse_patchconstantfunc_attribute(ctx, attr); ++ else ++ hlsl_warning(ctx, &entry_func->attrs[i]->loc, VKD3D_SHADER_WARNING_HLSL_UNKNOWN_ATTRIBUTE, ++ "Ignoring unknown attribute \"%s\".", entry_func->attrs[i]->name); ++ } ++} ++ ++static void validate_hull_shader_attributes(struct hlsl_ctx *ctx, const struct hlsl_ir_function_decl *entry_func) ++{ ++ if (ctx->domain == VKD3D_TESSELLATOR_DOMAIN_INVALID) ++ { ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE, ++ "Entry point \"%s\" is missing a [domain] attribute.", entry_func->func->name); ++ } ++ ++ if (ctx->output_control_point_count == UINT_MAX) ++ { ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE, ++ "Entry point \"%s\" is missing a [outputcontrolpoints] attribute.", entry_func->func->name); ++ } ++ ++ if (!ctx->output_primitive) ++ { ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE, ++ "Entry point \"%s\" is missing a [outputtopology] attribute.", entry_func->func->name); ++ } ++ ++ if (!ctx->partitioning) ++ { ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE, ++ "Entry point \"%s\" is missing a [partitioning] attribute.", entry_func->func->name); ++ } ++ ++ if (!ctx->patch_constant_func) ++ { ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE, ++ "Entry point \"%s\" is missing a [patchconstantfunc] attribute.", entry_func->func->name); ++ } ++ else if (ctx->patch_constant_func == entry_func) ++ { ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_RECURSIVE_CALL, ++ "Patch constant function cannot be the entry point function."); ++ /* Native returns E_NOTIMPL instead of E_FAIL here. 
*/ ++ ctx->result = VKD3D_ERROR_NOT_IMPLEMENTED; ++ return; ++ } ++ ++ switch (ctx->domain) ++ { ++ case VKD3D_TESSELLATOR_DOMAIN_LINE: ++ if (ctx->output_primitive == VKD3D_SHADER_TESSELLATOR_OUTPUT_TRIANGLE_CW ++ || ctx->output_primitive == VKD3D_SHADER_TESSELLATOR_OUTPUT_TRIANGLE_CCW) ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_OUTPUT_PRIMITIVE, ++ "Triangle output topologies are not available for isoline domains."); ++ break; ++ ++ case VKD3D_TESSELLATOR_DOMAIN_TRIANGLE: ++ if (ctx->output_primitive == VKD3D_SHADER_TESSELLATOR_OUTPUT_LINE) ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_OUTPUT_PRIMITIVE, ++ "Line output topologies are not available for triangle domains."); ++ break; ++ ++ case VKD3D_TESSELLATOR_DOMAIN_QUAD: ++ if (ctx->output_primitive == VKD3D_SHADER_TESSELLATOR_OUTPUT_LINE) ++ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_OUTPUT_PRIMITIVE, ++ "Line output topologies are not available for quad domains."); ++ break; ++ ++ default: ++ break; ++ } ++} ++ + static void remove_unreachable_code(struct hlsl_ctx *ctx, struct hlsl_block *body) + { + struct hlsl_ir_node *instr, *next; +@@ -6006,6 +6293,441 @@ static void sm1_generate_vsir_signature(struct hlsl_ctx *ctx, struct vsir_progra + } + } + ++static uint32_t sm1_generate_vsir_get_src_swizzle(uint32_t src_writemask, uint32_t dst_writemask) ++{ ++ uint32_t swizzle; ++ ++ swizzle = hlsl_swizzle_from_writemask(src_writemask); ++ swizzle = hlsl_map_swizzle(swizzle, dst_writemask); ++ swizzle = vsir_swizzle_from_hlsl(swizzle); ++ return swizzle; ++} ++ ++static void sm1_generate_vsir_constant_defs(struct hlsl_ctx *ctx, struct vsir_program *program, ++ struct hlsl_block *block) ++{ ++ struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ struct vkd3d_shader_dst_param *dst_param; ++ struct vkd3d_shader_src_param *src_param; ++ struct vkd3d_shader_instruction *ins; ++ struct hlsl_ir_node *vsir_instr; ++ unsigned int i, x; ++ ++ for (i = 0; i < ctx->constant_defs.count; ++i) ++ { ++ const struct hlsl_constant_register *constant_reg = &ctx->constant_defs.regs[i]; ++ ++ if (!shader_instruction_array_reserve(instructions, instructions->count + 1)) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++ ins = &instructions->elements[instructions->count]; ++ if (!vsir_instruction_init_with_params(program, ins, &constant_reg->loc, VKD3DSIH_DEF, 1, 1)) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++instructions->count; ++ ++ dst_param = &ins->dst[0]; ++ vsir_register_init(&dst_param->reg, VKD3DSPR_CONST, VKD3D_DATA_FLOAT, 1); ++ ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4; ++ ins->dst[0].reg.idx[0].offset = constant_reg->index; ++ ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL; ++ ++ src_param = &ins->src[0]; ++ vsir_register_init(&src_param->reg, VKD3DSPR_IMMCONST, VKD3D_DATA_FLOAT, 0); ++ src_param->reg.type = VKD3DSPR_IMMCONST; ++ src_param->reg.precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT; ++ src_param->reg.non_uniform = false; ++ src_param->reg.data_type = VKD3D_DATA_FLOAT; ++ src_param->reg.dimension = VSIR_DIMENSION_VEC4; ++ for (x = 0; x < 4; ++x) ++ src_param->reg.u.immconst_f32[x] = constant_reg->value.f[x]; ++ src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE; ++ ++ if (!(vsir_instr = hlsl_new_vsir_instruction_ref(ctx, instructions->count - 1, NULL, NULL, ++ &constant_reg->loc))) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ hlsl_block_add_instr(block, vsir_instr); ++ } 
++} ++ ++static void sm1_generate_vsir_sampler_dcls(struct hlsl_ctx *ctx, ++ struct vsir_program *program, struct hlsl_block *block) ++{ ++ struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ enum vkd3d_shader_resource_type resource_type; ++ struct vkd3d_shader_register_range *range; ++ struct vkd3d_shader_dst_param *dst_param; ++ struct vkd3d_shader_semantic *semantic; ++ struct vkd3d_shader_instruction *ins; ++ enum hlsl_sampler_dim sampler_dim; ++ struct hlsl_ir_node *vsir_instr; ++ struct hlsl_ir_var *var; ++ unsigned int i, count; ++ ++ LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry) ++ { ++ if (!var->regs[HLSL_REGSET_SAMPLERS].allocated) ++ continue; ++ ++ count = var->bind_count[HLSL_REGSET_SAMPLERS]; ++ for (i = 0; i < count; ++i) ++ { ++ if (var->objects_usage[HLSL_REGSET_SAMPLERS][i].used) ++ { ++ sampler_dim = var->objects_usage[HLSL_REGSET_SAMPLERS][i].sampler_dim; ++ ++ switch (sampler_dim) ++ { ++ case HLSL_SAMPLER_DIM_2D: ++ resource_type = VKD3D_SHADER_RESOURCE_TEXTURE_2D; ++ break; ++ ++ case HLSL_SAMPLER_DIM_CUBE: ++ resource_type = VKD3D_SHADER_RESOURCE_TEXTURE_CUBE; ++ break; ++ ++ case HLSL_SAMPLER_DIM_3D: ++ resource_type = VKD3D_SHADER_RESOURCE_TEXTURE_3D; ++ break; ++ ++ case HLSL_SAMPLER_DIM_GENERIC: ++ /* These can appear in sm4-style combined sample instructions. */ ++ hlsl_fixme(ctx, &var->loc, "Generic samplers need to be lowered."); ++ continue; ++ ++ default: ++ vkd3d_unreachable(); ++ break; ++ } ++ ++ if (!shader_instruction_array_reserve(instructions, instructions->count + 1)) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++ ins = &instructions->elements[instructions->count]; ++ if (!vsir_instruction_init_with_params(program, ins, &var->loc, VKD3DSIH_DCL, 0, 0)) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++instructions->count; ++ ++ semantic = &ins->declaration.semantic; ++ semantic->resource_type = resource_type; ++ ++ dst_param = &semantic->resource.reg; ++ vsir_register_init(&dst_param->reg, VKD3DSPR_SAMPLER, VKD3D_DATA_FLOAT, 1); ++ dst_param->reg.dimension = VSIR_DIMENSION_NONE; ++ dst_param->reg.idx[0].offset = var->regs[HLSL_REGSET_SAMPLERS].index + i; ++ dst_param->write_mask = 0; ++ range = &semantic->resource.range; ++ range->space = 0; ++ range->first = range->last = dst_param->reg.idx[0].offset; ++ ++ if (!(vsir_instr = hlsl_new_vsir_instruction_ref(ctx, instructions->count - 1, NULL, ++ NULL, &var->loc))) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ hlsl_block_add_instr(block, vsir_instr); ++ } ++ } ++ } ++} ++ ++static struct vkd3d_shader_instruction *generate_vsir_add_program_instruction( ++ struct hlsl_ctx *ctx, struct vsir_program *program, ++ const struct vkd3d_shader_location *loc, enum vkd3d_shader_opcode opcode, ++ unsigned int dst_count, unsigned int src_count) ++{ ++ struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ struct vkd3d_shader_instruction *ins; ++ ++ if (!shader_instruction_array_reserve(instructions, instructions->count + 1)) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return NULL; ++ } ++ ins = &instructions->elements[instructions->count]; ++ if (!vsir_instruction_init_with_params(program, ins, loc, opcode, dst_count, src_count)) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return NULL; ++ } ++ ++instructions->count; ++ return ins; ++} ++ ++static void sm1_generate_vsir_instr_constant(struct hlsl_ctx *ctx, ++ struct vsir_program *program, struct 
hlsl_ir_constant *constant) ++{ ++ struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ struct hlsl_ir_node *instr = &constant->node; ++ struct vkd3d_shader_dst_param *dst_param; ++ struct vkd3d_shader_src_param *src_param; ++ struct vkd3d_shader_instruction *ins; ++ struct hlsl_ir_node *vsir_instr; ++ ++ VKD3D_ASSERT(instr->reg.allocated); ++ VKD3D_ASSERT(constant->reg.allocated); ++ ++ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1))) ++ return; ++ ++ src_param = &ins->src[0]; ++ vsir_register_init(&src_param->reg, VKD3DSPR_CONST, VKD3D_DATA_FLOAT, 1); ++ src_param->reg.idx[0].offset = constant->reg.id; ++ src_param->swizzle = sm1_generate_vsir_get_src_swizzle(constant->reg.writemask, instr->reg.writemask); ++ ++ dst_param = &ins->dst[0]; ++ vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1); ++ dst_param->reg.idx[0].offset = instr->reg.id; ++ dst_param->write_mask = instr->reg.writemask; ++ ++ if (!(vsir_instr = hlsl_new_vsir_instruction_ref(ctx, instructions->count - 1, ++ instr->data_type, &instr->reg, &instr->loc))) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++ list_add_before(&instr->entry, &vsir_instr->entry); ++ hlsl_replace_node(instr, vsir_instr); ++} ++ ++static void sm1_generate_vsir_init_dst_param_from_deref(struct hlsl_ctx *ctx, ++ struct vkd3d_shader_dst_param *dst_param, struct hlsl_deref *deref, ++ const struct vkd3d_shader_location *loc, unsigned int writemask) ++{ ++ enum vkd3d_shader_register_type type = VKD3DSPR_TEMP; ++ struct vkd3d_shader_version version; ++ uint32_t register_index; ++ struct hlsl_reg reg; ++ ++ reg = hlsl_reg_from_deref(ctx, deref); ++ register_index = reg.id; ++ writemask = hlsl_combine_writemasks(reg.writemask, writemask); ++ ++ if (deref->var->is_output_semantic) ++ { ++ version.major = ctx->profile->major_version; ++ version.minor = ctx->profile->minor_version; ++ version.type = ctx->profile->type; ++ ++ if (version.type == VKD3D_SHADER_TYPE_PIXEL && version.major == 1) ++ { ++ type = VKD3DSPR_TEMP; ++ register_index = 0; ++ } ++ else if (!hlsl_sm1_register_from_semantic(&version, deref->var->semantic.name, ++ deref->var->semantic.index, true, &type, ®ister_index)) ++ { ++ VKD3D_ASSERT(reg.allocated); ++ type = VKD3DSPR_OUTPUT; ++ register_index = reg.id; ++ } ++ else ++ writemask = (1u << deref->var->data_type->dimx) - 1; ++ } ++ else ++ VKD3D_ASSERT(reg.allocated); ++ ++ vsir_register_init(&dst_param->reg, type, VKD3D_DATA_FLOAT, 1); ++ dst_param->write_mask = writemask; ++ dst_param->reg.idx[0].offset = register_index; ++ ++ if (deref->rel_offset.node) ++ hlsl_fixme(ctx, loc, "Translate relative addressing on dst register for vsir."); ++} ++ ++static void sm1_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx, ++ struct vkd3d_shader_src_param *src_param, struct hlsl_deref *deref, ++ unsigned int dst_writemask, const struct vkd3d_shader_location *loc) ++{ ++ enum vkd3d_shader_register_type type = VKD3DSPR_TEMP; ++ struct vkd3d_shader_version version; ++ uint32_t register_index; ++ unsigned int writemask; ++ struct hlsl_reg reg; ++ ++ reg = hlsl_reg_from_deref(ctx, deref); ++ register_index = reg.id; ++ writemask = reg.writemask; ++ ++ if (deref->var->is_uniform) ++ { ++ VKD3D_ASSERT(reg.allocated); ++ type = VKD3DSPR_CONST; ++ } ++ else if (deref->var->is_input_semantic) ++ { ++ version.major = ctx->profile->major_version; ++ version.minor = ctx->profile->minor_version; ++ version.type = ctx->profile->type; ++ 
if (!hlsl_sm1_register_from_semantic(&version, deref->var->semantic.name, ++ deref->var->semantic.index, false, &type, ®ister_index)) ++ { ++ VKD3D_ASSERT(reg.allocated); ++ type = VKD3DSPR_INPUT; ++ register_index = reg.id; ++ } ++ else ++ writemask = (1 << deref->var->data_type->dimx) - 1; ++ } ++ ++ vsir_register_init(&src_param->reg, type, VKD3D_DATA_FLOAT, 1); ++ src_param->reg.idx[0].offset = register_index; ++ src_param->swizzle = sm1_generate_vsir_get_src_swizzle(writemask, dst_writemask); ++ ++ if (deref->rel_offset.node) ++ hlsl_fixme(ctx, loc, "Translate relative addressing on src register for vsir."); ++} ++ ++static void sm1_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_program *program, ++ struct hlsl_ir_load *load) ++{ ++ struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ struct hlsl_ir_node *instr = &load->node; ++ struct vkd3d_shader_dst_param *dst_param; ++ struct vkd3d_shader_instruction *ins; ++ struct hlsl_ir_node *vsir_instr; ++ ++ VKD3D_ASSERT(instr->reg.allocated); ++ ++ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1))) ++ return; ++ ++ dst_param = &ins->dst[0]; ++ vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1); ++ dst_param->reg.idx[0].offset = instr->reg.id; ++ dst_param->write_mask = instr->reg.writemask; ++ ++ sm1_generate_vsir_init_src_param_from_deref(ctx, &ins->src[0], &load->src, dst_param->write_mask, ++ &ins->location); ++ ++ if (!(vsir_instr = hlsl_new_vsir_instruction_ref(ctx, instructions->count - 1, instr->data_type, ++ &instr->reg, &instr->loc))) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++ list_add_before(&instr->entry, &vsir_instr->entry); ++ hlsl_replace_node(instr, vsir_instr); ++} ++ ++static void sm1_generate_vsir_instr_swizzle(struct hlsl_ctx *ctx, struct vsir_program *program, ++ struct hlsl_ir_swizzle *swizzle_instr) ++{ ++ struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ struct hlsl_ir_node *instr = &swizzle_instr->node, *val = swizzle_instr->val.node; ++ struct vkd3d_shader_dst_param *dst_param; ++ struct vkd3d_shader_src_param *src_param; ++ struct vkd3d_shader_instruction *ins; ++ struct hlsl_ir_node *vsir_instr; ++ uint32_t swizzle; ++ ++ VKD3D_ASSERT(instr->reg.allocated); ++ ++ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1))) ++ return; ++ ++ dst_param = &ins->dst[0]; ++ vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1); ++ dst_param->reg.idx[0].offset = instr->reg.id; ++ dst_param->write_mask = instr->reg.writemask; ++ ++ swizzle = hlsl_swizzle_from_writemask(val->reg.writemask); ++ swizzle = hlsl_combine_swizzles(swizzle, swizzle_instr->swizzle, instr->data_type->dimx); ++ swizzle = hlsl_map_swizzle(swizzle, ins->dst[0].write_mask); ++ swizzle = vsir_swizzle_from_hlsl(swizzle); ++ ++ src_param = &ins->src[0]; ++ vsir_register_init(&src_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1); ++ src_param->reg.idx[0].offset = val->reg.id; ++ src_param->swizzle = swizzle; ++ ++ if (!(vsir_instr = hlsl_new_vsir_instruction_ref(ctx, instructions->count - 1, instr->data_type, ++ &instr->reg, &instr->loc))) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++ list_add_before(&instr->entry, &vsir_instr->entry); ++ hlsl_replace_node(instr, vsir_instr); ++} ++ ++static void sm1_generate_vsir_instr_store(struct hlsl_ctx *ctx, struct vsir_program *program, ++ struct hlsl_ir_store *store) ++{ 
++ struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ struct hlsl_ir_node *rhs = store->rhs.node; ++ struct hlsl_ir_node *instr = &store->node; ++ struct vkd3d_shader_instruction *ins; ++ struct vkd3d_shader_src_param *src_param; ++ struct hlsl_ir_node *vsir_instr; ++ ++ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1))) ++ return; ++ ++ sm1_generate_vsir_init_dst_param_from_deref(ctx, &ins->dst[0], &store->lhs, &ins->location, store->writemask); ++ ++ src_param = &ins->src[0]; ++ vsir_register_init(&src_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1); ++ src_param->reg.idx[0].offset = rhs->reg.id; ++ src_param->swizzle = sm1_generate_vsir_get_src_swizzle(rhs->reg.writemask, ins->dst[0].write_mask); ++ ++ if (!(vsir_instr = hlsl_new_vsir_instruction_ref(ctx, instructions->count - 1, NULL, NULL, &instr->loc))) ++ { ++ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY; ++ return; ++ } ++ ++ list_add_before(&instr->entry, &vsir_instr->entry); ++ hlsl_replace_node(instr, vsir_instr); ++} ++ ++static bool sm1_generate_vsir_instr(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) ++{ ++ struct vsir_program *program = context; ++ ++ switch (instr->type) ++ { ++ case HLSL_IR_CONSTANT: ++ sm1_generate_vsir_instr_constant(ctx, program, hlsl_ir_constant(instr)); ++ return true; ++ ++ case HLSL_IR_LOAD: ++ sm1_generate_vsir_instr_load(ctx, program, hlsl_ir_load(instr)); ++ return true; ++ ++ case HLSL_IR_STORE: ++ sm1_generate_vsir_instr_store(ctx, program, hlsl_ir_store(instr)); ++ return true; ++ ++ case HLSL_IR_SWIZZLE: ++ sm1_generate_vsir_instr_swizzle(ctx, program, hlsl_ir_swizzle(instr)); ++ return true; ++ ++ default: ++ break; ++ } ++ ++ return false; ++} ++ + /* OBJECTIVE: Translate all the information from ctx and entry_func to the + * vsir_program and ctab blob, so they can be used as input to d3dbc_compile() + * without relying on ctx and entry_func. 
*/ +@@ -6014,6 +6736,7 @@ static void sm1_generate_vsir(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl + { + struct vkd3d_shader_version version = {0}; + struct vkd3d_bytecode_buffer buffer = {0}; ++ struct hlsl_block block; + + version.major = ctx->profile->major_version; + version.minor = ctx->profile->minor_version; +@@ -6035,6 +6758,13 @@ static void sm1_generate_vsir(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl + ctab->size = buffer.size; + + sm1_generate_vsir_signature(ctx, program); ++ ++ hlsl_block_init(&block); ++ sm1_generate_vsir_constant_defs(ctx, program, &block); ++ sm1_generate_vsir_sampler_dcls(ctx, program, &block); ++ list_move_head(&entry_func->body.instrs, &block.instrs); ++ ++ hlsl_transform_ir(ctx, sm1_generate_vsir_instr, &entry_func->body, program); + } + + static struct hlsl_ir_jump *loop_unrolling_find_jump(struct hlsl_block *block, struct hlsl_ir_node *stop_point, +@@ -6406,18 +7136,13 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry + append_output_var_copy(ctx, body, entry_func->return_var); + } + +- for (i = 0; i < entry_func->attr_count; ++i) +- { +- const struct hlsl_attribute *attr = entry_func->attrs[i]; +- +- if (!strcmp(attr->name, "numthreads") && profile->type == VKD3D_SHADER_TYPE_COMPUTE) +- parse_numthreads_attribute(ctx, attr); +- else +- hlsl_warning(ctx, &entry_func->attrs[i]->loc, VKD3D_SHADER_WARNING_HLSL_UNKNOWN_ATTRIBUTE, +- "Ignoring unknown attribute \"%s\".", entry_func->attrs[i]->name); +- } ++ parse_entry_function_attributes(ctx, entry_func); ++ if (ctx->result) ++ return ctx->result; + +- if (profile->type == VKD3D_SHADER_TYPE_COMPUTE && !ctx->found_numthreads) ++ if (profile->type == VKD3D_SHADER_TYPE_HULL) ++ validate_hull_shader_attributes(ctx, entry_func); ++ else if (profile->type == VKD3D_SHADER_TYPE_COMPUTE && !ctx->found_numthreads) + hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE, + "Entry point \"%s\" is missing a [numthreads] attribute.", entry_func->func->name); + +diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c +index db4913b7c62..716adb15f08 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c ++++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c +@@ -1452,11 +1452,15 @@ static bool constant_is_one(struct hlsl_ir_constant *const_arg) + + case HLSL_TYPE_UINT: + case HLSL_TYPE_INT: +- case HLSL_TYPE_BOOL: + if (const_arg->value.u[k].u != 1) + return false; + break; + ++ case HLSL_TYPE_BOOL: ++ if (const_arg->value.u[k].u != ~0) ++ return false; ++ break; ++ + default: + return false; + } +@@ -1514,6 +1518,20 @@ bool hlsl_fold_constant_identities(struct hlsl_ctx *ctx, struct hlsl_ir_node *in + res_node = mut_arg; + break; + ++ case HLSL_OP2_LOGIC_AND: ++ if (constant_is_zero(const_arg)) ++ res_node = &const_arg->node; ++ else if (constant_is_one(const_arg)) ++ res_node = mut_arg; ++ break; ++ ++ case HLSL_OP2_LOGIC_OR: ++ if (constant_is_zero(const_arg)) ++ res_node = mut_arg; ++ else if (constant_is_one(const_arg)) ++ res_node = &const_arg->node; ++ break; ++ + default: + break; + } +diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c +index 747238e2fee..d765abc938b 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/ir.c ++++ b/libs/vkd3d/libs/vkd3d-shader/ir.c +@@ -136,7 +136,7 @@ static void vkd3d_shader_instruction_make_nop(struct vkd3d_shader_instruction *i + vsir_instruction_init(ins, &location, VKD3DSIH_NOP); + } + +-static bool 
vsir_instruction_init_with_params(struct vsir_program *program, ++bool vsir_instruction_init_with_params(struct vsir_program *program, + struct vkd3d_shader_instruction *ins, const struct vkd3d_shader_location *location, + enum vkd3d_shader_opcode opcode, unsigned int dst_count, unsigned int src_count) + { +diff --git a/libs/vkd3d/libs/vkd3d-shader/spirv.c b/libs/vkd3d/libs/vkd3d-shader/spirv.c +index 8052e951704..c1fd07a533a 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/spirv.c ++++ b/libs/vkd3d/libs/vkd3d-shader/spirv.c +@@ -97,15 +97,37 @@ static enum vkd3d_result vkd3d_spirv_binary_to_text(const struct vkd3d_shader_co + if (!(spvret = spvBinaryToText(context, spirv->code, spirv->size / sizeof(uint32_t), + get_binary_to_text_options(formatting), &text, &diagnostic))) + { +- void *code = vkd3d_malloc(text->length); +- if (code) ++ const char *p, *q, *end, *pad, *truncate; ++ struct vkd3d_string_buffer buffer; ++ size_t line_len; ++ ++ vkd3d_string_buffer_init(&buffer); ++ ++ for (p = text->str, end = p + text->length; p < end; p = q) + { +- memcpy(code, text->str, text->length); +- out->size = text->length; +- out->code = code; ++ if (!(q = memchr(p, '\n', end - p))) ++ q = end; ++ else ++ ++q; ++ ++ /* FIXME: Note that when colour output is enabled, we count colour ++ * escape codes towards the line length. It's possible to fix ++ * that, but not completely trivial. */ ++ for (pad = "", line_len = 100; q - p > line_len; line_len = 100 - strlen(pad)) ++ { ++ if (!(truncate = memchr(p + line_len, ' ', q - p - line_len))) ++ break; ++ vkd3d_string_buffer_printf(&buffer, "%s%.*s\n", pad, (int)(truncate - p), p); ++ p = truncate + 1; ++ if (formatting & VKD3D_SHADER_COMPILE_OPTION_FORMATTING_INDENT) ++ pad = " "; ++ else ++ pad = " "; ++ } ++ vkd3d_string_buffer_printf(&buffer, "%s%.*s", pad, (int)(q - p), p); + } +- else +- result = VKD3D_ERROR_OUT_OF_MEMORY; ++ ++ vkd3d_shader_code_from_string_buffer(out, &buffer); + } + else + { +diff --git a/libs/vkd3d/libs/vkd3d-shader/tpf.c b/libs/vkd3d/libs/vkd3d-shader/tpf.c +index 497a4c3b335..c61086419a6 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/tpf.c ++++ b/libs/vkd3d/libs/vkd3d-shader/tpf.c +@@ -2918,16 +2918,16 @@ static void write_sm4_signature(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc, + { + case HLSL_TYPE_FLOAT: + case HLSL_TYPE_HALF: +- put_u32(&buffer, D3D_REGISTER_COMPONENT_FLOAT32); ++ put_u32(&buffer, VKD3D_SHADER_COMPONENT_FLOAT); + break; + + case HLSL_TYPE_INT: +- put_u32(&buffer, D3D_REGISTER_COMPONENT_SINT32); ++ put_u32(&buffer, VKD3D_SHADER_COMPONENT_INT); + break; + + case HLSL_TYPE_BOOL: + case HLSL_TYPE_UINT: +- put_u32(&buffer, D3D_REGISTER_COMPONENT_UINT32); ++ put_u32(&buffer, VKD3D_SHADER_COMPONENT_UINT); + break; + + default: +@@ -2935,7 +2935,7 @@ static void write_sm4_signature(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc, + hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE, + "Invalid data type %s for semantic variable %s.", string->buffer, var->name); + hlsl_release_string_buffer(ctx, string); +- put_u32(&buffer, D3D_REGISTER_COMPONENT_UNKNOWN); ++ put_u32(&buffer, VKD3D_SHADER_COMPONENT_VOID); + } + put_u32(&buffer, reg_idx); + put_u32(&buffer, vkd3d_make_u16(width, use_mask)); +@@ -3123,24 +3123,24 @@ static D3D_SHADER_INPUT_TYPE sm4_resource_type(const struct hlsl_type *type) + vkd3d_unreachable(); + } + +-static D3D_RESOURCE_RETURN_TYPE sm4_resource_format(const struct hlsl_type *type) ++static enum vkd3d_sm4_data_type sm4_data_type(const struct hlsl_type *type) + { + switch 
(type->e.resource.format->e.numeric.type) + { + case HLSL_TYPE_DOUBLE: +- return D3D_RETURN_TYPE_DOUBLE; ++ return VKD3D_SM4_DATA_DOUBLE; + + case HLSL_TYPE_FLOAT: + case HLSL_TYPE_HALF: +- return D3D_RETURN_TYPE_FLOAT; ++ return VKD3D_SM4_DATA_FLOAT; + + case HLSL_TYPE_INT: +- return D3D_RETURN_TYPE_SINT; ++ return VKD3D_SM4_DATA_INT; + break; + + case HLSL_TYPE_BOOL: + case HLSL_TYPE_UINT: +- return D3D_RETURN_TYPE_UINT; ++ return VKD3D_SM4_DATA_UINT; + + default: + vkd3d_unreachable(); +@@ -3471,7 +3471,7 @@ static void write_sm4_rdef(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc) + { + unsigned int dimx = resource->component_type->e.resource.format->dimx; + +- put_u32(&buffer, sm4_resource_format(resource->component_type)); ++ put_u32(&buffer, sm4_data_type(resource->component_type)); + put_u32(&buffer, sm4_rdef_resource_dimension(resource->component_type)); + put_u32(&buffer, ~0u); /* FIXME: multisample count */ + flags |= (dimx - 1) << VKD3D_SM4_SIF_TEXTURE_COMPONENTS_SHIFT; +@@ -4348,7 +4348,7 @@ static void write_sm4_dcl_textures(const struct tpf_writer *tpf, const struct ex + .dsts[0].reg.idx_count = 1, + .dst_count = 1, + +- .idx[0] = sm4_resource_format(component_type) * 0x1111, ++ .idx[0] = sm4_data_type(component_type) * 0x1111, + .idx_count = 1, + }; + +@@ -6110,7 +6110,7 @@ static void write_sm4_sfi0(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc) + /* FIXME: We also emit code that should require UAVS_AT_EVERY_STAGE, + * STENCIL_REF, and TYPED_UAV_LOAD_ADDITIONAL_FORMATS. */ + +- if (flags) ++ if (*flags) + dxbc_writer_add_section(dxbc, TAG_SFI0, flags, sizeof(*flags)); + else + vkd3d_free(flags); +diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h +index 442885f53b4..327461371a4 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h ++++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h +@@ -152,6 +152,12 @@ enum vkd3d_shader_error + VKD3D_SHADER_ERROR_HLSL_UNKNOWN_MODIFIER = 5030, + VKD3D_SHADER_ERROR_HLSL_INVALID_STATE_BLOCK_ENTRY = 5031, + VKD3D_SHADER_ERROR_HLSL_FAILED_FORCED_UNROLL = 5032, ++ VKD3D_SHADER_ERROR_HLSL_INVALID_PROFILE = 5033, ++ VKD3D_SHADER_ERROR_HLSL_MISPLACED_COMPILE = 5034, ++ VKD3D_SHADER_ERROR_HLSL_INVALID_DOMAIN = 5035, ++ VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT = 5036, ++ VKD3D_SHADER_ERROR_HLSL_INVALID_OUTPUT_PRIMITIVE = 5037, ++ VKD3D_SHADER_ERROR_HLSL_INVALID_PARTITIONING = 5038, + + VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION = 5300, + VKD3D_SHADER_WARNING_HLSL_DIVISION_BY_ZERO = 5301, +@@ -169,6 +175,10 @@ enum vkd3d_shader_error + VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY = 7004, + VKD3D_SHADER_ERROR_D3DBC_INVALID_REGISTER_INDEX = 7005, + VKD3D_SHADER_ERROR_D3DBC_UNDECLARED_SEMANTIC = 7006, ++ VKD3D_SHADER_ERROR_D3DBC_INVALID_REGISTER_TYPE = 7007, ++ VKD3D_SHADER_ERROR_D3DBC_INVALID_REGISTER_COUNT = 7008, ++ VKD3D_SHADER_ERROR_D3DBC_NOT_IMPLEMENTED = 7009, ++ VKD3D_SHADER_ERROR_D3DBC_INVALID_PROFILE = 7010, + + VKD3D_SHADER_WARNING_D3DBC_IGNORED_INSTRUCTION_FLAGS= 7300, + +@@ -1389,6 +1399,9 @@ enum vkd3d_result vsir_program_normalise(struct vsir_program *program, uint64_t + const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_message_context *message_context); + enum vkd3d_result vsir_program_validate(struct vsir_program *program, uint64_t config_flags, + const char *source_name, struct vkd3d_shader_message_context *message_context); ++bool vsir_instruction_init_with_params(struct vsir_program *program, ++ struct 
vkd3d_shader_instruction *ins, const struct vkd3d_shader_location *location, ++ enum vkd3d_shader_opcode opcode, unsigned int dst_count, unsigned int src_count); + + static inline struct vkd3d_shader_dst_param *vsir_program_get_dst_params( + struct vsir_program *program, unsigned int count) +-- +2.45.2 + diff --git a/patches/vkd3d-latest/0003-Updated-vkd3d-to-bfd1fc9cd6cf9cf4e9c23b4ffad2ba8a328.patch b/patches/vkd3d-latest/0003-Updated-vkd3d-to-bfd1fc9cd6cf9cf4e9c23b4ffad2ba8a328.patch new file mode 100644 index 00000000..ffd08a39 --- /dev/null +++ b/patches/vkd3d-latest/0003-Updated-vkd3d-to-bfd1fc9cd6cf9cf4e9c23b4ffad2ba8a328.patch @@ -0,0 +1,619 @@ +From a6e448523e93d3ade908df12b1d8675b19420de4 Mon Sep 17 00:00:00 2001 +From: Alistair Leslie-Hughes +Date: Fri, 6 Sep 2024 08:13:50 +1000 +Subject: [PATCH] Updated vkd3d to bfd1fc9cd6cf9cf4e9c23b4ffad2ba8a3282c1f9. + +--- + libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 98 +++++++++++----------- + libs/vkd3d/libs/vkd3d-shader/ir.c | 121 ++++++++++++++++----------- + 2 files changed, 121 insertions(+), 98 deletions(-) + +diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c +index 3b9ec98448d..de5f28c1815 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c ++++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c +@@ -1862,7 +1862,7 @@ void write_sm1_uniforms(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buff + set_u32(buffer, creator_offset, offset - ctab_start); + + ctab_end = bytecode_align(buffer); +- set_u32(buffer, size_offset, vkd3d_make_u32(D3DSIO_COMMENT, (ctab_end - ctab_offset) / sizeof(uint32_t))); ++ set_u32(buffer, size_offset, vkd3d_make_u32(VKD3D_SM1_OP_COMMENT, (ctab_end - ctab_offset) / sizeof(uint32_t))); + } + + static uint32_t sm1_encode_register_type(enum vkd3d_shader_register_type type) +@@ -1873,7 +1873,7 @@ static uint32_t sm1_encode_register_type(enum vkd3d_shader_register_type type) + + struct sm1_instruction + { +- D3DSHADER_INSTRUCTION_OPCODE_TYPE opcode; ++ enum vkd3d_sm1_opcode opcode; + unsigned int flags; + + struct sm1_dst_register +@@ -1902,7 +1902,7 @@ static bool is_inconsequential_instr(const struct sm1_instruction *instr) + const struct sm1_dst_register *dst = &instr->dst; + unsigned int i; + +- if (instr->opcode != D3DSIO_MOV) ++ if (instr->opcode != VKD3D_SM1_OP_MOV) + return false; + if (dst->mod != D3DSPDM_NONE) + return false; +@@ -1947,7 +1947,7 @@ static void d3dbc_write_instruction(struct d3dbc_compiler *d3dbc, const struct s + token |= VKD3D_SM1_INSTRUCTION_FLAGS_MASK & (instr->flags << VKD3D_SM1_INSTRUCTION_FLAGS_SHIFT); + + if (version->major > 1) +- token |= (instr->has_dst + instr->src_count) << D3DSI_INSTLENGTH_SHIFT; ++ token |= (instr->has_dst + instr->src_count) << VKD3D_SM1_INSTRUCTION_LENGTH_SHIFT; + put_u32(buffer, token); + + if (instr->has_dst) +@@ -1967,7 +1967,7 @@ static void d3dbc_write_dp2add(struct d3dbc_compiler *d3dbc, const struct hlsl_r + { + struct sm1_instruction instr = + { +- .opcode = D3DSIO_DP2ADD, ++ .opcode = VKD3D_SM1_OP_DP2ADD, + + .dst.type = VKD3DSPR_TEMP, + .dst.writemask = dst->writemask, +@@ -1989,9 +1989,9 @@ static void d3dbc_write_dp2add(struct d3dbc_compiler *d3dbc, const struct hlsl_r + d3dbc_write_instruction(d3dbc, &instr); + } + +-static void d3dbc_write_ternary_op(struct d3dbc_compiler *d3dbc, +- D3DSHADER_INSTRUCTION_OPCODE_TYPE opcode, const struct hlsl_reg *dst, +- const struct hlsl_reg *src1, const struct hlsl_reg *src2, const struct hlsl_reg *src3) ++static void d3dbc_write_ternary_op(struct d3dbc_compiler *d3dbc, enum 
vkd3d_sm1_opcode opcode, ++ const struct hlsl_reg *dst, const struct hlsl_reg *src1, ++ const struct hlsl_reg *src2, const struct hlsl_reg *src3) + { + struct sm1_instruction instr = + { +@@ -2020,7 +2020,7 @@ static void d3dbc_write_ternary_op(struct d3dbc_compiler *d3dbc, + d3dbc_write_instruction(d3dbc, &instr); + } + +-static void d3dbc_write_binary_op(struct d3dbc_compiler *d3dbc, D3DSHADER_INSTRUCTION_OPCODE_TYPE opcode, ++static void d3dbc_write_binary_op(struct d3dbc_compiler *d3dbc, enum vkd3d_sm1_opcode opcode, + const struct hlsl_reg *dst, const struct hlsl_reg *src1, const struct hlsl_reg *src2) + { + struct sm1_instruction instr = +@@ -2046,7 +2046,7 @@ static void d3dbc_write_binary_op(struct d3dbc_compiler *d3dbc, D3DSHADER_INSTRU + d3dbc_write_instruction(d3dbc, &instr); + } + +-static void d3dbc_write_dot(struct d3dbc_compiler *d3dbc, D3DSHADER_INSTRUCTION_OPCODE_TYPE opcode, ++static void d3dbc_write_dot(struct d3dbc_compiler *d3dbc, enum vkd3d_sm1_opcode opcode, + const struct hlsl_reg *dst, const struct hlsl_reg *src1, const struct hlsl_reg *src2) + { + struct sm1_instruction instr = +@@ -2070,7 +2070,7 @@ static void d3dbc_write_dot(struct d3dbc_compiler *d3dbc, D3DSHADER_INSTRUCTION_ + d3dbc_write_instruction(d3dbc, &instr); + } + +-static void d3dbc_write_unary_op(struct d3dbc_compiler *d3dbc, D3DSHADER_INSTRUCTION_OPCODE_TYPE opcode, ++static void d3dbc_write_unary_op(struct d3dbc_compiler *d3dbc, enum vkd3d_sm1_opcode opcode, + const struct hlsl_reg *dst, const struct hlsl_reg *src, + D3DSHADER_PARAM_SRCMOD_TYPE src_mod, D3DSHADER_PARAM_DSTMOD_TYPE dst_mod) + { +@@ -2118,7 +2118,7 @@ static void d3dbc_write_cast(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + /* Integrals are internally represented as floats, so no change is necessary.*/ + case HLSL_TYPE_HALF: + case HLSL_TYPE_FLOAT: +- d3dbc_write_unary_op(d3dbc, D3DSIO_MOV, &instr->reg, &arg1->reg, 0, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_MOV, &instr->reg, &arg1->reg, 0, 0); + break; + + case HLSL_TYPE_DOUBLE: +@@ -2142,7 +2142,7 @@ static void d3dbc_write_cast(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + break; + case HLSL_TYPE_INT: + case HLSL_TYPE_UINT: +- d3dbc_write_unary_op(d3dbc, D3DSIO_MOV, &instr->reg, &arg1->reg, 0, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_MOV, &instr->reg, &arg1->reg, 0, 0); + break; + + case HLSL_TYPE_BOOL: +@@ -2353,7 +2353,7 @@ static void d3dbc_write_vsir_simple_instruction(struct d3dbc_compiler *d3dbc, + return; + } + +- instr.opcode = (D3DSHADER_INSTRUCTION_OPCODE_TYPE)info->sm1_opcode; ++ instr.opcode = info->sm1_opcode; + instr.has_dst = info->dst_count; + instr.src_count = info->src_count; + +@@ -2413,9 +2413,9 @@ static void d3dbc_write_semantic_dcl(struct d3dbc_compiler *d3dbc, + reg.reg = element->register_index; + } + +- token = D3DSIO_DCL; ++ token = VKD3D_SM1_OP_DCL; + if (version->major > 1) +- token |= 2 << D3DSI_INSTLENGTH_SHIFT; ++ token |= 2 << VKD3D_SM1_INSTRUCTION_LENGTH_SHIFT; + put_u32(buffer, token); + + token = (1u << 31); +@@ -2455,7 +2455,7 @@ static void d3dbc_write_semantic_dcls(struct d3dbc_compiler *d3dbc) + } + + static void d3dbc_write_per_component_unary_op(struct d3dbc_compiler *d3dbc, +- const struct hlsl_ir_node *instr, D3DSHADER_INSTRUCTION_OPCODE_TYPE opcode) ++ const struct hlsl_ir_node *instr, enum vkd3d_sm1_opcode opcode) + { + struct hlsl_ir_expr *expr = hlsl_ir_expr(instr); + struct hlsl_ir_node *arg1 = expr->operands[0].node; +@@ -2476,7 +2476,7 @@ static void d3dbc_write_sincos(struct d3dbc_compiler *d3dbc, 
enum hlsl_ir_expr_o + { + struct sm1_instruction instr = + { +- .opcode = D3DSIO_SINCOS, ++ .opcode = VKD3D_SM1_OP_SINCOS, + + .dst.type = VKD3DSPR_TEMP, + .dst.writemask = dst->writemask, +@@ -2523,7 +2523,7 @@ static void d3dbc_write_expr(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + + if (expr->op == HLSL_OP1_REINTERPRET) + { +- d3dbc_write_unary_op(d3dbc, D3DSIO_MOV, &instr->reg, &arg1->reg, 0, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_MOV, &instr->reg, &arg1->reg, 0, 0); + return; + } + +@@ -2543,39 +2543,39 @@ static void d3dbc_write_expr(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + switch (expr->op) + { + case HLSL_OP1_ABS: +- d3dbc_write_unary_op(d3dbc, D3DSIO_ABS, &instr->reg, &arg1->reg, 0, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_ABS, &instr->reg, &arg1->reg, 0, 0); + break; + + case HLSL_OP1_DSX: +- d3dbc_write_unary_op(d3dbc, D3DSIO_DSX, &instr->reg, &arg1->reg, 0, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_DSX, &instr->reg, &arg1->reg, 0, 0); + break; + + case HLSL_OP1_DSY: +- d3dbc_write_unary_op(d3dbc, D3DSIO_DSY, &instr->reg, &arg1->reg, 0, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_DSY, &instr->reg, &arg1->reg, 0, 0); + break; + + case HLSL_OP1_EXP2: +- d3dbc_write_per_component_unary_op(d3dbc, instr, D3DSIO_EXP); ++ d3dbc_write_per_component_unary_op(d3dbc, instr, VKD3D_SM1_OP_EXP); + break; + + case HLSL_OP1_LOG2: +- d3dbc_write_per_component_unary_op(d3dbc, instr, D3DSIO_LOG); ++ d3dbc_write_per_component_unary_op(d3dbc, instr, VKD3D_SM1_OP_LOG); + break; + + case HLSL_OP1_NEG: +- d3dbc_write_unary_op(d3dbc, D3DSIO_MOV, &instr->reg, &arg1->reg, D3DSPSM_NEG, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_MOV, &instr->reg, &arg1->reg, D3DSPSM_NEG, 0); + break; + + case HLSL_OP1_SAT: +- d3dbc_write_unary_op(d3dbc, D3DSIO_MOV, &instr->reg, &arg1->reg, 0, D3DSPDM_SATURATE); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_MOV, &instr->reg, &arg1->reg, 0, D3DSPDM_SATURATE); + break; + + case HLSL_OP1_RCP: +- d3dbc_write_per_component_unary_op(d3dbc, instr, D3DSIO_RCP); ++ d3dbc_write_per_component_unary_op(d3dbc, instr, VKD3D_SM1_OP_RCP); + break; + + case HLSL_OP1_RSQ: +- d3dbc_write_per_component_unary_op(d3dbc, instr, D3DSIO_RSQ); ++ d3dbc_write_per_component_unary_op(d3dbc, instr, VKD3D_SM1_OP_RSQ); + break; + + case HLSL_OP1_COS_REDUCED: +@@ -2584,34 +2584,34 @@ static void d3dbc_write_expr(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + break; + + case HLSL_OP2_ADD: +- d3dbc_write_binary_op(d3dbc, D3DSIO_ADD, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_binary_op(d3dbc, VKD3D_SM1_OP_ADD, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case HLSL_OP2_MAX: +- d3dbc_write_binary_op(d3dbc, D3DSIO_MAX, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_binary_op(d3dbc, VKD3D_SM1_OP_MAX, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case HLSL_OP2_MIN: +- d3dbc_write_binary_op(d3dbc, D3DSIO_MIN, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_binary_op(d3dbc, VKD3D_SM1_OP_MIN, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case HLSL_OP2_MUL: +- d3dbc_write_binary_op(d3dbc, D3DSIO_MUL, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_binary_op(d3dbc, VKD3D_SM1_OP_MUL, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case HLSL_OP1_FRACT: +- d3dbc_write_unary_op(d3dbc, D3DSIO_FRC, &instr->reg, &arg1->reg, D3DSPSM_NONE, 0); ++ d3dbc_write_unary_op(d3dbc, VKD3D_SM1_OP_FRC, &instr->reg, &arg1->reg, D3DSPSM_NONE, 0); + break; + + case HLSL_OP2_DOT: + switch (arg1->data_type->dimx) + { + case 4: +- 
d3dbc_write_dot(d3dbc, D3DSIO_DP4, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_dot(d3dbc, VKD3D_SM1_OP_DP4, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case 3: +- d3dbc_write_dot(d3dbc, D3DSIO_DP3, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_dot(d3dbc, VKD3D_SM1_OP_DP3, &instr->reg, &arg1->reg, &arg2->reg); + break; + + default: +@@ -2620,23 +2620,23 @@ static void d3dbc_write_expr(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + break; + + case HLSL_OP2_LOGIC_AND: +- d3dbc_write_binary_op(d3dbc, D3DSIO_MIN, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_binary_op(d3dbc, VKD3D_SM1_OP_MIN, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case HLSL_OP2_LOGIC_OR: +- d3dbc_write_binary_op(d3dbc, D3DSIO_MAX, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_binary_op(d3dbc, VKD3D_SM1_OP_MAX, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case HLSL_OP2_SLT: + if (version->type == VKD3D_SHADER_TYPE_PIXEL) + hlsl_fixme(ctx, &instr->loc, "Lower SLT instructions for pixel shaders."); +- d3dbc_write_binary_op(d3dbc, D3DSIO_SLT, &instr->reg, &arg1->reg, &arg2->reg); ++ d3dbc_write_binary_op(d3dbc, VKD3D_SM1_OP_SLT, &instr->reg, &arg1->reg, &arg2->reg); + break; + + case HLSL_OP3_CMP: + if (version->type == VKD3D_SHADER_TYPE_VERTEX) + hlsl_fixme(ctx, &instr->loc, "Lower CMP instructions for vertex shaders."); +- d3dbc_write_ternary_op(d3dbc, D3DSIO_CMP, &instr->reg, &arg1->reg, &arg2->reg, &arg3->reg); ++ d3dbc_write_ternary_op(d3dbc, VKD3D_SM1_OP_CMP, &instr->reg, &arg1->reg, &arg2->reg, &arg3->reg); + break; + + case HLSL_OP3_DP2ADD: +@@ -2644,7 +2644,7 @@ static void d3dbc_write_expr(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + break; + + case HLSL_OP3_MAD: +- d3dbc_write_ternary_op(d3dbc, D3DSIO_MAD, &instr->reg, &arg1->reg, &arg2->reg, &arg3->reg); ++ d3dbc_write_ternary_op(d3dbc, VKD3D_SM1_OP_MAD, &instr->reg, &arg1->reg, &arg2->reg, &arg3->reg); + break; + + default: +@@ -2666,7 +2666,7 @@ static void d3dbc_write_if(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_no + + sm1_ifc = (struct sm1_instruction) + { +- .opcode = D3DSIO_IFC, ++ .opcode = VKD3D_SM1_OP_IFC, + .flags = VKD3D_SHADER_REL_OP_NE, /* Make it a "if_ne" instruction. 
*/ + + .srcs[0].type = VKD3DSPR_TEMP, +@@ -2686,12 +2686,12 @@ static void d3dbc_write_if(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_no + + if (!list_empty(&iff->else_block.instrs)) + { +- sm1_else = (struct sm1_instruction){.opcode = D3DSIO_ELSE}; ++ sm1_else = (struct sm1_instruction){.opcode = VKD3D_SM1_OP_ELSE}; + d3dbc_write_instruction(d3dbc, &sm1_else); + d3dbc_write_block(d3dbc, &iff->else_block); + } + +- sm1_endif = (struct sm1_instruction){.opcode = D3DSIO_ENDIF}; ++ sm1_endif = (struct sm1_instruction){.opcode = VKD3D_SM1_OP_ENDIF}; + d3dbc_write_instruction(d3dbc, &sm1_endif); + } + +@@ -2707,7 +2707,7 @@ static void d3dbc_write_jump(struct d3dbc_compiler *d3dbc, const struct hlsl_ir_ + + struct sm1_instruction sm1_instr = + { +- .opcode = D3DSIO_TEXKILL, ++ .opcode = VKD3D_SM1_OP_TEXKILL, + + .dst.type = VKD3DSPR_TEMP, + .dst.reg = reg->id, +@@ -2758,21 +2758,21 @@ static void d3dbc_write_resource_load(struct d3dbc_compiler *d3dbc, const struct + switch (load->load_type) + { + case HLSL_RESOURCE_SAMPLE: +- sm1_instr.opcode = D3DSIO_TEX; ++ sm1_instr.opcode = VKD3D_SM1_OP_TEX; + break; + + case HLSL_RESOURCE_SAMPLE_PROJ: +- sm1_instr.opcode = D3DSIO_TEX; ++ sm1_instr.opcode = VKD3D_SM1_OP_TEX; + sm1_instr.opcode |= VKD3DSI_TEXLD_PROJECT << VKD3D_SM1_INSTRUCTION_FLAGS_SHIFT; + break; + + case HLSL_RESOURCE_SAMPLE_LOD_BIAS: +- sm1_instr.opcode = D3DSIO_TEX; ++ sm1_instr.opcode = VKD3D_SM1_OP_TEX; + sm1_instr.opcode |= VKD3DSI_TEXLD_BIAS << VKD3D_SM1_INSTRUCTION_FLAGS_SHIFT; + break; + + case HLSL_RESOURCE_SAMPLE_GRAD: +- sm1_instr.opcode = D3DSIO_TEXLDD; ++ sm1_instr.opcode = VKD3D_SM1_OP_TEXLDD; + + sm1_instr.srcs[2].type = VKD3DSPR_TEMP; + sm1_instr.srcs[2].reg = ddx->reg.id; +@@ -2889,7 +2889,7 @@ int d3dbc_compile(struct vsir_program *program, uint64_t config_flags, + d3dbc_write_semantic_dcls(&d3dbc); + d3dbc_write_block(&d3dbc, &entry_func->body); + +- put_u32(buffer, D3DSIO_END); ++ put_u32(buffer, VKD3D_SM1_OP_END); + + result = ctx->result; + if (buffer->status) +diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c +index d765abc938b..a483c25f3ad 100644 +--- a/libs/vkd3d/libs/vkd3d-shader/ir.c ++++ b/libs/vkd3d/libs/vkd3d-shader/ir.c +@@ -19,6 +19,15 @@ + #include "vkd3d_shader_private.h" + #include "vkd3d_types.h" + ++struct vsir_normalisation_context ++{ ++ enum vkd3d_result result; ++ struct vsir_program *program; ++ uint64_t config_flags; ++ const struct vkd3d_shader_compile_info *compile_info; ++ struct vkd3d_shader_message_context *message_context; ++}; ++ + static int convert_parameter_info(const struct vkd3d_shader_compile_info *compile_info, + unsigned int *ret_count, const struct vkd3d_shader_parameter1 **ret_parameters) + { +@@ -442,9 +451,10 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog + } + + static enum vkd3d_result vsir_program_lower_instructions(struct vsir_program *program, +- struct vkd3d_shader_message_context *message_context) ++ struct vsir_normalisation_context *ctx) + { + struct vkd3d_shader_instruction_array *instructions = &program->instructions; ++ struct vkd3d_shader_message_context *message_context = ctx->message_context; + unsigned int tmp_idx = ~0u, i; + enum vkd3d_result ret; + +@@ -1755,8 +1765,7 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program + { + struct io_normaliser normaliser = {program->instructions}; + struct vkd3d_shader_instruction *ins; +- bool has_control_point_phase; +- unsigned int i, j; ++ unsigned int i; + + 
normaliser.phase = VKD3DSIH_INVALID; + normaliser.shader_type = program->shader_version.type; +@@ -1765,7 +1774,7 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program + normaliser.output_signature = &program->output_signature; + normaliser.patch_constant_signature = &program->patch_constant_signature; + +- for (i = 0, has_control_point_phase = false; i < program->instructions.count; ++i) ++ for (i = 0; i < program->instructions.count; ++i) + { + ins = &program->instructions.elements[i]; + +@@ -1779,8 +1788,6 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program + vkd3d_shader_instruction_make_nop(ins); + break; + case VKD3DSIH_HS_CONTROL_POINT_PHASE: +- has_control_point_phase = true; +- /* fall through */ + case VKD3DSIH_HS_FORK_PHASE: + case VKD3DSIH_HS_JOIN_PHASE: + normaliser.phase = ins->opcode; +@@ -1790,22 +1797,6 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program + } + } + +- if (normaliser.shader_type == VKD3D_SHADER_TYPE_HULL && !has_control_point_phase) +- { +- /* Inputs and outputs must match for the default phase, so merge ranges must match too. */ +- for (i = 0; i < MAX_REG_OUTPUT; ++i) +- { +- for (j = 0; j < VKD3D_VEC4_SIZE; ++j) +- { +- if (!normaliser.input_range_map[i][j] && normaliser.output_range_map[i][j]) +- normaliser.input_range_map[i][j] = normaliser.output_range_map[i][j]; +- else if (normaliser.input_range_map[i][j] && !normaliser.output_range_map[i][j]) +- normaliser.output_range_map[i][j] = normaliser.input_range_map[i][j]; +- else VKD3D_ASSERT(normaliser.input_range_map[i][j] == normaliser.output_range_map[i][j]); +- } +- } +- } +- + if (!shader_signature_merge(&program->input_signature, normaliser.input_range_map, false) + || !shader_signature_merge(&program->output_signature, normaliser.output_range_map, false) + || !shader_signature_merge(&program->patch_constant_signature, normaliser.pc_range_map, true)) +@@ -2789,8 +2780,9 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte + } + + static enum vkd3d_result vsir_program_flatten_control_flow_constructs(struct vsir_program *program, +- struct vkd3d_shader_message_context *message_context) ++ struct vsir_normalisation_context *ctx) + { ++ struct vkd3d_shader_message_context *message_context = ctx->message_context; + struct cf_flattener flattener = {.program = program}; + enum vkd3d_result result; + +@@ -2860,7 +2852,8 @@ static bool lower_switch_to_if_ladder_add_block_mapping(struct lower_switch_to_i + return true; + } + +-static enum vkd3d_result lower_switch_to_if_ladder(struct vsir_program *program) ++static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vsir_program *program, ++ struct vsir_normalisation_context *ctx) + { + unsigned int block_count = program->block_count, ssa_count = program->ssa_count, current_label = 0, if_label; + size_t ins_capacity = 0, ins_count = 0, i, map_capacity = 0, map_count = 0; +@@ -3050,7 +3043,8 @@ static void ssas_to_temps_block_info_cleanup(struct ssas_to_temps_block_info *bl + vkd3d_free(block_info); + } + +-static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps(struct vsir_program *program) ++static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps(struct vsir_program *program, ++ struct vsir_normalisation_context *ctx) + { + size_t ins_capacity = 0, ins_count = 0, phi_count, incoming_count, i; + struct ssas_to_temps_block_info *info, *block_info = NULL; +@@ -5271,8 +5265,9 @@ out: + } + + static 
enum vkd3d_result vsir_program_structurize(struct vsir_program *program, +- struct vkd3d_shader_message_context *message_context) ++ struct vsir_normalisation_context *ctx) + { ++ struct vkd3d_shader_message_context *message_context = ctx->message_context; + struct vsir_cfg_emit_target target = {0}; + enum vkd3d_result ret; + size_t i; +@@ -5451,8 +5446,9 @@ static enum vkd3d_result vsir_program_materialize_undominated_ssas_to_temps_in_f + } + + static enum vkd3d_result vsir_program_materialize_undominated_ssas_to_temps(struct vsir_program *program, +- struct vkd3d_shader_message_context *message_context) ++ struct vsir_normalisation_context *ctx) + { ++ struct vkd3d_shader_message_context *message_context = ctx->message_context; + enum vkd3d_result ret; + size_t i; + +@@ -5731,14 +5727,14 @@ static void VKD3D_PRINTF_FUNC(3, 4) validator_error(struct validation_context *c + if (ctx->invalid_instruction_idx) + { + vkd3d_shader_error(ctx->message_context, &ctx->null_location, error, "%s", buf.buffer); +- ERR("VSIR validation error: %s\n", buf.buffer); ++ WARN("VSIR validation error: %s\n", buf.buffer); + } + else + { + const struct vkd3d_shader_instruction *ins = &ctx->program->instructions.elements[ctx->instruction_idx]; + vkd3d_shader_error(ctx->message_context, &ins->location, error, + "instruction %zu: %s", ctx->instruction_idx + 1, buf.buffer); +- ERR("VSIR validation error: instruction %zu: %s\n", ctx->instruction_idx + 1, buf.buffer); ++ WARN("VSIR validation error: instruction %zu: %s\n", ctx->instruction_idx + 1, buf.buffer); + } + + vkd3d_string_buffer_cleanup(&buf); +@@ -6243,12 +6239,13 @@ static void vsir_validate_instruction(struct validation_context *ctx) + /* We support two different control flow types in shaders: + * block-based, like DXIL and SPIR-V, and structured, like D3DBC + * and TPF. The shader is detected as block-based when its first +- * instruction, except for DCL_* and phases, is a LABEL. Currently +- * we mandate that each shader is either purely block-based or ++ * instruction, except for NOP, DCL_* and phases, is a LABEL. ++ * Currently we mandate that each shader is either purely block-based or + * purely structured. In principle we could allow structured + * constructs in a block, provided they are confined in a single + * block, but need for that hasn't arisen yet, so we don't. 
*/ +- if (ctx->cf_type == CF_TYPE_UNKNOWN && !vsir_instruction_is_dcl(instruction)) ++ if (ctx->cf_type == CF_TYPE_UNKNOWN && instruction->opcode != VKD3DSIH_NOP ++ && !vsir_instruction_is_dcl(instruction)) + { + if (instruction->opcode == VKD3DSIH_LABEL) + ctx->cf_type = CF_TYPE_BLOCKS; +@@ -6610,33 +6607,59 @@ fail: + return VKD3D_ERROR_OUT_OF_MEMORY; + } + +-enum vkd3d_result vsir_program_normalise(struct vsir_program *program, uint64_t config_flags, +- const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_message_context *message_context) ++#define vsir_transform(ctx, step) vsir_transform_(ctx, #step, step) ++static void vsir_transform_( ++ struct vsir_normalisation_context *ctx, const char *step_name, ++ enum vkd3d_result (*step)(struct vsir_program *program, struct vsir_normalisation_context *ctx)) + { +- enum vkd3d_result result = VKD3D_OK; ++ if (ctx->result < 0) ++ return; + +- if ((result = vsir_program_lower_instructions(program, message_context)) < 0) +- return result; ++ if ((ctx->result = step(ctx->program, ctx)) < 0) ++ { ++ WARN("Transformation \"%s\" failed with result %u.\n", step_name, ctx->result); ++ return; ++ } + +- if (program->shader_version.major >= 6) ++ if ((ctx->result = vsir_program_validate(ctx->program, ctx->config_flags, ++ ctx->compile_info->source_name, ctx->message_context)) < 0) + { +- if ((result = vsir_program_materialise_phi_ssas_to_temps(program)) < 0) +- return result; ++ WARN("Validation failed with result %u after transformation \"%s\".\n", ctx->result, step_name); ++ return; ++ } ++} + +- if ((result = lower_switch_to_if_ladder(program)) < 0) +- return result; ++enum vkd3d_result vsir_program_normalise(struct vsir_program *program, uint64_t config_flags, ++ const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_message_context *message_context) ++{ ++ struct vsir_normalisation_context ctx = ++ { ++ .result = VKD3D_OK, ++ .program = program, ++ .config_flags = config_flags, ++ .compile_info = compile_info, ++ .message_context = message_context, ++ }; ++ enum vkd3d_result result; + +- if ((result = vsir_program_structurize(program, message_context)) < 0) +- return result; ++ vsir_transform(&ctx, vsir_program_lower_instructions); + +- if ((result = vsir_program_flatten_control_flow_constructs(program, message_context)) < 0) +- return result; ++ if (program->shader_version.major >= 6) ++ { ++ vsir_transform(&ctx, vsir_program_materialise_phi_ssas_to_temps); ++ vsir_transform(&ctx, vsir_program_lower_switch_to_selection_ladder); ++ vsir_transform(&ctx, vsir_program_structurize); ++ vsir_transform(&ctx, vsir_program_flatten_control_flow_constructs); ++ vsir_transform(&ctx, vsir_program_materialize_undominated_ssas_to_temps); + +- if ((result = vsir_program_materialize_undominated_ssas_to_temps(program, message_context)) < 0) +- return result; ++ if (ctx.result < 0) ++ return ctx.result; + } + else + { ++ if (ctx.result < 0) ++ return ctx.result; ++ + if (program->shader_version.type != VKD3D_SHADER_TYPE_PIXEL) + { + if ((result = vsir_program_remap_output_signature(program, compile_info, message_context)) < 0) +@@ -6665,7 +6688,7 @@ enum vkd3d_result vsir_program_normalise(struct vsir_program *program, uint64_t + return result; + + if (compile_info->target_type != VKD3D_SHADER_TARGET_GLSL +- && (result = vsir_program_flatten_control_flow_constructs(program, message_context)) < 0) ++ && (result = vsir_program_flatten_control_flow_constructs(program, &ctx)) < 0) + return result; + } + +-- +2.45.2 +
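
Note (illustration, not part of the patch): the ir.c hunk above replaces the long if/return chain in vsir_program_normalise() with a vsir_normalisation_context plus a vsir_transform() helper that runs each pass, records the first failure, and re-validates the program after every successful step. The stand-alone C sketch below only illustrates that pipeline pattern; the names pipeline_ctx, pass_fn, run_pass, validate and the toy passes are hypothetical stand-ins and are not vkd3d APIs.

/* Illustrative sketch of the "sticky result + per-pass validation" pattern.
 * All identifiers here are invented for the example; only the shape of the
 * control flow mirrors the vsir_transform()/vsir_normalisation_context hunk. */
#include <stdio.h>

enum result { RESULT_OK = 0, RESULT_ERROR = -1 };

struct program { int instruction_count; };

struct pipeline_ctx
{
    enum result result;        /* first failure, sticky across passes */
    struct program *program;
};

typedef enum result (*pass_fn)(struct program *program, struct pipeline_ctx *ctx);

/* Hypothetical stand-in for re-validating the program after each pass. */
static enum result validate(const struct program *program)
{
    return program->instruction_count >= 0 ? RESULT_OK : RESULT_ERROR;
}

#define run_pass(ctx, step) run_pass_(ctx, #step, step)
static void run_pass_(struct pipeline_ctx *ctx, const char *name, pass_fn step)
{
    if (ctx->result < 0)       /* an earlier pass already failed; do nothing */
        return;

    if ((ctx->result = step(ctx->program, ctx)) < 0)
    {
        fprintf(stderr, "Pass \"%s\" failed.\n", name);
        return;
    }

    if ((ctx->result = validate(ctx->program)) < 0)
        fprintf(stderr, "Validation failed after pass \"%s\".\n", name);
}

/* Toy passes standing in for the real normalisation steps. */
static enum result lower_instructions(struct program *p, struct pipeline_ctx *ctx)
{
    (void)ctx;
    ++p->instruction_count;
    return RESULT_OK;
}

static enum result flatten_control_flow(struct program *p, struct pipeline_ctx *ctx)
{
    (void)ctx;
    --p->instruction_count;
    return RESULT_OK;
}

int main(void)
{
    struct program program = { .instruction_count = 8 };
    struct pipeline_ctx ctx = { .result = RESULT_OK, .program = &program };

    run_pass(&ctx, lower_instructions);
    run_pass(&ctx, flatten_control_flow);

    return ctx.result < 0 ? 1 : 0;
}

Because the result field is sticky, later run_pass() calls turn into no-ops once a step fails, so the caller checks ctx.result once at the end instead of after every pass.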