From 3802344e97ecd2b94a0e2f78f12a64ace2b93941 Mon Sep 17 00:00:00 2001
From: Shaun Ren
Date: Thu, 31 Jul 2025 21:38:55 -0400
Subject: [PATCH] vkd3d-shader/hlsl: Allocate groupshared registers.

---
 libs/vkd3d-shader/hlsl.h         |  1 +
 libs/vkd3d-shader/hlsl.y         |  5 +--
 libs/vkd3d-shader/hlsl_codegen.c | 56 +++++++++++++++++++++++++++++---
 3 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h
index c52709242..2a7523ac7 100644
--- a/libs/vkd3d-shader/hlsl.h
+++ b/libs/vkd3d-shader/hlsl.h
@@ -554,6 +554,7 @@ struct hlsl_ir_var
     uint32_t is_param : 1;
     uint32_t is_separated_resource : 1;
     uint32_t is_synthetic : 1;
+    uint32_t is_tgsm : 1;
     uint32_t has_explicit_bind_point : 1;
 };
 
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y
index 7160106f0..5d364d66d 100644
--- a/libs/vkd3d-shader/hlsl.y
+++ b/libs/vkd3d-shader/hlsl.y
@@ -2566,13 +2566,10 @@ static void declare_var(struct hlsl_ctx *ctx, struct parse_variable_def *v)
                 "Ignoring the 'groupshared' modifier in a non-compute shader.");
     }
 
-    if (modifiers & HLSL_STORAGE_GROUPSHARED)
-        hlsl_fixme(ctx, &var->loc, "Group shared variables.");
-
     /* Mark it as uniform. We need to do this here since synthetic
      * variables also get put in the global scope, but shouldn't be
      * considered uniforms, and we have no way of telling otherwise. */
-    if (!(modifiers & HLSL_STORAGE_STATIC))
+    if (!(modifiers & (HLSL_STORAGE_STATIC | HLSL_STORAGE_GROUPSHARED)))
         var->storage_modifiers |= HLSL_STORAGE_UNIFORM;
 
     if (stream_output)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c
index 9a026744e..11d1aabd0 100644
--- a/libs/vkd3d-shader/hlsl_codegen.c
+++ b/libs/vkd3d-shader/hlsl_codegen.c
@@ -6171,7 +6171,7 @@ static bool track_components_usage(struct hlsl_ctx *ctx, struct hlsl_ir_node *in
         {
             struct hlsl_ir_load *load = hlsl_ir_load(instr);
 
-            if (!load->src.var->is_uniform)
+            if (!load->src.var->is_uniform && !load->src.var->is_tgsm)
                 return false;
 
             /* These will are handled by validate_static_object_references(). */
@@ -6695,7 +6695,7 @@ static uint32_t allocate_temp_registers(struct hlsl_ctx *ctx, struct hlsl_block
     {
         LIST_FOR_EACH_ENTRY(var, &scope->vars, struct hlsl_ir_var, scope_entry)
         {
-            if (!(var->is_input_semantic || var->is_output_semantic || var->is_uniform))
+            if (!(var->is_input_semantic || var->is_output_semantic || var->is_uniform || var->is_tgsm))
                 memset(var->regs, 0, sizeof(var->regs));
         }
     }
@@ -7345,6 +7345,27 @@ static void allocate_stream_outputs(struct hlsl_ctx *ctx)
     }
 }
 
+static void allocate_tgsms(struct hlsl_ctx *ctx)
+{
+    struct hlsl_ir_var *var;
+    struct hlsl_reg *reg;
+    uint32_t index = 0;
+
+    LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
+    {
+        if (!var->is_tgsm || !var->bind_count[HLSL_REGSET_NUMERIC])
+            continue;
+
+        reg = &var->regs[HLSL_REGSET_NUMERIC];
+        reg->space = 0;
+        reg->index = index;
+        reg->id = index;
+        reg->allocated = true;
+
+        ++index;
+    }
+}
+
 bool hlsl_component_index_range_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
         unsigned int *start, unsigned int *count)
 {
@@ -11142,6 +11163,12 @@ static bool sm4_generate_vsir_instr_store(struct hlsl_ctx *ctx,
     struct vkd3d_shader_src_param *src_param;
     struct vkd3d_shader_instruction *ins;
 
+    if (store->lhs.var->is_tgsm)
+    {
+        hlsl_fixme(ctx, &instr->loc, "Store to groupshared variable.");
+        return false;
+    }
+
     if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
         return false;
 
@@ -11176,6 +11203,12 @@ static bool sm4_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
     struct vkd3d_shader_instruction *ins;
     struct hlsl_constant_value value;
 
+    if (load->src.var->is_tgsm)
+    {
+        hlsl_fixme(ctx, &instr->loc, "Load from groupshared variable.");
+        return false;
+    }
+
     VKD3D_ASSERT(hlsl_is_numeric_type(type));
     if (type->e.numeric.type == HLSL_TYPE_BOOL && var_is_user_input(version, load->src.var))
     {
@@ -12003,7 +12036,7 @@ static void sm4_generate_vsir_add_function(struct hlsl_ctx *ctx, struct list *se
     {
         LIST_FOR_EACH_ENTRY(var, &scope->vars, struct hlsl_ir_var, scope_entry)
         {
-            if (var->is_uniform || var->is_input_semantic || var->is_output_semantic)
+            if (var->is_uniform || var->is_tgsm || var->is_input_semantic || var->is_output_semantic)
                 continue;
             if (!var->regs[HLSL_REGSET_NUMERIC].allocated)
                 continue;
@@ -12572,6 +12605,7 @@ static void sm4_generate_vsir(struct hlsl_ctx *ctx,
     struct extern_resource *extern_resources;
     unsigned int extern_resources_count;
     const struct hlsl_buffer *cbuffer;
+    const struct hlsl_ir_var *var;
 
     if (version->type == VKD3D_SHADER_TYPE_COMPUTE)
     {
@@ -12622,10 +12656,14 @@ static void sm4_generate_vsir(struct hlsl_ctx *ctx,
     }
     sm4_free_extern_resources(extern_resources, extern_resources_count);
 
+    LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
+    {
+        if (var->is_tgsm && var->regs[HLSL_REGSET_NUMERIC].allocated)
+            hlsl_fixme(ctx, &var->loc, "Groupshared variable.");
+    }
+
     if (version->type == VKD3D_SHADER_TYPE_GEOMETRY && version->major >= 5)
     {
-        const struct hlsl_ir_var *var;
-
         LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
         {
             if (var->bind_count[HLSL_REGSET_STREAM_OUTPUTS])
@@ -14151,7 +14189,14 @@ int hlsl_emit_vsir(struct hlsl_ctx *ctx, const struct vkd3d_shader_compile_info
     LIST_FOR_EACH_ENTRY(var, &ctx->globals->vars, struct hlsl_ir_var, scope_entry)
     {
         if (var->storage_modifiers & HLSL_STORAGE_UNIFORM)
+        {
             prepend_uniform_copy(ctx, &global_uniform_block, var);
+        }
+        else if (var->storage_modifiers & HLSL_STORAGE_GROUPSHARED)
+        {
+            var->is_tgsm = 1;
+            list_add_tail(&ctx->extern_vars, &var->extern_entry);
+        }
     }
 
     process_entry_function(ctx, &semantic_vars, &body, &global_uniform_block, entry_func);
@@ -14180,6 +14225,7 @@ int hlsl_emit_vsir(struct hlsl_ctx *ctx, const struct vkd3d_shader_compile_info
         allocate_objects(ctx, &semantic_vars, HLSL_REGSET_TEXTURES);
         allocate_objects(ctx, &semantic_vars, HLSL_REGSET_UAVS);
         allocate_objects(ctx, &semantic_vars, HLSL_REGSET_SAMPLERS);
+        allocate_tgsms(ctx);
     }
 
     if (TRACE_ON())