vkd3d-shader/hlsl: Allocate groupshared registers.

This commit is contained in:
Shaun Ren
2025-07-31 21:38:55 -04:00
committed by Henri Verbeet
parent e615e435d9
commit 3802344e97
Notes: Henri Verbeet 2025-08-05 16:40:26 +02:00
Approved-by: Elizabeth Figura (@zfigura)
Approved-by: Henri Verbeet (@hverbeet)
Merge-Request: https://gitlab.winehq.org/wine/vkd3d/-/merge_requests/1605
3 changed files with 53 additions and 9 deletions

View File

@@ -554,6 +554,7 @@ struct hlsl_ir_var
uint32_t is_param : 1;
uint32_t is_separated_resource : 1;
uint32_t is_synthetic : 1;
uint32_t is_tgsm : 1;
uint32_t has_explicit_bind_point : 1;
};

View File

@@ -2566,13 +2566,10 @@ static void declare_var(struct hlsl_ctx *ctx, struct parse_variable_def *v)
"Ignoring the 'groupshared' modifier in a non-compute shader.");
}
if (modifiers & HLSL_STORAGE_GROUPSHARED)
hlsl_fixme(ctx, &var->loc, "Group shared variables.");
/* Mark it as uniform. We need to do this here since synthetic
* variables also get put in the global scope, but shouldn't be
* considered uniforms, and we have no way of telling otherwise. */
if (!(modifiers & HLSL_STORAGE_STATIC))
if (!(modifiers & (HLSL_STORAGE_STATIC | HLSL_STORAGE_GROUPSHARED)))
var->storage_modifiers |= HLSL_STORAGE_UNIFORM;
if (stream_output)

View File

@@ -6171,7 +6171,7 @@ static bool track_components_usage(struct hlsl_ctx *ctx, struct hlsl_ir_node *in
{
struct hlsl_ir_load *load = hlsl_ir_load(instr);
if (!load->src.var->is_uniform)
if (!load->src.var->is_uniform && !load->src.var->is_tgsm)
return false;
/* These are handled by validate_static_object_references(). */
@@ -6695,7 +6695,7 @@ static uint32_t allocate_temp_registers(struct hlsl_ctx *ctx, struct hlsl_block
{
LIST_FOR_EACH_ENTRY(var, &scope->vars, struct hlsl_ir_var, scope_entry)
{
if (!(var->is_input_semantic || var->is_output_semantic || var->is_uniform))
if (!(var->is_input_semantic || var->is_output_semantic || var->is_uniform || var->is_tgsm))
memset(var->regs, 0, sizeof(var->regs));
}
}
@@ -7345,6 +7345,27 @@ static void allocate_stream_outputs(struct hlsl_ctx *ctx)
}
}
/* Assign sequential HLSL_REGSET_NUMERIC registers to every used
 * groupshared (thread-group shared memory, TGSM) extern variable.
 * Unused TGSM variables (bind_count == 0) are left unallocated. */
static void allocate_tgsms(struct hlsl_ctx *ctx)
{
    uint32_t next_index = 0;
    struct hlsl_ir_var *var;

    LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
    {
        struct hlsl_reg *reg = &var->regs[HLSL_REGSET_NUMERIC];

        /* Skip non-groupshared variables and those with no numeric uses. */
        if (!var->is_tgsm || !var->bind_count[HLSL_REGSET_NUMERIC])
            continue;

        reg->space = 0;
        reg->id = next_index;
        reg->index = next_index;
        reg->allocated = true;
        ++next_index;
    }
}
bool hlsl_component_index_range_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
unsigned int *start, unsigned int *count)
{
@@ -11142,6 +11163,12 @@ static bool sm4_generate_vsir_instr_store(struct hlsl_ctx *ctx,
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
if (store->lhs.var->is_tgsm)
{
hlsl_fixme(ctx, &instr->loc, "Store to groupshared variable.");
return false;
}
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return false;
@@ -11176,6 +11203,12 @@ static bool sm4_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
struct vkd3d_shader_instruction *ins;
struct hlsl_constant_value value;
if (load->src.var->is_tgsm)
{
hlsl_fixme(ctx, &instr->loc, "Load from groupshared variable.");
return false;
}
VKD3D_ASSERT(hlsl_is_numeric_type(type));
if (type->e.numeric.type == HLSL_TYPE_BOOL && var_is_user_input(version, load->src.var))
{
@@ -12003,7 +12036,7 @@ static void sm4_generate_vsir_add_function(struct hlsl_ctx *ctx, struct list *se
{
LIST_FOR_EACH_ENTRY(var, &scope->vars, struct hlsl_ir_var, scope_entry)
{
if (var->is_uniform || var->is_input_semantic || var->is_output_semantic)
if (var->is_uniform || var->is_tgsm || var->is_input_semantic || var->is_output_semantic)
continue;
if (!var->regs[HLSL_REGSET_NUMERIC].allocated)
continue;
@@ -12572,6 +12605,7 @@ static void sm4_generate_vsir(struct hlsl_ctx *ctx,
struct extern_resource *extern_resources;
unsigned int extern_resources_count;
const struct hlsl_buffer *cbuffer;
const struct hlsl_ir_var *var;
if (version->type == VKD3D_SHADER_TYPE_COMPUTE)
{
@@ -12622,10 +12656,14 @@ static void sm4_generate_vsir(struct hlsl_ctx *ctx,
}
sm4_free_extern_resources(extern_resources, extern_resources_count);
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
{
if (var->is_tgsm && var->regs[HLSL_REGSET_NUMERIC].allocated)
hlsl_fixme(ctx, &var->loc, "Groupshared variable.");
}
if (version->type == VKD3D_SHADER_TYPE_GEOMETRY && version->major >= 5)
{
const struct hlsl_ir_var *var;
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
{
if (var->bind_count[HLSL_REGSET_STREAM_OUTPUTS])
@@ -14151,8 +14189,15 @@ int hlsl_emit_vsir(struct hlsl_ctx *ctx, const struct vkd3d_shader_compile_info
LIST_FOR_EACH_ENTRY(var, &ctx->globals->vars, struct hlsl_ir_var, scope_entry)
{
if (var->storage_modifiers & HLSL_STORAGE_UNIFORM)
{
prepend_uniform_copy(ctx, &global_uniform_block, var);
}
else if (var->storage_modifiers & HLSL_STORAGE_GROUPSHARED)
{
var->is_tgsm = 1;
list_add_tail(&ctx->extern_vars, &var->extern_entry);
}
}
process_entry_function(ctx, &semantic_vars, &body, &global_uniform_block, entry_func);
if (ctx->result)
@@ -14180,6 +14225,7 @@ int hlsl_emit_vsir(struct hlsl_ctx *ctx, const struct vkd3d_shader_compile_info
allocate_objects(ctx, &semantic_vars, HLSL_REGSET_TEXTURES);
allocate_objects(ctx, &semantic_vars, HLSL_REGSET_UAVS);
allocate_objects(ctx, &semantic_vars, HLSL_REGSET_SAMPLERS);
allocate_tgsms(ctx);
}
if (TRACE_ON())