Mirror of https://gitlab.winehq.org/wine/vkd3d.git, synced 2025-04-13 05:43:18 -07:00
vkd3d-shader: Allocate constant buffers.
Signed-off-by: Zebediah Figura <zfigura@codeweavers.com>
Signed-off-by: Henri Verbeet <hverbeet@codeweavers.com>
Signed-off-by: Matteo Bruni <mbruni@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
Committed by Alexandre Julliard
Parent: f7bf1dc01c
Commit: dd03242417
@@ -112,6 +112,18 @@ static unsigned int get_array_size(const struct hlsl_type *type)
     return 1;
 }
 
+unsigned int hlsl_type_get_sm4_offset(const struct hlsl_type *type, unsigned int offset)
+{
+    /* Align to the next vec4 boundary if:
+     *  (a) the type is a struct or array type, or
+     *  (b) the type would cross a vec4 boundary; i.e. a vec3 and a
+     *      vec1 can be packed together, but not a vec3 and a vec2.
+     */
+    if (type->type > HLSL_CLASS_LAST_NUMERIC || (offset & 3) + type->reg_size > 4)
+        return align(offset, 4);
+    return offset;
+}
+
 static void hlsl_type_calculate_reg_size(struct hlsl_ctx *ctx, struct hlsl_type *type)
 {
     bool is_sm4 = (ctx->profile->major_version >= 4);
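The packing rule documented in hlsl_type_get_sm4_offset() above can be illustrated with a small standalone sketch. This is not vkd3d code: sm4_offset() is a hypothetical stand-in that only models numeric types (no structs or arrays), with offsets counted in 4-byte components, four per vec4.

#include <stdio.h>

static unsigned int sm4_offset(unsigned int offset, unsigned int size)
{
    /* Bump to the next vec4 boundary if the value would straddle one. */
    if ((offset & 3) + size > 4)
        return (offset + 3) & ~3u;
    return offset;
}

int main(void)
{
    /* float3 followed by float: the float packs into the same vec4. */
    unsigned int offset = 0;
    offset = sm4_offset(offset, 3);  /* float3 a; -> component 0 (c0.xyz) */
    offset += 3;
    offset = sm4_offset(offset, 1);  /* float  b; -> component 3 (c0.w)   */
    offset += 1;
    printf("float3 + float ends at component %u\n", offset);   /* 4 */

    /* float3 followed by float2: the float2 is pushed to the next vec4. */
    offset = 0;
    offset = sm4_offset(offset, 3);  /* float3 a; -> component 0 (c0.xyz) */
    offset += 3;
    offset = sm4_offset(offset, 2);  /* float2 c; -> component 4 (c1.xy), not 3 */
    offset += 2;
    printf("float3 + float2 ends at component %u\n", offset);  /* 6 */
    return 0;
}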
@@ -155,14 +167,7 @@ static void hlsl_type_calculate_reg_size(struct hlsl_ctx *ctx, struct hlsl_type
             assert(field_size);
 
-            /* Align to the next vec4 boundary if:
-             *  (a) the type is a struct or array type, or
-             *  (b) the type would cross a vec4 boundary; i.e. a vec3 and a
-             *      vec1 can be packed together, but not a vec3 and a vec2.
-             */
-            if (field->type->type > HLSL_CLASS_LAST_NUMERIC || (type->reg_size & 3) + field_size > 4)
-                type->reg_size = align(type->reg_size, 4);
-
+            type->reg_size = hlsl_type_get_sm4_offset(field->type, type->reg_size);
             field->reg_offset = type->reg_size;
             type->reg_size += field_size;
 
@@ -228,6 +228,7 @@ struct hlsl_ir_var
     struct list scope_entry, param_entry, extern_entry;
 
     unsigned int first_write, last_read;
+    unsigned int buffer_offset;
     struct hlsl_reg reg;
 
     uint32_t is_input_semantic : 1;
@@ -431,6 +432,9 @@ struct hlsl_buffer
     const char *name;
     struct hlsl_reg_reservation reservation;
     struct list entry;
+
+    unsigned size, used_size;
+    struct hlsl_reg reg;
 };
 
 struct hlsl_ctx
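A minimal sketch, not vkd3d code, of how the new size and used_size fields are meant to behave per calculate_buffer_offset() further down: size grows for every declared uniform, while used_size only advances when a variable is actually read, so a buffer whose variables are never read keeps used_size == 0 and is skipped during register allocation. fake_buffer and add_var are hypothetical stand-ins, and only the numeric alignment case is modelled.

#include <stdbool.h>
#include <stdio.h>

struct fake_buffer
{
    unsigned int size, used_size; /* in 4-byte components, four per vec4 */
};

static void add_var(struct fake_buffer *buffer, unsigned int reg_size, bool is_read)
{
    /* Simplified stand-in for hlsl_type_get_sm4_offset() + calculate_buffer_offset(). */
    if ((buffer->size & 3) + reg_size > 4)
        buffer->size = (buffer->size + 3) & ~3u;
    buffer->size += reg_size;
    if (is_read)
        buffer->used_size = buffer->size;
}

int main(void)
{
    struct fake_buffer b = {0};

    add_var(&b, 4, true);   /* float4, read by the shader */
    add_var(&b, 3, false);  /* float3, declared but never read */
    printf("size %u, used_size %u\n", b.size, b.used_size); /* size 7, used_size 4 */
    return 0;
}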
@@ -678,6 +682,7 @@ bool hlsl_scope_add_type(struct hlsl_scope *scope, struct hlsl_type *type) DECLS
 struct hlsl_type *hlsl_type_clone(struct hlsl_ctx *ctx, struct hlsl_type *old,
         unsigned int default_majority, unsigned int modifiers) DECLSPEC_HIDDEN;
 unsigned int hlsl_type_component_count(struct hlsl_type *type) DECLSPEC_HIDDEN;
+unsigned int hlsl_type_get_sm4_offset(const struct hlsl_type *type, unsigned int offset) DECLSPEC_HIDDEN;
 bool hlsl_type_is_void(const struct hlsl_type *type) DECLSPEC_HIDDEN;
 bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2) DECLSPEC_HIDDEN;
 
@@ -1115,6 +1115,105 @@ static void allocate_semantic_registers(struct hlsl_ctx *ctx)
     }
 }
 
+static const struct hlsl_buffer *get_reserved_buffer(struct hlsl_ctx *ctx, uint32_t index)
+{
+    const struct hlsl_buffer *buffer;
+
+    LIST_FOR_EACH_ENTRY(buffer, &ctx->buffers, const struct hlsl_buffer, entry)
+    {
+        if (buffer->used_size && buffer->reservation.type == 'b' && buffer->reservation.index == index)
+            return buffer;
+    }
+    return NULL;
+}
+
+static void calculate_buffer_offset(struct hlsl_ir_var *var)
+{
+    struct hlsl_buffer *buffer = var->buffer;
+
+    buffer->size = hlsl_type_get_sm4_offset(var->data_type, buffer->size);
+
+    var->buffer_offset = buffer->size;
+    TRACE("Allocated buffer offset %u to %s.\n", var->buffer_offset, var->name);
+    buffer->size += var->data_type->reg_size;
+    if (var->last_read)
+        buffer->used_size = buffer->size;
+}
+
+static void allocate_buffers(struct hlsl_ctx *ctx)
+{
+    struct hlsl_buffer *buffer, *params_buffer;
+    struct hlsl_ir_var *var;
+    uint32_t index = 0;
+
+    if (!(params_buffer = hlsl_new_buffer(ctx, HLSL_BUFFER_CONSTANT,
+            hlsl_strdup(ctx, "$Params"), NULL, ctx->location)))
+        return;
+
+    /* The $Globals and $Params buffers should be allocated first, before all
+     * explicit buffers. */
+    list_remove(&params_buffer->entry);
+    list_add_head(&ctx->buffers, &params_buffer->entry);
+    list_remove(&ctx->globals_buffer->entry);
+    list_add_head(&ctx->buffers, &ctx->globals_buffer->entry);
+
+    LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
+    {
+        if (var->is_uniform)
+        {
+            if (var->is_param)
+                var->buffer = params_buffer;
+
+            calculate_buffer_offset(var);
+        }
+    }
+
+    LIST_FOR_EACH_ENTRY(buffer, &ctx->buffers, struct hlsl_buffer, entry)
+    {
+        if (!buffer->used_size)
+            continue;
+
+        if (buffer->type == HLSL_BUFFER_CONSTANT)
+        {
+            if (buffer->reservation.type == 'b')
+            {
+                const struct hlsl_buffer *reserved_buffer = get_reserved_buffer(ctx, buffer->reservation.index);
+
+                if (reserved_buffer && reserved_buffer != buffer)
+                {
+                    hlsl_error(ctx, buffer->loc, VKD3D_SHADER_ERROR_HLSL_OVERLAPPING_RESERVATIONS,
+                            "Multiple buffers bound to cb%u.", buffer->reservation.index);
+                    hlsl_note(ctx, reserved_buffer->loc, VKD3D_SHADER_LOG_ERROR,
+                            "Buffer %s is already bound to cb%u.", reserved_buffer->name, buffer->reservation.index);
+                }
+
+                buffer->reg.id = buffer->reservation.index;
+                buffer->reg.allocated = true;
+                TRACE("Allocated reserved %s to cb%u.\n", buffer->name, index);
+            }
+            else if (!buffer->reservation.type)
+            {
+                while (get_reserved_buffer(ctx, index))
+                    ++index;
+
+                buffer->reg.id = index;
+                buffer->reg.allocated = true;
+                TRACE("Allocated %s to cb%u.\n", buffer->name, index);
+                ++index;
+            }
+            else
+            {
+                hlsl_error(ctx, buffer->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
+                        "Constant buffers must be allocated to register type 'b'.");
+            }
+        }
+        else
+        {
+            FIXME("Allocate registers for texture buffers.\n");
+        }
+    }
+}
+
 static unsigned int map_swizzle(unsigned int swizzle, unsigned int writemask)
 {
     unsigned int i, ret = 0;
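The unreserved-buffer numbering in allocate_buffers() can be illustrated with another standalone sketch, not vkd3d code: is_reserved() is a hypothetical stand-in for get_reserved_buffer(), with cb0 and cb2 assumed taken by explicit register(b0)/register(b2) reservations, so the automatic slots skip over them.

#include <stdbool.h>
#include <stdio.h>

static bool is_reserved(unsigned int index)
{
    /* Hypothetical reservation set: cb0 and cb2 are explicitly reserved. */
    return index == 0 || index == 2;
}

int main(void)
{
    unsigned int index = 0;

    for (int i = 0; i < 3; ++i)    /* three unreserved buffers to place */
    {
        while (is_reserved(index))
            ++index;
        printf("buffer %d -> cb%u\n", i, index);
        ++index;                   /* next automatic slot */
    }
    return 0;                      /* prints cb1, cb3, cb4 */
}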
@@ -2062,6 +2161,8 @@ int hlsl_emit_dxbc(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_fun
     allocate_temp_registers(ctx, entry_func);
     if (ctx->profile->major_version < 4)
         allocate_const_registers(ctx, entry_func);
+    else
+        allocate_buffers(ctx);
     allocate_semantic_registers(ctx);
 
     if (ctx->result)
@@ -107,6 +107,8 @@ enum vkd3d_shader_error
     VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX = 5012,
     VKD3D_SHADER_ERROR_HLSL_INVALID_SEMANTIC = 5013,
     VKD3D_SHADER_ERROR_HLSL_INVALID_RETURN = 5014,
+    VKD3D_SHADER_ERROR_HLSL_OVERLAPPING_RESERVATIONS = 5015,
+    VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION = 5016,
 
     VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION = 5300,
 };