vkd3d-shader/hlsl: Rename the "type" field of struct hlsl_type to "class".

To be consistent with enum hlsl_type_class and HLSL_CLASS_*.
Author: Zebediah Figura, 2022-11-11 19:31:55 -06:00 (committed by Alexandre Julliard)
parent b172f4c257
commit 7a9e393ea0
Notes: Alexandre Julliard 2023-04-03 22:08:48 +02:00
Approved-by: Giovanni Mascellani (@giomasce)
Approved-by: Francisco Casas (@fcasas)
Approved-by: Henri Verbeet (@hverbeet)
Approved-by: Alexandre Julliard (@julliard)
Merge-Request: https://gitlab.winehq.org/wine/vkd3d/-/merge_requests/121
7 changed files with 196 additions and 196 deletions
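In short, the rename is mechanical: the field changes name and every access site follows. A minimal before/after sketch of the affected declaration (other struct members abbreviated; reconstructed from the hlsl.h hunk further down, not itself part of the commit):

    /* Before: the class of the type lived in a field itself named "type",
     * so call sites read type->type == HLSL_CLASS_MATRIX. */
    struct hlsl_type
    {
        struct rb_entry scope_entry;
        enum hlsl_type_class type;
        enum hlsl_base_type base_type;
        /* ... */
    };

    /* After: the field name matches enum hlsl_type_class and the HLSL_CLASS_*
     * constants, so call sites read type->class == HLSL_CLASS_MATRIX. */
    struct hlsl_type
    {
        struct rb_entry scope_entry;
        enum hlsl_type_class class;
        enum hlsl_base_type base_type;
        /* ... */
    };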

[file 1 of 7]

@@ -126,7 +126,7 @@ bool hlsl_type_is_row_major(const struct hlsl_type *type)
 unsigned int hlsl_type_minor_size(const struct hlsl_type *type)
 {
-    if (type->type != HLSL_CLASS_MATRIX || hlsl_type_is_row_major(type))
+    if (type->class != HLSL_CLASS_MATRIX || hlsl_type_is_row_major(type))
         return type->dimx;
     else
         return type->dimy;
@@ -134,7 +134,7 @@ unsigned int hlsl_type_minor_size(const struct hlsl_type *type)
 unsigned int hlsl_type_major_size(const struct hlsl_type *type)
 {
-    if (type->type != HLSL_CLASS_MATRIX || hlsl_type_is_row_major(type))
+    if (type->class != HLSL_CLASS_MATRIX || hlsl_type_is_row_major(type))
         return type->dimy;
     else
         return type->dimx;
@@ -142,7 +142,7 @@ unsigned int hlsl_type_major_size(const struct hlsl_type *type)
 unsigned int hlsl_type_element_count(const struct hlsl_type *type)
 {
-    switch (type->type)
+    switch (type->class)
     {
         case HLSL_CLASS_VECTOR:
             return type->dimx;
@@ -159,14 +159,14 @@ unsigned int hlsl_type_element_count(const struct hlsl_type *type)
 static unsigned int get_array_size(const struct hlsl_type *type)
 {
-    if (type->type == HLSL_CLASS_ARRAY)
+    if (type->class == HLSL_CLASS_ARRAY)
         return get_array_size(type->e.array.type) * type->e.array.elements_count;
     return 1;
 }
 
 bool hlsl_type_is_resource(const struct hlsl_type *type)
 {
-    if (type->type == HLSL_CLASS_OBJECT)
+    if (type->class == HLSL_CLASS_OBJECT)
     {
         switch (type->base_type)
         {
@@ -183,10 +183,10 @@ bool hlsl_type_is_resource(const struct hlsl_type *type)
 enum hlsl_regset hlsl_type_get_regset(const struct hlsl_type *type)
 {
-    if (type->type <= HLSL_CLASS_LAST_NUMERIC)
+    if (type->class <= HLSL_CLASS_LAST_NUMERIC)
         return HLSL_REGSET_NUMERIC;
 
-    if (type->type == HLSL_CLASS_OBJECT)
+    if (type->class == HLSL_CLASS_OBJECT)
     {
         switch (type->base_type)
         {
@@ -203,7 +203,7 @@ enum hlsl_regset hlsl_type_get_regset(const struct hlsl_type *type)
                 vkd3d_unreachable();
         }
     }
-    else if (type->type == HLSL_CLASS_ARRAY)
+    else if (type->class == HLSL_CLASS_ARRAY)
         return hlsl_type_get_regset(type->e.array.type);
 
     vkd3d_unreachable();
@@ -216,7 +216,7 @@ unsigned int hlsl_type_get_sm4_offset(const struct hlsl_type *type, unsigned int
      * (b) the type would cross a vec4 boundary; i.e. a vec3 and a
      * vec1 can be packed together, but not a vec3 and a vec2.
      */
-    if (type->type > HLSL_CLASS_LAST_NUMERIC || (offset & 3) + type->reg_size[HLSL_REGSET_NUMERIC] > 4)
+    if (type->class > HLSL_CLASS_LAST_NUMERIC || (offset & 3) + type->reg_size[HLSL_REGSET_NUMERIC] > 4)
         return align(offset, 4);
     return offset;
 }
@@ -229,7 +229,7 @@ static void hlsl_type_calculate_reg_size(struct hlsl_ctx *ctx, struct hlsl_type
     for (k = 0; k <= HLSL_REGSET_LAST; ++k)
         type->reg_size[k] = 0;
 
-    switch (type->type)
+    switch (type->class)
     {
         case HLSL_CLASS_SCALAR:
         case HLSL_CLASS_VECTOR:
@@ -317,7 +317,7 @@ static struct hlsl_type *hlsl_new_type(struct hlsl_ctx *ctx, const char *name, e
         vkd3d_free(type);
         return NULL;
     }
-    type->type = type_class;
+    type->class = type_class;
     type->base_type = base_type;
     type->dimx = dimx;
     type->dimy = dimy;
@@ -330,7 +330,7 @@ static struct hlsl_type *hlsl_new_type(struct hlsl_ctx *ctx, const char *name, e
 static bool type_is_single_component(const struct hlsl_type *type)
 {
-    return type->type == HLSL_CLASS_SCALAR || type->type == HLSL_CLASS_OBJECT;
+    return type->class == HLSL_CLASS_SCALAR || type->class == HLSL_CLASS_OBJECT;
 }
 
 /* Given a type and a component index, this function moves one step through the path required to
@@ -349,7 +349,7 @@ static unsigned int traverse_path_from_component_index(struct hlsl_ctx *ctx,
     assert(!type_is_single_component(type));
     assert(index < hlsl_type_component_count(type));
 
-    switch (type->type)
+    switch (type->class)
     {
         case HLSL_CLASS_VECTOR:
             assert(index < type->dimx);
@@ -505,7 +505,7 @@ struct hlsl_type *hlsl_get_element_type_from_path_index(struct hlsl_ctx *ctx, co
 {
     assert(idx);
 
-    switch (type->type)
+    switch (type->class)
     {
         case HLSL_CLASS_VECTOR:
             return hlsl_get_scalar_type(ctx, type->base_type);
@@ -539,7 +539,7 @@ struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *ba
     if (!(type = hlsl_alloc(ctx, sizeof(*type))))
         return NULL;
 
-    type->type = HLSL_CLASS_ARRAY;
+    type->class = HLSL_CLASS_ARRAY;
     type->modifiers = basic_type->modifiers;
     type->e.array.elements_count = array_size;
     type->e.array.type = basic_type;
@@ -559,7 +559,7 @@ struct hlsl_type *hlsl_new_struct_type(struct hlsl_ctx *ctx, const char *name,
     if (!(type = hlsl_alloc(ctx, sizeof(*type))))
         return NULL;
 
-    type->type = HLSL_CLASS_STRUCT;
+    type->class = HLSL_CLASS_STRUCT;
     type->base_type = HLSL_TYPE_VOID;
     type->name = name;
     type->dimy = 1;
@@ -579,7 +579,7 @@ struct hlsl_type *hlsl_new_texture_type(struct hlsl_ctx *ctx, enum hlsl_sampler_
     if (!(type = hlsl_alloc(ctx, sizeof(*type))))
         return NULL;
 
-    type->type = HLSL_CLASS_OBJECT;
+    type->class = HLSL_CLASS_OBJECT;
     type->base_type = HLSL_TYPE_TEXTURE;
     type->dimx = 4;
     type->dimy = 1;
@@ -597,7 +597,7 @@ struct hlsl_type *hlsl_new_uav_type(struct hlsl_ctx *ctx, enum hlsl_sampler_dim
     if (!(type = vkd3d_calloc(1, sizeof(*type))))
         return NULL;
 
-    type->type = HLSL_CLASS_OBJECT;
+    type->class = HLSL_CLASS_OBJECT;
     type->base_type = HLSL_TYPE_UAV;
     type->dimx = format->dimx;
     type->dimy = 1;
@@ -679,7 +679,7 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha
 unsigned int hlsl_type_component_count(const struct hlsl_type *type)
 {
-    switch (type->type)
+    switch (type->class)
     {
         case HLSL_CLASS_SCALAR:
         case HLSL_CLASS_VECTOR:
@@ -711,7 +711,7 @@ bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2
     if (t1 == t2)
         return true;
 
-    if (t1->type != t2->type)
+    if (t1->class != t2->class)
         return false;
     if (t1->base_type != t2->base_type)
         return false;
@@ -731,7 +731,7 @@ bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2
         return false;
     if (t1->dimy != t2->dimy)
         return false;
-    if (t1->type == HLSL_CLASS_STRUCT)
+    if (t1->class == HLSL_CLASS_STRUCT)
     {
         size_t i;
@@ -750,7 +750,7 @@ bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2
                 return false;
         }
     }
-    if (t1->type == HLSL_CLASS_ARRAY)
+    if (t1->class == HLSL_CLASS_ARRAY)
         return t1->e.array.elements_count == t2->e.array.elements_count
                 && hlsl_types_are_equal(t1->e.array.type, t2->e.array.type);
@@ -774,7 +774,7 @@ struct hlsl_type *hlsl_type_clone(struct hlsl_ctx *ctx, struct hlsl_type *old,
             return NULL;
         }
     }
-    type->type = old->type;
+    type->class = old->class;
     type->base_type = old->base_type;
     type->dimx = old->dimx;
     type->dimy = old->dimy;
@@ -783,7 +783,7 @@ struct hlsl_type *hlsl_type_clone(struct hlsl_ctx *ctx, struct hlsl_type *old,
     type->modifiers |= default_majority;
     type->sampler_dim = old->sampler_dim;
     type->is_minimum_precision = old->is_minimum_precision;
-    switch (old->type)
+    switch (old->class)
     {
         case HLSL_CLASS_ARRAY:
             if (!(type->e.array.type = hlsl_type_clone(ctx, old->e.array.type, default_majority, modifiers)))
@@ -912,7 +912,7 @@ struct hlsl_ir_var *hlsl_new_synthetic_var(struct hlsl_ctx *ctx, const char *tem
 static bool type_is_single_reg(const struct hlsl_type *type)
 {
-    return type->type == HLSL_CLASS_SCALAR || type->type == HLSL_CLASS_VECTOR;
+    return type->class == HLSL_CLASS_SCALAR || type->class == HLSL_CLASS_VECTOR;
 }
 
 bool hlsl_copy_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, const struct hlsl_deref *other)
@@ -1052,7 +1052,7 @@ struct hlsl_ir_constant *hlsl_new_constant(struct hlsl_ctx *ctx, struct hlsl_typ
 {
     struct hlsl_ir_constant *c;
 
-    assert(type->type <= HLSL_CLASS_VECTOR);
+    assert(type->class <= HLSL_CLASS_VECTOR);
 
     if (!(c = hlsl_alloc(ctx, sizeof(*c))))
         return NULL;
@@ -1700,10 +1700,10 @@ static int compare_param_hlsl_types(const struct hlsl_type *t1, const struct hls
 {
     int r;
 
-    if ((r = vkd3d_u32_compare(t1->type, t2->type)))
+    if ((r = vkd3d_u32_compare(t1->class, t2->class)))
     {
-        if (!((t1->type == HLSL_CLASS_SCALAR && t2->type == HLSL_CLASS_VECTOR)
-                || (t1->type == HLSL_CLASS_VECTOR && t2->type == HLSL_CLASS_SCALAR)))
+        if (!((t1->class == HLSL_CLASS_SCALAR && t2->class == HLSL_CLASS_VECTOR)
+                || (t1->class == HLSL_CLASS_VECTOR && t2->class == HLSL_CLASS_SCALAR)))
             return r;
     }
     if ((r = vkd3d_u32_compare(t1->base_type, t2->base_type)))
@@ -1720,7 +1720,7 @@ static int compare_param_hlsl_types(const struct hlsl_type *t1, const struct hls
         return r;
     if ((r = vkd3d_u32_compare(t1->dimy, t2->dimy)))
         return r;
-    if (t1->type == HLSL_CLASS_STRUCT)
+    if (t1->class == HLSL_CLASS_STRUCT)
     {
         size_t i;
@@ -1740,7 +1740,7 @@ static int compare_param_hlsl_types(const struct hlsl_type *t1, const struct hls
         }
         return 0;
     }
-    if (t1->type == HLSL_CLASS_ARRAY)
+    if (t1->class == HLSL_CLASS_ARRAY)
     {
         if ((r = vkd3d_u32_compare(t1->e.array.elements_count, t2->e.array.elements_count)))
             return r;
@@ -1791,7 +1791,7 @@ struct vkd3d_string_buffer *hlsl_type_to_string(struct hlsl_ctx *ctx, const stru
         return string;
     }
 
-    switch (type->type)
+    switch (type->class)
     {
         case HLSL_CLASS_SCALAR:
             assert(type->base_type < ARRAY_SIZE(base_types));
@@ -1813,7 +1813,7 @@ struct vkd3d_string_buffer *hlsl_type_to_string(struct hlsl_ctx *ctx, const stru
             struct vkd3d_string_buffer *inner_string;
             const struct hlsl_type *t;
 
-            for (t = type; t->type == HLSL_CLASS_ARRAY; t = t->e.array.type)
+            for (t = type; t->class == HLSL_CLASS_ARRAY; t = t->e.array.type)
                 ;
 
             if ((inner_string = hlsl_type_to_string(ctx, t)))
@@ -1822,7 +1822,7 @@ struct vkd3d_string_buffer *hlsl_type_to_string(struct hlsl_ctx *ctx, const stru
                 hlsl_release_string_buffer(ctx, inner_string);
             }
 
-            for (t = type; t->type == HLSL_CLASS_ARRAY; t = t->e.array.type)
+            for (t = type; t->class == HLSL_CLASS_ARRAY; t = t->e.array.type)
             {
                 if (t->e.array.elements_count == HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT)
                     vkd3d_string_buffer_printf(string, "[]");
@@ -2423,7 +2423,7 @@ void hlsl_free_type(struct hlsl_type *type)
     size_t i;
 
     vkd3d_free((void *)type->name);
-    if (type->type == HLSL_CLASS_STRUCT)
+    if (type->class == HLSL_CLASS_STRUCT)
     {
         for (i = 0; i < type->e.record.field_count; ++i)
         {

[file 2 of 7]

@@ -134,7 +134,7 @@ struct hlsl_type
     /* Item entry in hlsl_scope->types. hlsl_type->name is used as key (if not NULL). */
     struct rb_entry scope_entry;
 
-    enum hlsl_type_class type;
+    enum hlsl_type_class class;
     /* If type is <= HLSL_CLASS_LAST_NUMERIC, then base_type is <= HLSL_TYPE_LAST_SCALAR.
      * If type is HLSL_CLASS_OBJECT, then base_type is > HLSL_TYPE_LAST_SCALAR.
      * Otherwise, base_type is not used. */

[file 3 of 7: diff suppressed because it is too large]

[file 4 of 7]

@@ -31,7 +31,7 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
     list_init(&block->instrs);
 
-    switch (type->type)
+    switch (type->class)
     {
         case HLSL_CLASS_VECTOR:
             idx_offset = idx;
@@ -140,7 +140,7 @@ static void replace_deref_path_with_offset(struct hlsl_ctx *ctx, struct hlsl_der
     /* Instructions that directly refer to structs or arrays (instead of single-register components)
      * are removed later by dce. So it is not a problem to just cleanup their derefs. */
-    if (type->type == HLSL_CLASS_STRUCT || type->type == HLSL_CLASS_ARRAY)
+    if (type->class == HLSL_CLASS_STRUCT || type->class == HLSL_CLASS_ARRAY)
     {
         hlsl_cleanup_deref(deref);
         return;
@@ -282,7 +282,7 @@ static void prepend_input_copy(struct hlsl_ctx *ctx, struct list *instrs, struct
             return;
         list_add_after(&lhs->node.entry, &load->node.entry);
 
-        if (type->type == HLSL_CLASS_MATRIX)
+        if (type->class == HLSL_CLASS_MATRIX)
        {
            if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
                return;
@@ -324,7 +324,7 @@ static void prepend_input_struct_copy(struct hlsl_ctx *ctx, struct list *instrs,
            return;
        list_add_after(&c->node.entry, &field_load->node.entry);
 
-        if (field->type->type == HLSL_CLASS_STRUCT)
+        if (field->type->class == HLSL_CLASS_STRUCT)
            prepend_input_struct_copy(ctx, instrs, field_load);
        else if (field->semantic.name)
            prepend_input_copy(ctx, instrs, field_load, field->storage_modifiers, &field->semantic);
@@ -345,7 +345,7 @@ static void prepend_input_var_copy(struct hlsl_ctx *ctx, struct list *instrs, st
        return;
    list_add_head(instrs, &load->node.entry);
 
-    if (var->data_type->type == HLSL_CLASS_STRUCT)
+    if (var->data_type->class == HLSL_CLASS_STRUCT)
        prepend_input_struct_copy(ctx, instrs, load);
    else if (var->semantic.name)
        prepend_input_copy(ctx, instrs, load, var->storage_modifiers, &var->semantic);
@@ -373,7 +373,7 @@ static void append_output_copy(struct hlsl_ctx *ctx, struct list *instrs, struct
        if (!(output = add_semantic_var(ctx, var, vector_type, modifiers, &semantic_copy, true)))
            return;
 
-        if (type->type == HLSL_CLASS_MATRIX)
+        if (type->class == HLSL_CLASS_MATRIX)
        {
            if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
                return;
@@ -419,7 +419,7 @@ static void append_output_struct_copy(struct hlsl_ctx *ctx, struct list *instrs,
            return;
        list_add_tail(instrs, &field_load->node.entry);
 
-        if (field->type->type == HLSL_CLASS_STRUCT)
+        if (field->type->class == HLSL_CLASS_STRUCT)
            append_output_struct_copy(ctx, instrs, field_load);
        else if (field->semantic.name)
            append_output_copy(ctx, instrs, field_load, field->storage_modifiers, &field->semantic);
@@ -441,7 +441,7 @@ static void append_output_var_copy(struct hlsl_ctx *ctx, struct list *instrs, st
        return;
    list_add_tail(instrs, &load->node.entry);
 
-    if (var->data_type->type == HLSL_CLASS_STRUCT)
+    if (var->data_type->class == HLSL_CLASS_STRUCT)
        append_output_struct_copy(ctx, instrs, load);
    else if (var->semantic.name)
        append_output_copy(ctx, instrs, load, var->storage_modifiers, &var->semantic);
@@ -746,7 +746,7 @@ static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, v
    src_type = cast->operands[0].node->data_type;
    dst_type = cast->node.data_type;
 
-    if (src_type->type <= HLSL_CLASS_VECTOR && dst_type->type <= HLSL_CLASS_VECTOR && src_type->dimx == 1)
+    if (src_type->class <= HLSL_CLASS_VECTOR && dst_type->class <= HLSL_CLASS_VECTOR && src_type->dimx == 1)
    {
        struct hlsl_ir_node *replacement;
        struct hlsl_ir_swizzle *swizzle;
@@ -949,7 +949,7 @@ static void copy_propagation_invalidate_variable_from_deref_recurse(struct hlsl_
    path_node = deref->path[depth].node;
    subtype = hlsl_get_element_type_from_path_index(ctx, type, path_node);
 
-    if (type->type == HLSL_CLASS_STRUCT)
+    if (type->class == HLSL_CLASS_STRUCT)
    {
        unsigned int idx = hlsl_ir_constant(path_node)->value[0].u;
@@ -1041,7 +1041,7 @@ static bool copy_propagation_replace_with_single_instr(struct hlsl_ctx *ctx,
            var->name, start, start + count, debug_hlsl_swizzle(swizzle, instr_component_count),
            new_instr, debug_hlsl_swizzle(ret_swizzle, instr_component_count));
 
-    if (instr->data_type->type != HLSL_CLASS_OBJECT)
+    if (instr->data_type->class != HLSL_CLASS_OBJECT)
    {
        struct hlsl_ir_swizzle *swizzle_node;
@@ -1099,7 +1099,7 @@ static bool copy_propagation_transform_load(struct hlsl_ctx *ctx,
 {
    struct hlsl_type *type = load->node.data_type;
 
-    switch (type->type)
+    switch (type->class)
    {
        case HLSL_CLASS_SCALAR:
        case HLSL_CLASS_VECTOR:
@@ -1220,7 +1220,7 @@ static void copy_propagation_record_store(struct hlsl_ctx *ctx, struct hlsl_ir_s
 {
    unsigned int writemask = store->writemask;
 
-    if (store->rhs.node->data_type->type == HLSL_CLASS_OBJECT)
+    if (store->rhs.node->data_type->class == HLSL_CLASS_OBJECT)
        writemask = VKD3DSP_WRITEMASK_0;
    copy_propagation_set_value(var_def, start, writemask, store->rhs.node);
 }
@@ -1471,7 +1471,7 @@ static bool validate_static_object_references(struct hlsl_ctx *ctx, struct hlsl_
 static bool is_vec1(const struct hlsl_type *type)
 {
-    return (type->type == HLSL_CLASS_SCALAR) || (type->type == HLSL_CLASS_VECTOR && type->dimx == 1);
+    return (type->class == HLSL_CLASS_SCALAR) || (type->class == HLSL_CLASS_VECTOR && type->dimx == 1);
 }
 
 static bool fold_redundant_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
@@ -1538,7 +1538,7 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
    store = hlsl_ir_store(instr);
    rhs = store->rhs.node;
    type = rhs->data_type;
-    if (type->type != HLSL_CLASS_ARRAY)
+    if (type->class != HLSL_CLASS_ARRAY)
        return false;
 
    element_type = type->e.array.type;
@@ -1575,7 +1575,7 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
    store = hlsl_ir_store(instr);
    rhs = store->rhs.node;
    type = rhs->data_type;
-    if (type->type != HLSL_CLASS_STRUCT)
+    if (type->class != HLSL_CLASS_STRUCT)
        return false;
 
    if (rhs->type != HLSL_IR_LOAD)
@@ -1614,7 +1614,7 @@ static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
    store = hlsl_ir_store(instr);
    rhs = store->rhs.node;
    type = rhs->data_type;
-    if (type->type != HLSL_CLASS_MATRIX)
+    if (type->class != HLSL_CLASS_MATRIX)
        return false;
    element_type = hlsl_get_vector_type(ctx, type->base_type, hlsl_type_minor_size(type));
@@ -1649,7 +1649,7 @@ static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *ins
    src_type = cast->operands[0].node->data_type;
    dst_type = cast->node.data_type;
 
-    if (src_type->type <= HLSL_CLASS_VECTOR && dst_type->type <= HLSL_CLASS_VECTOR && dst_type->dimx < src_type->dimx)
+    if (src_type->class <= HLSL_CLASS_VECTOR && dst_type->class <= HLSL_CLASS_VECTOR && dst_type->dimx < src_type->dimx)
    {
        struct hlsl_ir_swizzle *swizzle;
        struct hlsl_ir_expr *new_cast;
@@ -1860,7 +1860,7 @@ static bool lower_casts_to_bool(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
    if (expr->op != HLSL_OP1_CAST)
        return false;
    arg_type = expr->operands[0].node->data_type;
-    if (type->type > HLSL_CLASS_VECTOR || arg_type->type > HLSL_CLASS_VECTOR)
+    if (type->class > HLSL_CLASS_VECTOR || arg_type->class > HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_BOOL)
        return false;
@@ -1928,11 +1928,11 @@ static bool lower_int_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
    arg2 = expr->operands[1].node;
    if (expr->op != HLSL_OP2_DIV)
        return false;
-    if (type->type != HLSL_CLASS_SCALAR && type->type != HLSL_CLASS_VECTOR)
+    if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_INT)
        return false;
-    utype = hlsl_get_numeric_type(ctx, type->type, HLSL_TYPE_UINT, type->dimx, type->dimy);
+    utype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_UINT, type->dimx, type->dimy);
 
    if (!(xor = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_XOR, arg1, arg2)))
        return false;
@@ -2000,11 +2000,11 @@ static bool lower_int_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
    arg2 = expr->operands[1].node;
    if (expr->op != HLSL_OP2_MOD)
        return false;
-    if (type->type != HLSL_CLASS_SCALAR && type->type != HLSL_CLASS_VECTOR)
+    if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_INT)
        return false;
-    utype = hlsl_get_numeric_type(ctx, type->type, HLSL_TYPE_UINT, type->dimx, type->dimy);
+    utype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_UINT, type->dimx, type->dimy);
 
    if (!(high_bit = hlsl_new_constant(ctx, type, &instr->loc)))
        return false;
@@ -2063,7 +2063,7 @@ static bool lower_int_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void
    if (expr->op != HLSL_OP1_ABS)
        return false;
 
-    if (type->type != HLSL_CLASS_SCALAR && type->type != HLSL_CLASS_VECTOR)
+    if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_INT)
        return false;
@@ -2096,11 +2096,11 @@ static bool lower_float_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
    arg2 = expr->operands[1].node;
    if (expr->op != HLSL_OP2_MOD)
        return false;
-    if (type->type != HLSL_CLASS_SCALAR && type->type != HLSL_CLASS_VECTOR)
+    if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_FLOAT)
        return false;
-    btype = hlsl_get_numeric_type(ctx, type->type, HLSL_TYPE_BOOL, type->dimx, type->dimy);
+    btype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_BOOL, type->dimx, type->dimy);
 
    if (!(mul1 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, arg2, arg1)))
        return false;
@@ -2543,7 +2543,7 @@ static struct hlsl_reg allocate_numeric_registers_for_type(struct hlsl_ctx *ctx,
 {
    unsigned int reg_size = type->reg_size[HLSL_REGSET_NUMERIC];
 
-    if (type->type <= HLSL_CLASS_VECTOR)
+    if (type->class <= HLSL_CLASS_VECTOR)
        return allocate_register(ctx, liveness, first_write, last_read, reg_size, type->dimx);
    else
        return allocate_range(ctx, liveness, first_write, last_read, reg_size);
@@ -2662,7 +2662,7 @@ static void allocate_const_registers_recurse(struct hlsl_ctx *ctx, struct hlsl_b
            defs->count = end_reg;
        }
 
-        assert(type->type <= HLSL_CLASS_LAST_NUMERIC);
+        assert(type->class <= HLSL_CLASS_LAST_NUMERIC);
 
        if (!(writemask = constant->reg.writemask))
            writemask = (1u << type->dimx) - 1;
@@ -2880,7 +2880,7 @@ static void allocate_buffers(struct hlsl_ctx *ctx)
    LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
    {
-        if (var->is_uniform && var->data_type->type != HLSL_CLASS_OBJECT)
+        if (var->is_uniform && var->data_type->class != HLSL_CLASS_OBJECT)
        {
            if (var->is_param)
                var->buffer = ctx->params_buffer;
@@ -3034,12 +3034,12 @@ bool hlsl_component_index_range_from_deref(struct hlsl_ctx *ctx, const struct hl
            return false;
 
        /* We should always have generated a cast to UINT. */
-        assert(path_node->data_type->type == HLSL_CLASS_SCALAR
+        assert(path_node->data_type->class == HLSL_CLASS_SCALAR
                && path_node->data_type->base_type == HLSL_TYPE_UINT);
 
        idx = hlsl_ir_constant(path_node)->value[0].u;
 
-        switch (type->type)
+        switch (type->class)
        {
            case HLSL_CLASS_VECTOR:
                if (idx >= type->dimx)
@@ -3102,7 +3102,7 @@ bool hlsl_offset_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref
    }
 
    /* We should always have generated a cast to UINT. */
-    assert(offset_node->data_type->type == HLSL_CLASS_SCALAR
+    assert(offset_node->data_type->class == HLSL_CLASS_SCALAR
            && offset_node->data_type->base_type == HLSL_TYPE_UINT);
 
    if (offset_node->type != HLSL_IR_CONSTANT)
@@ -3170,7 +3170,7 @@ static void parse_numthreads_attribute(struct hlsl_ctx *ctx, const struct hlsl_a
        const struct hlsl_type *type = instr->data_type;
        const struct hlsl_ir_constant *constant;
 
-        if (type->type != HLSL_CLASS_SCALAR
+        if (type->class != HLSL_CLASS_SCALAR
                || (type->base_type != HLSL_TYPE_INT && type->base_type != HLSL_TYPE_UINT))
        {
            struct vkd3d_string_buffer *string;
@@ -3234,13 +3234,13 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
    {
        var = entry_func->parameters.vars[i];
 
-        if (var->data_type->type == HLSL_CLASS_OBJECT || (var->storage_modifiers & HLSL_STORAGE_UNIFORM))
+        if (var->data_type->class == HLSL_CLASS_OBJECT || (var->storage_modifiers & HLSL_STORAGE_UNIFORM))
        {
            prepend_uniform_copy(ctx, &body->instrs, var);
        }
        else
        {
-            if (var->data_type->type != HLSL_CLASS_STRUCT && !var->semantic.name)
+            if (var->data_type->class != HLSL_CLASS_STRUCT && !var->semantic.name)
                hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC,
                        "Parameter \"%s\" is missing a semantic.", var->name);
@@ -3252,7 +3252,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
    }
 
    if (entry_func->return_var)
    {
-        if (entry_func->return_var->data_type->type != HLSL_CLASS_STRUCT && !entry_func->return_var->semantic.name)
+        if (entry_func->return_var->data_type->class != HLSL_CLASS_STRUCT && !entry_func->return_var->semantic.name)
            hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC,
                    "Entry point \"%s\" is missing a return value semantic.", entry_func->func->name);

[file 5 of 7]

@@ -512,7 +512,7 @@ bool hlsl_fold_constant_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
    if (!expr->operands[0].node)
        return false;
 
-    if (instr->data_type->type > HLSL_CLASS_VECTOR)
+    if (instr->data_type->class > HLSL_CLASS_VECTOR)
        return false;
 
    for (i = 0; i < ARRAY_SIZE(expr->operands); ++i)
@@ -521,7 +521,7 @@ bool hlsl_fold_constant_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
        {
            if (expr->operands[i].node->type != HLSL_IR_CONSTANT)
                return false;
-            assert(expr->operands[i].node->data_type->type <= HLSL_CLASS_VECTOR);
+            assert(expr->operands[i].node->data_type->class <= HLSL_CLASS_VECTOR);
        }
    }
 
    arg1 = hlsl_ir_constant(expr->operands[0].node);

[file 6 of 7]

@@ -139,7 +139,7 @@ static uint32_t sm1_version(enum vkd3d_shader_type type, unsigned int major, uns
 static D3DXPARAMETER_CLASS sm1_class(const struct hlsl_type *type)
 {
-    switch (type->type)
+    switch (type->class)
    {
        case HLSL_CLASS_ARRAY:
            return sm1_class(type->e.array.type);
@@ -158,7 +158,7 @@ static D3DXPARAMETER_CLASS sm1_class(const struct hlsl_type *type)
        case HLSL_CLASS_VECTOR:
            return D3DXPC_VECTOR;
        default:
-            ERR("Invalid class %#x.\n", type->type);
+            ERR("Invalid class %#x.\n", type->class);
            vkd3d_unreachable();
    }
 }
@@ -226,14 +226,14 @@ static D3DXPARAMETER_TYPE sm1_base_type(const struct hlsl_type *type)
 static const struct hlsl_type *get_array_type(const struct hlsl_type *type)
 {
-    if (type->type == HLSL_CLASS_ARRAY)
+    if (type->class == HLSL_CLASS_ARRAY)
        return get_array_type(type->e.array.type);
    return type;
 }
 
 static unsigned int get_array_size(const struct hlsl_type *type)
 {
-    if (type->type == HLSL_CLASS_ARRAY)
+    if (type->class == HLSL_CLASS_ARRAY)
        return get_array_size(type->e.array.type) * type->e.array.elements_count;
    return 1;
 }
@@ -249,7 +249,7 @@ static void write_sm1_type(struct vkd3d_bytecode_buffer *buffer, struct hlsl_typ
    if (type->bytecode_offset)
        return;
 
-    if (array_type->type == HLSL_CLASS_STRUCT)
+    if (array_type->class == HLSL_CLASS_STRUCT)
    {
        field_count = array_type->e.record.field_count;
@@ -360,7 +360,7 @@ static void write_sm1_uniforms(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffe
        if (!var->semantic.name && var->regs[regset].allocated)
        {
            put_u32(buffer, 0); /* name */
-            if (var->data_type->type == HLSL_CLASS_OBJECT
+            if (var->data_type->class == HLSL_CLASS_OBJECT
                    && (var->data_type->base_type == HLSL_TYPE_SAMPLER
                    || var->data_type->base_type == HLSL_TYPE_TEXTURE))
            {
@@ -849,7 +849,7 @@ static void write_sm1_store(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *
        .src_count = 1,
    };
 
-    if (store->lhs.var->data_type->type == HLSL_CLASS_MATRIX)
+    if (store->lhs.var->data_type->class == HLSL_CLASS_MATRIX)
    {
        FIXME("Matrix writemasks need to be lowered.\n");
        return;
@@ -910,19 +910,19 @@ static void write_sm1_instructions(struct hlsl_ctx *ctx, struct vkd3d_bytecode_b
    {
        if (instr->data_type)
        {
-            if (instr->data_type->type == HLSL_CLASS_MATRIX)
+            if (instr->data_type->class == HLSL_CLASS_MATRIX)
            {
                /* These need to be lowered. */
                hlsl_fixme(ctx, &instr->loc, "SM1 matrix expression.");
                continue;
            }
-            else if (instr->data_type->type == HLSL_CLASS_OBJECT)
+            else if (instr->data_type->class == HLSL_CLASS_OBJECT)
            {
                hlsl_fixme(ctx, &instr->loc, "Object copy.");
                break;
            }
 
-            assert(instr->data_type->type == HLSL_CLASS_SCALAR || instr->data_type->type == HLSL_CLASS_VECTOR);
+            assert(instr->data_type->class == HLSL_CLASS_SCALAR || instr->data_type->class == HLSL_CLASS_VECTOR);
        }
 
        switch (instr->type)

[file 7 of 7]

@@ -257,21 +257,21 @@ static void write_sm4_signature(struct hlsl_ctx *ctx, struct dxbc_writer *dxbc,
 static const struct hlsl_type *get_array_type(const struct hlsl_type *type)
 {
-    if (type->type == HLSL_CLASS_ARRAY)
+    if (type->class == HLSL_CLASS_ARRAY)
        return get_array_type(type->e.array.type);
    return type;
 }
 
 static unsigned int get_array_size(const struct hlsl_type *type)
 {
-    if (type->type == HLSL_CLASS_ARRAY)
+    if (type->class == HLSL_CLASS_ARRAY)
        return get_array_size(type->e.array.type) * type->e.array.elements_count;
    return 1;
 }
 
 static D3D_SHADER_VARIABLE_CLASS sm4_class(const struct hlsl_type *type)
 {
-    switch (type->type)
+    switch (type->class)
    {
        case HLSL_CLASS_ARRAY:
            return sm4_class(type->e.array.type);
@@ -290,7 +290,7 @@ static D3D_SHADER_VARIABLE_CLASS sm4_class(const struct hlsl_type *type)
        case HLSL_CLASS_VECTOR:
            return D3D_SVC_VECTOR;
        default:
-            ERR("Invalid class %#x.\n", type->type);
+            ERR("Invalid class %#x.\n", type->class);
            vkd3d_unreachable();
    }
 }
@@ -372,10 +372,10 @@ static void write_sm4_type(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *b
    if (profile->major_version >= 5)
        name_offset = put_string(buffer, name);
 
-    if (type->type == HLSL_CLASS_ARRAY)
+    if (type->class == HLSL_CLASS_ARRAY)
        array_size = get_array_size(type);
 
-    if (array_type->type == HLSL_CLASS_STRUCT)
+    if (array_type->class == HLSL_CLASS_STRUCT)
    {
        field_count = array_type->e.record.field_count;
@@ -858,7 +858,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r
    if (var->is_uniform)
    {
-        if (data_type->type == HLSL_CLASS_OBJECT && data_type->base_type == HLSL_TYPE_TEXTURE)
+        if (data_type->class == HLSL_CLASS_OBJECT && data_type->base_type == HLSL_TYPE_TEXTURE)
        {
            reg->type = VKD3D_SM4_RT_RESOURCE;
            reg->dim = VKD3D_SM4_DIMENSION_VEC4;
@@ -868,7 +868,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r
            reg->idx_count = 1;
            *writemask = VKD3DSP_WRITEMASK_ALL;
        }
-        else if (data_type->type == HLSL_CLASS_OBJECT && data_type->base_type == HLSL_TYPE_UAV)
+        else if (data_type->class == HLSL_CLASS_OBJECT && data_type->base_type == HLSL_TYPE_UAV)
        {
            reg->type = VKD3D_SM5_RT_UAV;
            reg->dim = VKD3D_SM4_DIMENSION_VEC4;
@@ -878,7 +878,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r
            reg->idx_count = 1;
            *writemask = VKD3DSP_WRITEMASK_ALL;
        }
-        else if (data_type->type == HLSL_CLASS_OBJECT && data_type->base_type == HLSL_TYPE_SAMPLER)
+        else if (data_type->class == HLSL_CLASS_OBJECT && data_type->base_type == HLSL_TYPE_SAMPLER)
        {
            reg->type = VKD3D_SM4_RT_SAMPLER;
            reg->dim = VKD3D_SM4_DIMENSION_NONE;
@@ -892,7 +892,7 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r
        {
            unsigned int offset = hlsl_offset_from_deref_safe(ctx, deref) + var->buffer_offset;
 
-            assert(data_type->type <= HLSL_CLASS_VECTOR);
+            assert(data_type->class <= HLSL_CLASS_VECTOR);
            reg->type = VKD3D_SM4_RT_CONSTBUFFER;
            reg->dim = VKD3D_SM4_DIMENSION_VEC4;
            if (swizzle_type)
@@ -2231,9 +2231,9 @@ static void write_sm4_resource_load(struct hlsl_ctx *ctx,
    const struct hlsl_ir_node *texel_offset = load->texel_offset.node;
    const struct hlsl_ir_node *coords = load->coords.node;
 
-    if (resource_type->type != HLSL_CLASS_OBJECT)
+    if (resource_type->class != HLSL_CLASS_OBJECT)
    {
-        assert(resource_type->type == HLSL_CLASS_ARRAY || resource_type->type == HLSL_CLASS_STRUCT);
+        assert(resource_type->class == HLSL_CLASS_ARRAY || resource_type->class == HLSL_CLASS_STRUCT);
        hlsl_fixme(ctx, &load->node.loc, "Resource being a component of another variable.");
        return;
    }
@@ -2242,9 +2242,9 @@ static void write_sm4_resource_load(struct hlsl_ctx *ctx,
    {
        const struct hlsl_type *sampler_type = load->sampler.var->data_type;
 
-        if (sampler_type->type != HLSL_CLASS_OBJECT)
+        if (sampler_type->class != HLSL_CLASS_OBJECT)
        {
-            assert(sampler_type->type == HLSL_CLASS_ARRAY || sampler_type->type == HLSL_CLASS_STRUCT);
+            assert(sampler_type->class == HLSL_CLASS_ARRAY || sampler_type->class == HLSL_CLASS_STRUCT);
            hlsl_fixme(ctx, &load->node.loc, "Sampler being a component of another variable.");
            return;
        }
@@ -2312,9 +2312,9 @@ static void write_sm4_resource_store(struct hlsl_ctx *ctx,
 {
    const struct hlsl_type *resource_type = store->resource.var->data_type;
 
-    if (resource_type->type != HLSL_CLASS_OBJECT)
+    if (resource_type->class != HLSL_CLASS_OBJECT)
    {
-        assert(resource_type->type == HLSL_CLASS_ARRAY || resource_type->type == HLSL_CLASS_STRUCT);
+        assert(resource_type->class == HLSL_CLASS_ARRAY || resource_type->class == HLSL_CLASS_STRUCT);
        hlsl_fixme(ctx, &store->node.loc, "Resource being a component of another variable.");
        return;
    }
@@ -2377,18 +2377,18 @@ static void write_sm4_block(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *
    {
        if (instr->data_type)
        {
-            if (instr->data_type->type == HLSL_CLASS_MATRIX)
+            if (instr->data_type->class == HLSL_CLASS_MATRIX)
            {
                hlsl_fixme(ctx, &instr->loc, "Matrix operations need to be lowered.");
                break;
            }
-            else if (instr->data_type->type == HLSL_CLASS_OBJECT)
+            else if (instr->data_type->class == HLSL_CLASS_OBJECT)
            {
                hlsl_fixme(ctx, &instr->loc, "Object copy.");
                break;
            }
 
-            assert(instr->data_type->type == HLSL_CLASS_SCALAR || instr->data_type->type == HLSL_CLASS_VECTOR);
+            assert(instr->data_type->class == HLSL_CLASS_SCALAR || instr->data_type->class == HLSL_CLASS_VECTOR);
        }
 
        switch (instr->type)