Mirror of https://gitlab.winehq.org/wine/vkd3d.git (synced 2025-01-28 13:05:02 -08:00)
vkd3d-shader/hlsl: Put the hlsl_ir_constant value in a structure.
parent 3cce4e70e9
commit 0a44e6043e
Notes:
Alexandre Julliard
2023-05-01 22:25:00 +02:00
Approved-by: Giovanni Mascellani (@giomasce)
Approved-by: Francisco Casas (@fcasas)
Approved-by: Henri Verbeet (@hverbeet)
Approved-by: Alexandre Julliard (@julliard)
Merge-Request: https://gitlab.winehq.org/wine/vkd3d/-/merge_requests/177
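The change itself is mechanical: the bare four-component union array that used to sit directly in struct hlsl_ir_constant is wrapped in a named struct hlsl_constant_value, so every access changes from c->value[i] to c->value.u[i], and a whole constant value can now be copied with a plain struct assignment instead of memcpy() (see the clone_constant and copy_propagation_replace_with_constant_vector hunks below). A minimal standalone sketch of the new layout, with the hlsl_ir_node and hlsl_reg members omitted and the demo code invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Simplified post-commit layout: the per-component union gets its own name
 * and the array of four components is wrapped in struct hlsl_constant_value. */
struct hlsl_constant_value
{
    union hlsl_constant_value_component
    {
        uint32_t u;
        int32_t i;
        float f;
        double d;
    } u[4];
};

struct hlsl_ir_constant
{
    /* struct hlsl_ir_node node; and struct hlsl_reg reg; omitted in this sketch. */
    struct hlsl_constant_value value;
};

int main(void)
{
    struct hlsl_ir_constant src = {0}, dst = {0};

    src.value.u[0].f = 1.0f; /* was: src.value[0].f = 1.0f; */
    dst.value = src.value;   /* was: memcpy(dst.value, src.value, sizeof(src.value)); */
    printf("%f\n", dst.value.u[0].f);
    return 0;
}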
@@ -596,8 +596,8 @@ struct hlsl_type *hlsl_get_element_type_from_path_index(struct hlsl_ctx *ctx, co
         {
             struct hlsl_ir_constant *c = hlsl_ir_constant(idx);
 
-            assert(c->value[0].u < type->e.record.field_count);
-            return type->e.record.fields[c->value[0].u].type;
+            assert(c->value.u[0].u < type->e.record.field_count);
+            return type->e.record.fields[c->value.u[0].u].type;
         }
 
         default:
@@ -1140,7 +1140,7 @@ struct hlsl_ir_node *hlsl_new_bool_constant(struct hlsl_ctx *ctx, bool b, const
     struct hlsl_ir_constant *c;
 
     if ((c = hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL), loc)))
-        c->value[0].u = b ? ~0u : 0;
+        c->value.u[0].u = b ? ~0u : 0;
 
     return &c->node;
 }
@@ -1151,7 +1151,7 @@ struct hlsl_ir_node *hlsl_new_float_constant(struct hlsl_ctx *ctx, float f,
     struct hlsl_ir_constant *c;
 
     if ((c = hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc)))
-        c->value[0].f = f;
+        c->value.u[0].f = f;
 
     return &c->node;
 }
@@ -1163,7 +1163,7 @@ struct hlsl_ir_node *hlsl_new_int_constant(struct hlsl_ctx *ctx, int32_t n, cons
     c = hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_INT), loc);
 
     if (c)
-        c->value[0].i = n;
+        c->value.u[0].i = n;
 
     return &c->node;
 }
@@ -1176,7 +1176,7 @@ struct hlsl_ir_constant *hlsl_new_uint_constant(struct hlsl_ctx *ctx, unsigned i
     c = hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc);
 
     if (c)
-        c->value[0].u = n;
+        c->value.u[0].u = n;
 
     return c;
 }
@@ -1510,7 +1510,7 @@ static struct hlsl_ir_node *clone_constant(struct hlsl_ctx *ctx, struct hlsl_ir_
 
     if (!(dst = hlsl_new_constant(ctx, src->node.data_type, &src->node.loc)))
         return NULL;
-    memcpy(dst->value, src->value, sizeof(src->value));
+    dst->value = src->value;
     return &dst->node;
 }
 
@@ -2241,7 +2241,7 @@ static void dump_ir_constant(struct vkd3d_string_buffer *buffer, const struct hl
     vkd3d_string_buffer_printf(buffer, "{");
     for (x = 0; x < type->dimx; ++x)
     {
-        const union hlsl_constant_value *value = &constant->value[x];
+        const union hlsl_constant_value_component *value = &constant->value.u[x];
 
         switch (type->base_type)
        {
@@ -632,13 +632,16 @@ struct hlsl_ir_store
 struct hlsl_ir_constant
 {
     struct hlsl_ir_node node;
-    union hlsl_constant_value
-    {
-        uint32_t u;
-        int32_t i;
-        float f;
-        double d;
-    } value[4];
+    struct hlsl_constant_value
+    {
+        union hlsl_constant_value_component
+        {
+            uint32_t u;
+            int32_t i;
+            float f;
+            double d;
+        } u[4];
+    } value;
     /* Constant register of type 'c' where the constant value is stored for SM1. */
     struct hlsl_reg reg;
 };
@@ -1151,7 +1151,7 @@ static unsigned int evaluate_static_expression(struct hlsl_ir_node *node)
         case HLSL_IR_CONSTANT:
         {
             struct hlsl_ir_constant *constant = hlsl_ir_constant(node);
-            const union hlsl_constant_value *value = &constant->value[0];
+            const union hlsl_constant_value_component *value = &constant->value.u[0];
 
             switch (constant->node.data_type->base_type)
             {
@@ -2863,10 +2863,10 @@ static bool intrinsic_lit(struct hlsl_ctx *ctx,
 
     if (!(init = hlsl_new_constant(ctx, ret_type, loc)))
         return false;
-    init->value[0].f = 1.0f;
-    init->value[1].f = 0.0f;
-    init->value[2].f = 0.0f;
-    init->value[3].f = 1.0f;
+    init->value.u[0].f = 1.0f;
+    init->value.u[1].f = 0.0f;
+    init->value.u[2].f = 0.0f;
+    init->value.u[3].f = 1.0f;
     list_add_tail(params->instrs, &init->node.entry);
 
     if (!(store = hlsl_new_simple_store(ctx, var, &init->node)))
@@ -67,7 +67,7 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
 
         case HLSL_CLASS_STRUCT:
         {
-            unsigned int field_idx = hlsl_ir_constant(idx)->value[0].u;
+            unsigned int field_idx = hlsl_ir_constant(idx)->value.u[0].u;
             struct hlsl_struct_field *field = &type->e.record.fields[field_idx];
 
             if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset[regset], loc)))
@@ -1152,7 +1152,7 @@ static void copy_propagation_invalidate_variable_from_deref_recurse(struct hlsl_
 
     if (type->class == HLSL_CLASS_STRUCT)
     {
-        unsigned int idx = hlsl_ir_constant(path_node)->value[0].u;
+        unsigned int idx = hlsl_ir_constant(path_node)->value.u[0].u;
 
         for (i = 0; i < idx; ++i)
             comp_start += hlsl_type_component_count(type->e.record.fields[i].type);
@@ -1167,7 +1167,7 @@ static void copy_propagation_invalidate_variable_from_deref_recurse(struct hlsl_
     if (path_node->type == HLSL_IR_CONSTANT)
     {
         copy_propagation_invalidate_variable_from_deref_recurse(ctx, var_def, deref, subtype,
-                depth + 1, hlsl_ir_constant(path_node)->value[0].u * subtype_comp_count, writemask);
+                depth + 1, hlsl_ir_constant(path_node)->value.u[0].u * subtype_comp_count, writemask);
     }
     else
     {
@@ -1262,7 +1262,7 @@ static bool copy_propagation_replace_with_constant_vector(struct hlsl_ctx *ctx,
 {
     const unsigned int instr_component_count = hlsl_type_component_count(instr->data_type);
     const struct hlsl_ir_var *var = deref->var;
-    union hlsl_constant_value values[4] = {0};
+    struct hlsl_constant_value values = {0};
     struct hlsl_ir_constant *cons;
     unsigned int start, count, i;
 
@@ -1277,15 +1277,12 @@ static bool copy_propagation_replace_with_constant_vector(struct hlsl_ctx *ctx,
                 || value->node->type != HLSL_IR_CONSTANT)
             return false;
 
-        values[i] = hlsl_ir_constant(value->node)->value[value->component];
+        values.u[i] = hlsl_ir_constant(value->node)->value.u[value->component];
     }
 
     if (!(cons = hlsl_new_constant(ctx, instr->data_type, &instr->loc)))
         return false;
-    cons->value[0] = values[0];
-    cons->value[1] = values[1];
-    cons->value[2] = values[2];
-    cons->value[3] = values[3];
+    cons->value = values;
     list_add_before(&instr->entry, &cons->node.entry);
 
     TRACE("Load from %s[%u-%u]%s turned into a constant %p.\n",
@@ -2070,7 +2067,7 @@ static bool lower_round(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *
 
     component_count = hlsl_type_component_count(type);
     for (i = 0; i < component_count; ++i)
-        half->value[i].f = 0.5f;
+        half->value.u[i].f = 0.5f;
     list_add_before(&instr->entry, &half->node.entry);
 
     if (!(sum = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg, &half->node)))
@@ -2189,7 +2186,7 @@ static bool lower_int_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
     if (!(high_bit = hlsl_new_constant(ctx, type, &instr->loc)))
         return false;
     for (i = 0; i < type->dimx; ++i)
-        high_bit->value[i].u = 0x80000000;
+        high_bit->value.u[i].u = 0x80000000;
     list_add_before(&instr->entry, &high_bit->node.entry);
 
     if (!(and = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_AND, xor, &high_bit->node)))
@@ -2256,7 +2253,7 @@ static bool lower_int_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
     if (!(high_bit = hlsl_new_constant(ctx, type, &instr->loc)))
         return false;
     for (i = 0; i < type->dimx; ++i)
-        high_bit->value[i].u = 0x80000000;
+        high_bit->value.u[i].u = 0x80000000;
     list_add_before(&instr->entry, &high_bit->node.entry);
 
     if (!(and = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_AND, arg1, &high_bit->node)))
@@ -2372,7 +2369,7 @@ static bool lower_float_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
     if (!(one = hlsl_new_constant(ctx, type, &instr->loc)))
         return false;
     for (i = 0; i < type->dimx; ++i)
-        one->value[i].f = 1.0f;
+        one->value.u[i].f = 1.0f;
     list_add_before(&instr->entry, &one->node.entry);
 
     if (!(div = hlsl_new_binary_expr(ctx, HLSL_OP2_DIV, &one->node, &cond->node)))
@@ -2928,12 +2925,12 @@ static void allocate_const_registers_recurse(struct hlsl_ctx *ctx, struct hlsl_b
     {
         for (x = 0, i = 0; x < 4; ++x)
         {
-            const union hlsl_constant_value *value;
+            const union hlsl_constant_value_component *value;
             float f;
 
             if (!(writemask & (1u << x)))
                 continue;
-            value = &constant->value[i++];
+            value = &constant->value.u[i++];
 
             switch (type->base_type)
             {
@@ -3389,7 +3386,7 @@ bool hlsl_component_index_range_from_deref(struct hlsl_ctx *ctx, const struct hl
         assert(path_node->data_type->class == HLSL_CLASS_SCALAR
                 && path_node->data_type->base_type == HLSL_TYPE_UINT);
 
-        idx = hlsl_ir_constant(path_node)->value[0].u;
+        idx = hlsl_ir_constant(path_node)->value.u[0].u;
 
         switch (type->class)
         {
@@ -3460,7 +3457,7 @@ bool hlsl_offset_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref
     if (offset_node->type != HLSL_IR_CONSTANT)
         return false;
 
-    *offset = hlsl_ir_constant(offset_node)->value[0].u;
+    *offset = hlsl_ir_constant(offset_node)->value.u[0].u;
 
     size = deref->var->data_type->reg_size[deref->offset_regset];
     if (*offset >= size)
@@ -3542,12 +3539,12 @@ static void parse_numthreads_attribute(struct hlsl_ctx *ctx, const struct hlsl_a
         }
         constant = hlsl_ir_constant(instr);
 
-        if ((type->base_type == HLSL_TYPE_INT && constant->value[0].i <= 0)
-                || (type->base_type == HLSL_TYPE_UINT && !constant->value[0].u))
+        if ((type->base_type == HLSL_TYPE_INT && constant->value.u[0].i <= 0)
+                || (type->base_type == HLSL_TYPE_UINT && !constant->value.u[0].u))
             hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_THREAD_COUNT,
                     "Thread count must be a positive integer.");
 
-        ctx->thread_count[i] = constant->value[0].u;
+        ctx->thread_count[i] = constant->value.u[0].u;
     }
 }
 
@@ -44,38 +44,38 @@ static bool fold_cast(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst, struct
         {
             case HLSL_TYPE_FLOAT:
             case HLSL_TYPE_HALF:
-                u = src->value[k].f;
-                i = src->value[k].f;
-                f = src->value[k].f;
-                d = src->value[k].f;
+                u = src->value.u[k].f;
+                i = src->value.u[k].f;
+                f = src->value.u[k].f;
+                d = src->value.u[k].f;
                 break;
 
             case HLSL_TYPE_DOUBLE:
-                u = src->value[k].d;
-                i = src->value[k].d;
-                f = src->value[k].d;
-                d = src->value[k].d;
+                u = src->value.u[k].d;
+                i = src->value.u[k].d;
+                f = src->value.u[k].d;
+                d = src->value.u[k].d;
                 break;
 
             case HLSL_TYPE_INT:
-                u = src->value[k].i;
-                i = src->value[k].i;
-                f = src->value[k].i;
-                d = src->value[k].i;
+                u = src->value.u[k].i;
+                i = src->value.u[k].i;
+                f = src->value.u[k].i;
+                d = src->value.u[k].i;
                 break;
 
             case HLSL_TYPE_UINT:
-                u = src->value[k].u;
-                i = src->value[k].u;
-                f = src->value[k].u;
-                d = src->value[k].u;
+                u = src->value.u[k].u;
+                i = src->value.u[k].u;
+                f = src->value.u[k].u;
+                d = src->value.u[k].u;
                 break;
 
             case HLSL_TYPE_BOOL:
-                u = !!src->value[k].u;
-                i = !!src->value[k].u;
-                f = !!src->value[k].u;
-                d = !!src->value[k].u;
+                u = !!src->value.u[k].u;
+                i = !!src->value.u[k].u;
+                f = !!src->value.u[k].u;
+                d = !!src->value.u[k].u;
                 break;
 
             default:
@@ -86,19 +86,19 @@ static bool fold_cast(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst, struct
         {
             case HLSL_TYPE_FLOAT:
             case HLSL_TYPE_HALF:
-                dst->value[k].f = f;
+                dst->value.u[k].f = f;
                 break;
 
             case HLSL_TYPE_DOUBLE:
-                dst->value[k].d = d;
+                dst->value.u[k].d = d;
                 break;
 
             case HLSL_TYPE_INT:
-                dst->value[k].i = i;
+                dst->value.u[k].i = i;
                 break;
 
             case HLSL_TYPE_UINT:
-                dst->value[k].u = u;
+                dst->value.u[k].u = u;
                 break;
 
             case HLSL_TYPE_BOOL:
@@ -123,16 +123,16 @@ static bool fold_neg(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst, struct
         {
             case HLSL_TYPE_FLOAT:
             case HLSL_TYPE_HALF:
-                dst->value[k].f = -src->value[k].f;
+                dst->value.u[k].f = -src->value.u[k].f;
                 break;
 
             case HLSL_TYPE_DOUBLE:
-                dst->value[k].d = -src->value[k].d;
+                dst->value.u[k].d = -src->value.u[k].d;
                 break;
 
             case HLSL_TYPE_INT:
             case HLSL_TYPE_UINT:
-                dst->value[k].u = -src->value[k].u;
+                dst->value.u[k].u = -src->value.u[k].u;
                 break;
 
             default:
@@ -158,18 +158,18 @@ static bool fold_add(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst, struct
         {
             case HLSL_TYPE_FLOAT:
             case HLSL_TYPE_HALF:
-                dst->value[k].f = src1->value[k].f + src2->value[k].f;
+                dst->value.u[k].f = src1->value.u[k].f + src2->value.u[k].f;
                 break;
 
             case HLSL_TYPE_DOUBLE:
-                dst->value[k].d = src1->value[k].d + src2->value[k].d;
+                dst->value.u[k].d = src1->value.u[k].d + src2->value.u[k].d;
                 break;
 
             /* Handling HLSL_TYPE_INT through the unsigned field to avoid
              * undefined behavior with signed integers in C. */
             case HLSL_TYPE_INT:
             case HLSL_TYPE_UINT:
-                dst->value[k].u = src1->value[k].u + src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u + src2->value.u[k].u;
                 break;
 
             default:
@@ -195,16 +195,16 @@ static bool fold_mul(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
         {
             case HLSL_TYPE_FLOAT:
             case HLSL_TYPE_HALF:
-                dst->value[k].f = src1->value[k].f * src2->value[k].f;
+                dst->value.u[k].f = src1->value.u[k].f * src2->value.u[k].f;
                 break;
 
             case HLSL_TYPE_DOUBLE:
-                dst->value[k].d = src1->value[k].d * src2->value[k].d;
+                dst->value.u[k].d = src1->value.u[k].d * src2->value.u[k].d;
                 break;
 
             case HLSL_TYPE_INT:
             case HLSL_TYPE_UINT:
-                dst->value[k].u = src1->value[k].u * src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u * src2->value.u[k].u;
                 break;
 
             default:
@@ -229,24 +229,24 @@ static bool fold_nequal(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
         {
             case HLSL_TYPE_FLOAT:
             case HLSL_TYPE_HALF:
-                dst->value[k].u = src1->value[k].f != src2->value[k].f;
+                dst->value.u[k].u = src1->value.u[k].f != src2->value.u[k].f;
                 break;
 
             case HLSL_TYPE_DOUBLE:
-                dst->value[k].u = src1->value[k].d != src2->value[k].d;
+                dst->value.u[k].u = src1->value.u[k].d != src2->value.u[k].d;
                 break;
 
             case HLSL_TYPE_INT:
             case HLSL_TYPE_UINT:
             case HLSL_TYPE_BOOL:
-                dst->value[k].u = src1->value[k].u != src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u != src2->value.u[k].u;
                 break;
 
             default:
                 vkd3d_unreachable();
         }
 
-        dst->value[k].u *= ~0u;
+        dst->value.u[k].u *= ~0u;
     }
     return true;
 }
@@ -266,13 +266,13 @@ static bool fold_div(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
         {
             case HLSL_TYPE_FLOAT:
             case HLSL_TYPE_HALF:
-                if (ctx->profile->major_version >= 4 && src2->value[k].f == 0)
+                if (ctx->profile->major_version >= 4 && src2->value.u[k].f == 0)
                 {
                     hlsl_warning(ctx, &dst->node.loc, VKD3D_SHADER_WARNING_HLSL_DIVISION_BY_ZERO,
                             "Floating point division by zero.");
                 }
-                dst->value[k].f = src1->value[k].f / src2->value[k].f;
-                if (ctx->profile->major_version < 4 && !isfinite(dst->value[k].f))
+                dst->value.u[k].f = src1->value.u[k].f / src2->value.u[k].f;
+                if (ctx->profile->major_version < 4 && !isfinite(dst->value.u[k].f))
                 {
                     hlsl_error(ctx, &dst->node.loc, VKD3D_SHADER_ERROR_HLSL_DIVISION_BY_ZERO,
                             "Infinities and NaNs are not allowed by the shader model.");
@@ -280,35 +280,35 @@ static bool fold_div(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
                 break;
 
             case HLSL_TYPE_DOUBLE:
-                if (src2->value[k].d == 0)
+                if (src2->value.u[k].d == 0)
                 {
                     hlsl_warning(ctx, &dst->node.loc, VKD3D_SHADER_WARNING_HLSL_DIVISION_BY_ZERO,
                             "Floating point division by zero.");
                 }
-                dst->value[k].d = src1->value[k].d / src2->value[k].d;
+                dst->value.u[k].d = src1->value.u[k].d / src2->value.u[k].d;
                 break;
 
             case HLSL_TYPE_INT:
-                if (src2->value[k].i == 0)
+                if (src2->value.u[k].i == 0)
                 {
                     hlsl_error(ctx, &dst->node.loc, VKD3D_SHADER_ERROR_HLSL_DIVISION_BY_ZERO,
                             "Division by zero.");
                     return false;
                 }
-                if (src1->value[k].i == INT_MIN && src2->value[k].i == -1)
-                    dst->value[k].i = INT_MIN;
+                if (src1->value.u[k].i == INT_MIN && src2->value.u[k].i == -1)
+                    dst->value.u[k].i = INT_MIN;
                 else
-                    dst->value[k].i = src1->value[k].i / src2->value[k].i;
+                    dst->value.u[k].i = src1->value.u[k].i / src2->value.u[k].i;
                 break;
 
             case HLSL_TYPE_UINT:
-                if (src2->value[k].u == 0)
+                if (src2->value.u[k].u == 0)
                 {
                     hlsl_error(ctx, &dst->node.loc, VKD3D_SHADER_ERROR_HLSL_DIVISION_BY_ZERO,
                             "Division by zero.");
                     return false;
                 }
-                dst->value[k].u = src1->value[k].u / src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u / src2->value.u[k].u;
                 break;
 
             default:
@@ -333,26 +333,26 @@ static bool fold_mod(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
         switch (type)
         {
            case HLSL_TYPE_INT:
-                if (src2->value[k].i == 0)
+                if (src2->value.u[k].i == 0)
                 {
                     hlsl_error(ctx, &dst->node.loc, VKD3D_SHADER_ERROR_HLSL_DIVISION_BY_ZERO,
                             "Division by zero.");
                     return false;
                 }
-                if (src1->value[k].i == INT_MIN && src2->value[k].i == -1)
-                    dst->value[k].i = 0;
+                if (src1->value.u[k].i == INT_MIN && src2->value.u[k].i == -1)
+                    dst->value.u[k].i = 0;
                 else
-                    dst->value[k].i = src1->value[k].i % src2->value[k].i;
+                    dst->value.u[k].i = src1->value.u[k].i % src2->value.u[k].i;
                 break;
 
             case HLSL_TYPE_UINT:
-                if (src2->value[k].u == 0)
+                if (src2->value.u[k].u == 0)
                 {
                     hlsl_error(ctx, &dst->node.loc, VKD3D_SHADER_ERROR_HLSL_DIVISION_BY_ZERO,
                             "Division by zero.");
                     return false;
                 }
-                dst->value[k].u = src1->value[k].u % src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u % src2->value.u[k].u;
                 break;
 
             default:
@@ -377,11 +377,11 @@ static bool fold_max(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
         switch (type)
        {
            case HLSL_TYPE_INT:
-                dst->value[k].i = max(src1->value[k].i, src2->value[k].i);
+                dst->value.u[k].i = max(src1->value.u[k].i, src2->value.u[k].i);
                 break;
 
            case HLSL_TYPE_UINT:
-                dst->value[k].u = max(src1->value[k].u, src2->value[k].u);
+                dst->value.u[k].u = max(src1->value.u[k].u, src2->value.u[k].u);
                 break;
 
            default:
@@ -406,11 +406,11 @@ static bool fold_min(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
         switch (type)
        {
            case HLSL_TYPE_INT:
-                dst->value[k].i = min(src1->value[k].i, src2->value[k].i);
+                dst->value.u[k].i = min(src1->value.u[k].i, src2->value.u[k].i);
                 break;
 
            case HLSL_TYPE_UINT:
-                dst->value[k].u = min(src1->value[k].u, src2->value[k].u);
+                dst->value.u[k].u = min(src1->value.u[k].u, src2->value.u[k].u);
                 break;
 
            default:
@@ -436,7 +436,7 @@ static bool fold_bit_xor(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
        {
            case HLSL_TYPE_INT:
            case HLSL_TYPE_UINT:
-                dst->value[k].u = src1->value[k].u ^ src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u ^ src2->value.u[k].u;
                 break;
 
            default:
@@ -462,7 +462,7 @@ static bool fold_bit_and(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
        {
            case HLSL_TYPE_INT:
            case HLSL_TYPE_UINT:
-                dst->value[k].u = src1->value[k].u & src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u & src2->value.u[k].u;
                 break;
 
            default:
@@ -488,7 +488,7 @@ static bool fold_bit_or(struct hlsl_ctx *ctx, struct hlsl_ir_constant *dst,
        {
            case HLSL_TYPE_INT:
            case HLSL_TYPE_UINT:
-                dst->value[k].u = src1->value[k].u | src2->value[k].u;
+                dst->value.u[k].u = src1->value.u[k].u | src2->value.u[k].u;
                 break;
 
            default:
@@ -616,7 +616,7 @@ bool hlsl_fold_constant_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
         return false;
 
     for (i = 0; i < swizzle->node.data_type->dimx; ++i)
-        res->value[i] = value->value[hlsl_swizzle_get_component(swizzle->swizzle, i)];
+        res->value.u[i] = value->value.u[hlsl_swizzle_get_component(swizzle->swizzle, i)];
 
     list_add_before(&swizzle->node.entry, &res->node.entry);
     hlsl_replace_node(&swizzle->node, &res->node);
@@ -3297,9 +3297,9 @@ static bool encode_texel_offset_as_aoffimmi(struct sm4_instruction *instr,
     offset = hlsl_ir_constant(texel_offset);
 
     modif.type = VKD3D_SM4_MODIFIER_AOFFIMMI;
-    modif.u.aoffimmi.u = offset->value[0].i;
-    modif.u.aoffimmi.v = offset->value[1].i;
-    modif.u.aoffimmi.w = offset->value[2].i;
+    modif.u.aoffimmi.u = offset->value.u[0].i;
+    modif.u.aoffimmi.v = offset->value.u[1].i;
+    modif.u.aoffimmi.w = offset->value.u[2].i;
     if (modif.u.aoffimmi.u < -8 || modif.u.aoffimmi.u > 7
             || modif.u.aoffimmi.v < -8 || modif.u.aoffimmi.v > 7
             || modif.u.aoffimmi.w < -8 || modif.u.aoffimmi.w > 7)
@@ -3622,7 +3622,7 @@ static void write_sm4_constant(struct hlsl_ctx *ctx,
     if (dimx == 1)
     {
         reg->dim = VKD3D_SM4_DIMENSION_SCALAR;
-        reg->immconst_uint[0] = constant->value[0].u;
+        reg->immconst_uint[0] = constant->value.u[0].u;
     }
     else
     {
@@ -3632,7 +3632,7 @@ static void write_sm4_constant(struct hlsl_ctx *ctx,
         for (i = 0; i < 4; ++i)
         {
             if (instr.dsts[0].writemask & (1u << i))
-                reg->immconst_uint[i] = constant->value[j++].u;
+                reg->immconst_uint[i] = constant->value.u[j++].u;
         }
     }
     instr.src_count = 1,