2021-03-02 13:34:46 -08:00
|
|
|
/*
|
|
|
|
* HLSL optimization and code generation
|
|
|
|
*
|
|
|
|
* Copyright 2019-2020 Zebediah Figura for CodeWeavers
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "hlsl.h"
|
2021-03-28 12:46:55 -07:00
|
|
|
#include <stdio.h>
|
|
|
|
|
2022-07-01 09:36:25 -07:00
|
|
|
/* TODO: remove when no longer needed, only used for new_offset_instr_from_deref() */
/* Emit, into 'block', the instructions computing the register offset contributed
 * by one step ('idx') of a deref path through 'type', and combine it with the
 * running 'offset' (which may be NULL for the first step). Returns the node
 * holding the combined offset, or NULL on allocation failure (in which case
 * 'block' may hold partial instructions for the caller to dispose of). */
static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
        struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
        enum hlsl_regset regset, const struct vkd3d_shader_location *loc)
{
    struct hlsl_ir_node *idx_offset = NULL;
    struct hlsl_ir_node *c;

    hlsl_block_init(block);

    switch (type->class)
    {
        case HLSL_CLASS_VECTOR:
            /* Vector components are packed contiguously; the index is the offset. */
            idx_offset = idx;
            break;

        case HLSL_CLASS_MATRIX:
        {
            /* Each matrix row/column occupies one 4-component register. */
            if (!(c = hlsl_new_uint_constant(ctx, 4, loc)))
                return NULL;
            hlsl_block_add_instr(block, c);

            if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, c, idx)))
                return NULL;
            hlsl_block_add_instr(block, idx_offset);

            break;
        }

        case HLSL_CLASS_ARRAY:
        {
            /* Scale the index by the element size in the given register set. */
            unsigned int size = hlsl_type_get_array_element_reg_size(type->e.array.type, regset);

            if (!(c = hlsl_new_uint_constant(ctx, size, loc)))
                return NULL;
            hlsl_block_add_instr(block, c);

            if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, c, idx)))
                return NULL;
            hlsl_block_add_instr(block, idx_offset);

            break;
        }

        case HLSL_CLASS_STRUCT:
        {
            /* Struct path indices are always constants; look up the field's
             * precomputed register offset. */
            unsigned int field_idx = hlsl_ir_constant(idx)->value.u[0].u;
            struct hlsl_struct_field *field = &type->e.record.fields[field_idx];

            if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset[regset], loc)))
                return NULL;
            hlsl_block_add_instr(block, c);

            idx_offset = c;

            break;
        }

        default:
            vkd3d_unreachable();
    }

    /* Accumulate onto the offset computed for the preceding path steps. */
    if (offset)
    {
        if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, offset, idx_offset)))
            return NULL;
        hlsl_block_add_instr(block, idx_offset);
    }

    return idx_offset;
}
|
|
|
|
|
|
|
|
/* TODO: remove when no longer needed, only used for replace_deref_path_with_offset() */
/* Emit, into 'block', the instructions computing the full register offset of
 * 'deref' by folding every step of its path through the variable's type.
 * Returns the node holding the final offset, or NULL on failure. */
static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, struct hlsl_block *block,
        const struct hlsl_deref *deref, const struct vkd3d_shader_location *loc)
{
    enum hlsl_regset regset = hlsl_type_get_regset(deref->data_type);
    struct hlsl_ir_node *offset = NULL;
    struct hlsl_type *type;
    unsigned int i;

    hlsl_block_init(block);

    assert(deref->var);
    type = deref->var->data_type;

    for (i = 0; i < deref->path_len; ++i)
    {
        struct hlsl_block idx_block;

        /* Each step's instructions land in idx_block; NULL means allocation
         * failure inside the helper. */
        if (!(offset = new_offset_from_path_index(ctx, &idx_block, type, offset, deref->path[i].node,
                regset, loc)))
            return NULL;

        hlsl_block_add_block(block, &idx_block);

        /* Descend one level into the type to interpret the next path index. */
        type = hlsl_get_element_type_from_path_index(ctx, type, deref->path[i].node);
    }

    return offset;
}
|
|
|
|
|
2022-06-30 15:20:20 -07:00
|
|
|
/* TODO: remove when no longer needed, only used for transform_deref_paths_into_offsets() */
/* Convert a path-based deref into an offset-based one, inserting the offset
 * computation before 'instr'. Returns false only on allocation failure. */
static bool replace_deref_path_with_offset(struct hlsl_ctx *ctx, struct hlsl_deref *deref,
        struct hlsl_ir_node *instr)
{
    struct hlsl_type *type;
    struct hlsl_ir_node *offset;
    struct hlsl_block block;

    assert(deref->var);

    /* register offsets shouldn't be used before this point is reached. */
    assert(!deref->offset.node);

    type = hlsl_deref_get_type(ctx, deref);

    /* Instructions that directly refer to structs or arrays (instead of single-register components)
     * are removed later by dce. So it is not a problem to just cleanup their derefs. */
    if (type->class == HLSL_CLASS_STRUCT || type->class == HLSL_CLASS_ARRAY)
    {
        hlsl_cleanup_deref(deref);
        return true;
    }

    /* Record the dereferenced type before the path (which defines it) is freed. */
    deref->data_type = type;

    if (!(offset = new_offset_instr_from_deref(ctx, &block, deref, &instr->loc)))
        return false;
    list_move_before(&instr->entry, &block.instrs);

    /* Drop the path sources, then point the deref at the computed offset. */
    hlsl_cleanup_deref(deref);
    hlsl_src_from_node(&deref->offset, offset);

    return true;
}
|
|
|
|
|
2021-03-28 12:46:55 -07:00
|
|
|
/* Split uniforms into two variables representing the constant and temp
 * registers, and copy the former to the latter, so that writes to uniforms
 * work. */
static void prepend_uniform_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_var *temp)
{
    struct hlsl_ir_var *uniform;
    struct hlsl_ir_node *store;
    struct hlsl_ir_load *load;
    char *new_name;

    /* Use the synthetic name for the temp, rather than the uniform, so that we
     * can write the uniform name into the shader reflection data. */

    if (!(uniform = hlsl_new_var(ctx, temp->name, temp->data_type,
            &temp->loc, NULL, temp->storage_modifiers, &temp->reg_reservation)))
        return;
    /* Keep the uniform adjacent to the temp in the scope, and expose it as an
     * extern so it reaches the reflection data. */
    list_add_before(&temp->scope_entry, &uniform->scope_entry);
    list_add_tail(&ctx->extern_vars, &uniform->extern_entry);
    uniform->is_uniform = 1;
    uniform->is_param = temp->is_param;
    uniform->buffer = temp->buffer;

    /* The original name now belongs to the uniform; rename the temp. */
    if (!(new_name = hlsl_sprintf_alloc(ctx, "<temp-%s>", temp->name)))
        return;
    temp->name = new_name;

    /* Prepend "temp = uniform;" to the entry block. */
    if (!(load = hlsl_new_var_load(ctx, uniform, &temp->loc)))
        return;
    list_add_head(&block->instrs, &load->node.entry);

    if (!(store = hlsl_new_simple_store(ctx, temp, &load->node)))
        return;
    list_add_after(&load->node.entry, &store->entry);
}
|
2021-03-02 13:34:46 -08:00
|
|
|
|
2023-04-05 09:07:37 -07:00
|
|
|
static void validate_field_semantic(struct hlsl_ctx *ctx, struct hlsl_struct_field *field)
|
|
|
|
{
|
|
|
|
if (!field->semantic.name && hlsl_get_multiarray_element_type(field->type)->class <= HLSL_CLASS_LAST_NUMERIC
|
|
|
|
&& !field->semantic.reported_missing)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &field->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC,
|
|
|
|
"Field '%s' is missing a semantic.", field->name);
|
|
|
|
field->semantic.reported_missing = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-12 13:27:31 -07:00
|
|
|
static enum hlsl_base_type base_type_get_semantic_equivalent(enum hlsl_base_type base)
|
|
|
|
{
|
|
|
|
if (base == HLSL_TYPE_BOOL)
|
|
|
|
return HLSL_TYPE_UINT;
|
|
|
|
if (base == HLSL_TYPE_INT)
|
|
|
|
return HLSL_TYPE_UINT;
|
|
|
|
if (base == HLSL_TYPE_HALF)
|
|
|
|
return HLSL_TYPE_FLOAT;
|
|
|
|
return base;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool types_are_semantic_equivalent(struct hlsl_ctx *ctx, const struct hlsl_type *type1,
|
|
|
|
const struct hlsl_type *type2)
|
|
|
|
{
|
2023-04-14 14:46:03 -07:00
|
|
|
if (ctx->profile->major_version < 4)
|
|
|
|
return true;
|
|
|
|
|
2023-04-12 13:27:31 -07:00
|
|
|
if (type1->dimx != type2->dimx)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return base_type_get_semantic_equivalent(type1->base_type)
|
|
|
|
== base_type_get_semantic_equivalent(type2->base_type);
|
|
|
|
}
|
|
|
|
|
2022-07-01 08:37:47 -07:00
|
|
|
/* Find or create the synthetic extern variable ("<input-...>" / "<output-...>")
 * backing one register of a semantic. Reuses an existing extern with the same
 * synthetic name, emitting a diagnostic for duplicate outputs or for duplicate
 * inputs with incompatible types (each reported at most once per index, via
 * the reported_duplicated_* watermarks). Returns NULL on allocation failure. */
static struct hlsl_ir_var *add_semantic_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *var,
        struct hlsl_type *type, unsigned int modifiers, struct hlsl_semantic *semantic,
        uint32_t index, bool output, const struct vkd3d_shader_location *loc)
{
    struct hlsl_semantic new_semantic;
    struct hlsl_ir_var *ext_var;
    char *new_name;

    if (!(new_name = hlsl_sprintf_alloc(ctx, "<%s-%s%u>", output ? "output" : "input", semantic->name, index)))
        return NULL;

    LIST_FOR_EACH_ENTRY(ext_var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
    {
        /* Semantic names are compared case-insensitively. */
        if (!ascii_strcasecmp(ext_var->name, new_name))
        {
            if (output)
            {
                if (index >= semantic->reported_duplicated_output_next_index)
                {
                    hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SEMANTIC,
                            "Output semantic \"%s%u\" is used multiple times.", semantic->name, index);
                    hlsl_note(ctx, &ext_var->loc, HLSL_LEVEL_ERROR,
                            "First use of \"%s%u\" is here.", semantic->name, index);
                    semantic->reported_duplicated_output_next_index = index + 1;
                }
            }
            else
            {
                /* Duplicate inputs are allowed, as long as their types agree. */
                if (index >= semantic->reported_duplicated_input_incompatible_next_index
                        && !types_are_semantic_equivalent(ctx, ext_var->data_type, type))
                {
                    hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SEMANTIC,
                            "Input semantic \"%s%u\" is used multiple times with incompatible types.",
                            semantic->name, index);
                    hlsl_note(ctx, &ext_var->loc, HLSL_LEVEL_ERROR,
                            "First declaration of \"%s%u\" is here.", semantic->name, index);
                    semantic->reported_duplicated_input_incompatible_next_index = index + 1;
                }
            }

            vkd3d_free(new_name);
            return ext_var;
        }
    }

    if (!(new_semantic.name = hlsl_strdup(ctx, semantic->name)))
    {
        vkd3d_free(new_name);
        return NULL;
    }
    new_semantic.index = index;
    if (!(ext_var = hlsl_new_var(ctx, new_name, type, loc, &new_semantic, modifiers, NULL)))
    {
        vkd3d_free(new_name);
        hlsl_cleanup_semantic(&new_semantic);
        return NULL;
    }
    if (output)
        ext_var->is_output_semantic = 1;
    else
        ext_var->is_input_semantic = 1;
    ext_var->is_param = var->is_param;
    list_add_before(&var->scope_entry, &ext_var->scope_entry);
    list_add_tail(&ctx->extern_vars, &ext_var->extern_entry);

    return ext_var;
}
|
|
|
|
|
2023-03-10 13:14:30 -08:00
|
|
|
/* Emit, right after the load 'lhs', copies of each row of an input semantic
 * into the corresponding part of the temp variable behind 'lhs'. One semantic
 * variable is created (or reused) per matrix row; scalars/vectors use a single
 * one. Silently returns on allocation failure. */
static void prepend_input_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_load *lhs,
        unsigned int modifiers, struct hlsl_semantic *semantic, uint32_t semantic_index)
{
    struct hlsl_type *type = lhs->node.data_type, *vector_type_src, *vector_type_dst;
    struct vkd3d_shader_location *loc = &lhs->node.loc;
    struct hlsl_ir_var *var = lhs->src.var;
    struct hlsl_ir_node *c;
    unsigned int i;

    if (type->class > HLSL_CLASS_LAST_NUMERIC)
    {
        struct vkd3d_string_buffer *string;

        if (!(string = hlsl_type_to_string(ctx, type)))
            return;
        hlsl_fixme(ctx, &var->loc, "Input semantics for type %s.", string->buffer);
        hlsl_release_string_buffer(ctx, string);
    }
    if (!semantic->name)
        return;

    vector_type_dst = hlsl_get_vector_type(ctx, type->base_type, hlsl_type_minor_size(type));
    vector_type_src = vector_type_dst;
    /* SM1-3 vertex shader inputs are always read as 4-component vectors and
     * then cast down to the declared width. */
    if (ctx->profile->major_version < 4 && ctx->profile->type == VKD3D_SHADER_TYPE_VERTEX)
        vector_type_src = hlsl_get_vector_type(ctx, type->base_type, 4);

    /* One iteration per matrix row (major size); 1 for scalars/vectors. */
    for (i = 0; i < hlsl_type_major_size(type); ++i)
    {
        struct hlsl_ir_node *store, *cast;
        struct hlsl_ir_var *input;
        struct hlsl_ir_load *load;

        if (!(input = add_semantic_var(ctx, var, vector_type_src, modifiers, semantic,
                semantic_index + i, false, loc)))
            return;

        if (!(load = hlsl_new_var_load(ctx, input, &var->loc)))
            return;
        list_add_after(&lhs->node.entry, &load->node.entry);

        /* Cast from the (possibly widened) source vector to the declared width. */
        if (!(cast = hlsl_new_cast(ctx, &load->node, vector_type_dst, &var->loc)))
            return;
        list_add_after(&load->node.entry, &cast->entry);

        if (type->class == HLSL_CLASS_MATRIX)
        {
            /* Store into row i of the matrix. */
            if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
                return;
            list_add_after(&cast->entry, &c->entry);

            if (!(store = hlsl_new_store_index(ctx, &lhs->src, c, cast, 0, &var->loc)))
                return;
            list_add_after(&c->entry, &store->entry);
        }
        else
        {
            assert(i == 0);

            if (!(store = hlsl_new_store_index(ctx, &lhs->src, NULL, cast, 0, &var->loc)))
                return;
            list_add_after(&cast->entry, &store->entry);
        }
    }
}
|
|
|
|
|
2023-03-10 13:14:30 -08:00
|
|
|
/* Recursively walk arrays and structs behind 'lhs', generating input-semantic
 * copies for each numeric leaf via prepend_input_copy(). Array elements advance
 * the semantic index by the element's register size; struct fields restart from
 * the field's own semantic. Resource-typed fields are skipped. */
static void prepend_input_copy_recurse(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_load *lhs,
        unsigned int modifiers, struct hlsl_semantic *semantic, uint32_t semantic_index)
{
    struct vkd3d_shader_location *loc = &lhs->node.loc;
    struct hlsl_type *type = lhs->node.data_type;
    struct hlsl_ir_var *var = lhs->src.var;
    struct hlsl_ir_node *c;
    unsigned int i;

    if (type->class == HLSL_CLASS_ARRAY || type->class == HLSL_CLASS_STRUCT)
    {
        struct hlsl_ir_load *element_load;
        struct hlsl_struct_field *field;
        uint32_t elem_semantic_index;

        for (i = 0; i < hlsl_type_element_count(type); ++i)
        {
            if (type->class == HLSL_CLASS_ARRAY)
            {
                /* Each element consumes reg_size/4 semantic registers. */
                elem_semantic_index = semantic_index
                        + i * hlsl_type_get_array_element_reg_size(type->e.array.type, HLSL_REGSET_NUMERIC) / 4;
            }
            else
            {
                field = &type->e.record.fields[i];
                if (hlsl_type_is_resource(field->type))
                    continue;
                validate_field_semantic(ctx, field);
                /* Fields carry their own semantic; recurse with it. */
                semantic = &field->semantic;
                elem_semantic_index = semantic->index;
                loc = &field->loc;
            }

            if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
                return;
            list_add_after(&lhs->node.entry, &c->entry);

            /* This redundant load is expected to be deleted later by DCE. */
            if (!(element_load = hlsl_new_load_index(ctx, &lhs->src, c, loc)))
                return;
            list_add_after(&c->entry, &element_load->node.entry);

            prepend_input_copy_recurse(ctx, block, element_load, modifiers, semantic, elem_semantic_index);
        }
    }
    else
    {
        prepend_input_copy(ctx, block, lhs, modifiers, semantic, semantic_index);
    }
}
|
|
|
|
|
2021-04-27 10:14:20 -07:00
|
|
|
/* Split inputs into two variables representing the semantic and temp registers,
|
|
|
|
* and copy the former to the latter, so that writes to input variables work. */
|
2023-03-10 13:14:30 -08:00
|
|
|
static void prepend_input_var_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_var *var)
|
2021-03-28 12:46:57 -07:00
|
|
|
{
|
2022-07-01 09:06:30 -07:00
|
|
|
struct hlsl_ir_load *load;
|
|
|
|
|
2023-03-07 17:04:37 -08:00
|
|
|
/* This redundant load is expected to be deleted later by DCE. */
|
2023-04-14 00:02:14 -07:00
|
|
|
if (!(load = hlsl_new_var_load(ctx, var, &var->loc)))
|
2022-07-01 09:06:30 -07:00
|
|
|
return;
|
2023-03-10 13:14:30 -08:00
|
|
|
list_add_head(&block->instrs, &load->node.entry);
|
2022-07-01 09:06:30 -07:00
|
|
|
|
2023-03-10 13:14:30 -08:00
|
|
|
prepend_input_copy_recurse(ctx, block, load, var->storage_modifiers, &var->semantic, var->semantic.index);
|
2021-03-28 12:46:57 -07:00
|
|
|
}
|
|
|
|
|
2023-03-10 13:17:26 -08:00
|
|
|
/* Append, to 'block', copies of each row of the temp variable behind 'rhs'
 * into the corresponding output-semantic variable. One semantic variable is
 * created (or reused) per matrix row; scalars/vectors use a single one.
 * Silently returns on allocation failure. */
static void append_output_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_load *rhs,
        unsigned int modifiers, struct hlsl_semantic *semantic, uint32_t semantic_index)
{
    struct hlsl_type *type = rhs->node.data_type, *vector_type;
    struct vkd3d_shader_location *loc = &rhs->node.loc;
    struct hlsl_ir_var *var = rhs->src.var;
    struct hlsl_ir_node *c;
    unsigned int i;

    if (type->class > HLSL_CLASS_LAST_NUMERIC)
    {
        struct vkd3d_string_buffer *string;

        if (!(string = hlsl_type_to_string(ctx, type)))
            return;
        hlsl_fixme(ctx, &var->loc, "Output semantics for type %s.", string->buffer);
        hlsl_release_string_buffer(ctx, string);
    }
    if (!semantic->name)
        return;

    vector_type = hlsl_get_vector_type(ctx, type->base_type, hlsl_type_minor_size(type));

    /* One iteration per matrix row (major size); 1 for scalars/vectors. */
    for (i = 0; i < hlsl_type_major_size(type); ++i)
    {
        struct hlsl_ir_node *store;
        struct hlsl_ir_var *output;
        struct hlsl_ir_load *load;

        if (!(output = add_semantic_var(ctx, var, vector_type, modifiers, semantic, semantic_index + i, true, loc)))
            return;

        if (type->class == HLSL_CLASS_MATRIX)
        {
            /* Load row i of the matrix. */
            if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
                return;
            hlsl_block_add_instr(block, c);

            if (!(load = hlsl_new_load_index(ctx, &rhs->src, c, &var->loc)))
                return;
            hlsl_block_add_instr(block, &load->node);
        }
        else
        {
            assert(i == 0);

            if (!(load = hlsl_new_load_index(ctx, &rhs->src, NULL, &var->loc)))
                return;
            hlsl_block_add_instr(block, &load->node);
        }

        if (!(store = hlsl_new_simple_store(ctx, output, &load->node)))
            return;
        hlsl_block_add_instr(block, store);
    }
}
|
|
|
|
|
2023-03-10 13:17:26 -08:00
|
|
|
/* Recursively walk arrays and structs behind 'rhs', generating output-semantic
 * copies for each numeric leaf via append_output_copy(). Mirrors
 * prepend_input_copy_recurse(), but appends to 'block' instead of inserting
 * after the load. Resource-typed fields are skipped. */
static void append_output_copy_recurse(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_load *rhs,
        unsigned int modifiers, struct hlsl_semantic *semantic, uint32_t semantic_index)
{
    struct vkd3d_shader_location *loc = &rhs->node.loc;
    struct hlsl_type *type = rhs->node.data_type;
    struct hlsl_ir_var *var = rhs->src.var;
    struct hlsl_ir_node *c;
    unsigned int i;

    if (type->class == HLSL_CLASS_ARRAY || type->class == HLSL_CLASS_STRUCT)
    {
        struct hlsl_ir_load *element_load;
        struct hlsl_struct_field *field;
        uint32_t elem_semantic_index;

        for (i = 0; i < hlsl_type_element_count(type); ++i)
        {
            if (type->class == HLSL_CLASS_ARRAY)
            {
                /* Each element consumes reg_size/4 semantic registers. */
                elem_semantic_index = semantic_index
                        + i * hlsl_type_get_array_element_reg_size(type->e.array.type, HLSL_REGSET_NUMERIC) / 4;
            }
            else
            {
                field = &type->e.record.fields[i];
                if (hlsl_type_is_resource(field->type))
                    continue;
                validate_field_semantic(ctx, field);
                /* Fields carry their own semantic; recurse with it. */
                semantic = &field->semantic;
                elem_semantic_index = semantic->index;
                loc = &field->loc;
            }

            if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
                return;
            hlsl_block_add_instr(block, c);

            if (!(element_load = hlsl_new_load_index(ctx, &rhs->src, c, loc)))
                return;
            hlsl_block_add_instr(block, &element_load->node);

            append_output_copy_recurse(ctx, block, element_load, modifiers, semantic, elem_semantic_index);
        }
    }
    else
    {
        append_output_copy(ctx, block, rhs, modifiers, semantic, semantic_index);
    }
}
|
|
|
|
|
2021-04-27 10:14:20 -07:00
|
|
|
/* Split outputs into two variables representing the temp and semantic
|
2021-03-28 12:46:59 -07:00
|
|
|
* registers, and copy the former to the latter, so that reads from output
|
2021-04-27 10:14:20 -07:00
|
|
|
* variables work. */
|
2023-03-10 13:17:26 -08:00
|
|
|
static void append_output_var_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_var *var)
|
2021-03-28 12:46:59 -07:00
|
|
|
{
|
2022-07-01 09:06:30 -07:00
|
|
|
struct hlsl_ir_load *load;
|
|
|
|
|
2023-03-07 17:04:37 -08:00
|
|
|
/* This redundant load is expected to be deleted later by DCE. */
|
2023-04-14 00:02:14 -07:00
|
|
|
if (!(load = hlsl_new_var_load(ctx, var, &var->loc)))
|
2022-07-01 09:06:30 -07:00
|
|
|
return;
|
2023-03-10 13:17:26 -08:00
|
|
|
hlsl_block_add_instr(block, &load->node);
|
2022-07-01 09:06:30 -07:00
|
|
|
|
2023-03-10 13:17:26 -08:00
|
|
|
append_output_copy_recurse(ctx, block, load, var->storage_modifiers, &var->semantic, var->semantic.index);
|
2021-03-28 12:46:59 -07:00
|
|
|
}
|
|
|
|
|
2023-04-25 06:04:29 -07:00
|
|
|
/* Apply 'func' to every instruction in 'block', recursing into the bodies of
 * if and loop instructions first. Returns true if any invocation of 'func'
 * reported progress. The safe iterator allows 'func' to remove the current
 * instruction. */
bool hlsl_transform_ir(struct hlsl_ctx *ctx, bool (*func)(struct hlsl_ctx *ctx, struct hlsl_ir_node *, void *),
        struct hlsl_block *block, void *context)
{
    struct hlsl_ir_node *instr, *next;
    bool progress = false;

    LIST_FOR_EACH_ENTRY_SAFE(instr, next, &block->instrs, struct hlsl_ir_node, entry)
    {
        switch (instr->type)
        {
            case HLSL_IR_IF:
            {
                struct hlsl_ir_if *iff = hlsl_ir_if(instr);

                progress |= hlsl_transform_ir(ctx, func, &iff->then_block, context);
                progress |= hlsl_transform_ir(ctx, func, &iff->else_block, context);
                break;
            }

            case HLSL_IR_LOOP:
                progress |= hlsl_transform_ir(ctx, func, &hlsl_ir_loop(instr)->body, context);
                break;

            default:
                break;
        }

        progress |= func(ctx, instr, context);
    }

    return progress;
}
|
|
|
|
|
2023-03-06 18:34:10 -08:00
|
|
|
/* Signature for lowering passes: emit replacement instructions for 'instr'
 * into the provided block and return true, or return false to leave it. */
typedef bool (*PFN_lower_func)(struct hlsl_ctx *, struct hlsl_ir_node *, struct hlsl_block *);

/* hlsl_transform_ir() adapter for lower_ir(): runs the lowering callback and,
 * if it produced a replacement block, splices it in before 'instr' and
 * replaces 'instr' with the block's last instruction. */
static bool call_lower_func(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    PFN_lower_func func = context;
    struct hlsl_block block;

    hlsl_block_init(&block);
    if (func(ctx, instr, &block))
    {
        /* The last instruction of the block is the replacement value. */
        struct hlsl_ir_node *replacement = LIST_ENTRY(list_tail(&block.instrs), struct hlsl_ir_node, entry);

        list_move_before(&instr->entry, &block.instrs);
        hlsl_replace_node(instr, replacement);
        return true;
    }
    else
    {
        hlsl_block_cleanup(&block);
        return false;
    }
}
|
|
|
|
|
|
|
|
/* Specific form of transform_ir() for passes which convert a single instruction
 * to a block of one or more instructions. This helper takes care of setting up
 * the block and splicing in the replacement (see call_lower_func(), which
 * performs the substitution via hlsl_replace_node()). */
static bool lower_ir(struct hlsl_ctx *ctx, PFN_lower_func func, struct hlsl_block *block)
{
    return hlsl_transform_ir(ctx, call_lower_func, block, func);
}
|
|
|
|
|
2023-05-04 12:06:58 -07:00
|
|
|
static bool transform_instr_derefs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
|
|
|
|
{
|
|
|
|
bool res;
|
|
|
|
bool (*func)(struct hlsl_ctx *ctx, struct hlsl_deref *, struct hlsl_ir_node *) = context;
|
|
|
|
|
|
|
|
switch(instr->type)
|
|
|
|
{
|
|
|
|
case HLSL_IR_LOAD:
|
|
|
|
res = func(ctx, &hlsl_ir_load(instr)->src, instr);
|
|
|
|
return res;
|
|
|
|
|
|
|
|
case HLSL_IR_STORE:
|
|
|
|
res = func(ctx, &hlsl_ir_store(instr)->lhs, instr);
|
|
|
|
return res;
|
|
|
|
|
|
|
|
case HLSL_IR_RESOURCE_LOAD:
|
|
|
|
res = func(ctx, &hlsl_ir_resource_load(instr)->resource, instr);
|
|
|
|
if (hlsl_ir_resource_load(instr)->sampler.var)
|
|
|
|
res |= func(ctx, &hlsl_ir_resource_load(instr)->sampler, instr);
|
|
|
|
return res;
|
|
|
|
|
|
|
|
case HLSL_IR_RESOURCE_STORE:
|
|
|
|
res = func(ctx, &hlsl_ir_resource_store(instr)->resource, instr);
|
|
|
|
return res;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Apply 'func' to every deref of every instruction in 'block' (recursively),
 * by way of transform_instr_derefs(). Returns true if any call reported
 * progress. */
static bool transform_derefs(struct hlsl_ctx *ctx,
        bool (*func)(struct hlsl_ctx *ctx, struct hlsl_deref *, struct hlsl_ir_node *),
        struct hlsl_block *block)
{
    return hlsl_transform_ir(ctx, transform_instr_derefs, block, func);
}
|
|
|
|
|
2021-09-11 09:20:32 -07:00
|
|
|
struct recursive_call_ctx
|
|
|
|
{
|
|
|
|
const struct hlsl_ir_function_decl **backtrace;
|
|
|
|
size_t count, capacity;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* hlsl_transform_ir() callback: report an error for (directly or indirectly)
 * recursive function calls. Maintains a backtrace of the functions currently
 * being visited in "context"; a call to a function already on the backtrace is
 * a cycle. Always returns false (the IR is never modified). */
static bool find_recursive_calls(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    struct recursive_call_ctx *call_ctx = context;
    struct hlsl_ir_function_decl *decl;
    const struct hlsl_ir_call *call;
    size_t i;

    if (instr->type != HLSL_IR_CALL)
        return false;
    call = hlsl_ir_call(instr);
    decl = call->decl;

    /* If the callee is already on the visitation stack, we have a cycle. */
    for (i = 0; i < call_ctx->count; ++i)
    {
        if (call_ctx->backtrace[i] == decl)
        {
            hlsl_error(ctx, &call->node.loc, VKD3D_SHADER_ERROR_HLSL_RECURSIVE_CALL,
                    "Recursive call to \"%s\".", decl->func->name);
            /* Native returns E_NOTIMPL instead of E_FAIL here. */
            ctx->result = VKD3D_ERROR_NOT_IMPLEMENTED;
            return false;
        }
    }

    /* Push the callee and recurse into its body. */
    if (!hlsl_array_reserve(ctx, (void **)&call_ctx->backtrace, &call_ctx->capacity,
            call_ctx->count + 1, sizeof(*call_ctx->backtrace)))
        return false;
    call_ctx->backtrace[call_ctx->count++] = decl;

    hlsl_transform_ir(ctx, find_recursive_calls, &decl->body, call_ctx);

    /* Pop the callee once its body has been fully visited. */
    --call_ctx->count;

    return false;
}
|
|
|
|
|
2021-09-13 21:08:34 -07:00
|
|
|
/* Insert, immediately after the control-flow instruction "cf_instr", the
 * sequence:
 *
 *     if (func->early_return_var)
 *         break;
 *
 * This propagates an early return out of a nested loop (see lower_return()).
 * Allocation failures are silently ignored; ctx records the OOM. */
static void insert_early_return_break(struct hlsl_ctx *ctx,
        struct hlsl_ir_function_decl *func, struct hlsl_ir_node *cf_instr)
{
    struct hlsl_ir_node *iff, *jump;
    struct hlsl_block then_block;
    struct hlsl_ir_load *load;

    hlsl_block_init(&then_block);

    /* Load the "has returned" flag right after the CF instruction. */
    if (!(load = hlsl_new_var_load(ctx, func->early_return_var, &cf_instr->loc)))
        return;
    list_add_after(&cf_instr->entry, &load->node.entry);

    /* The conditional body is a single "break". */
    if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, NULL, &cf_instr->loc)))
        return;
    hlsl_block_add_instr(&then_block, jump);

    if (!(iff = hlsl_new_if(ctx, &load->node, &then_block, NULL, &cf_instr->loc)))
        return;
    list_add_after(&load->node.entry, &iff->entry);
}
|
|
|
|
|
|
|
|
/* Remove HLSL_IR_JUMP_RETURN calls by altering subsequent control flow. */
static bool lower_return(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *func,
        struct hlsl_block *block, bool in_loop)
{
    struct hlsl_ir_node *return_instr = NULL, *cf_instr = NULL;
    struct hlsl_ir_node *instr, *next;
    bool has_early_return = false;

    /* SM1 has no function calls. SM4 does, but native d3dcompiler inlines
     * everything anyway. We are safest following suit.
     *
     * The basic idea is to keep track of whether the function has executed an
     * early return in a synthesized boolean variable (func->early_return_var)
     * and guard all code after the return on that variable being false. In the
     * case of loops we also replace the return with a break.
     *
     * The following algorithm loops over instructions in a block, recursing
     * into inferior CF blocks, until it hits one of the following two things:
     *
     * - A return statement. In this case, we remove everything after the return
     *   statement in this block. We have to stop and do this in a separate
     *   loop, because instructions must be deleted in reverse order (due to
     *   def-use chains.)
     *
     *   If we're inside of a loop CF block, we can instead just turn the
     *   return into a break, which offers the right semantics—except that it
     *   won't break out of nested loops.
     *
     * - A CF block which contains a return statement. After calling
     *   lower_return() on the CF block body, we stop, pull out everything after
     *   the CF instruction, shove it into an if block, and then lower that if
     *   block.
     *
     *   (We could return a "did we make progress" boolean like hlsl_transform_ir()
     *   and run this pass multiple times, but we already know the only block
     *   that still needs to be addressed, so there's not much point.)
     *
     *   If we're inside of a loop CF block, we again do things differently. We
     *   already turned any returns into breaks. If the block we just processed
     *   was conditional, then "break" did our work for us. If it was a loop,
     *   we need to propagate that break to the outer loop.
     *
     * We return true if there was an early return anywhere in the block we just
     * processed (including CF contained inside that block).
     */

    LIST_FOR_EACH_ENTRY_SAFE(instr, next, &block->instrs, struct hlsl_ir_node, entry)
    {
        if (instr->type == HLSL_IR_CALL)
        {
            struct hlsl_ir_call *call = hlsl_ir_call(instr);

            /* Lower returns in the callee body; it will later be inlined.
             * The callee body is not inside this loop, hence in_loop=false. */
            lower_return(ctx, call->decl, &call->decl->body, false);
        }
        else if (instr->type == HLSL_IR_IF)
        {
            struct hlsl_ir_if *iff = hlsl_ir_if(instr);

            has_early_return |= lower_return(ctx, func, &iff->then_block, in_loop);
            has_early_return |= lower_return(ctx, func, &iff->else_block, in_loop);

            if (has_early_return)
            {
                /* If we're in a loop, we don't need to do anything here. We
                 * turned the return into a break, and that will already skip
                 * anything that comes after this "if" block. */
                if (!in_loop)
                {
                    cf_instr = instr;
                    break;
                }
            }
        }
        else if (instr->type == HLSL_IR_LOOP)
        {
            has_early_return |= lower_return(ctx, func, &hlsl_ir_loop(instr)->body, true);

            if (has_early_return)
            {
                if (in_loop)
                {
                    /* "instr" is a nested loop. "return" breaks out of all
                     * loops, so break out of this one too now. */
                    insert_early_return_break(ctx, func, instr);
                }
                else
                {
                    cf_instr = instr;
                    break;
                }
            }
        }
        else if (instr->type == HLSL_IR_JUMP)
        {
            struct hlsl_ir_jump *jump = hlsl_ir_jump(instr);
            struct hlsl_ir_node *constant, *store;

            if (jump->type == HLSL_IR_JUMP_RETURN)
            {
                /* Record the early return by storing "true" into
                 * func->early_return_var, just before the return itself. */
                if (!(constant = hlsl_new_bool_constant(ctx, true, &jump->node.loc)))
                    return false;
                list_add_before(&jump->node.entry, &constant->entry);

                if (!(store = hlsl_new_simple_store(ctx, func->early_return_var, constant)))
                    return false;
                list_add_after(&constant->entry, &store->entry);

                has_early_return = true;
                if (in_loop)
                {
                    /* Inside a loop the return becomes a break; the guard on
                     * early_return_var handles the code after the loop. */
                    jump->type = HLSL_IR_JUMP_BREAK;
                }
                else
                {
                    return_instr = instr;
                    break;
                }
            }
        }
    }

    if (return_instr)
    {
        /* If we're in a loop, we should have used "break" instead. */
        assert(!in_loop);

        /* Iterate in reverse, to avoid use-after-free when unlinking sources from
         * the "uses" list. */
        LIST_FOR_EACH_ENTRY_SAFE_REV(instr, next, &block->instrs, struct hlsl_ir_node, entry)
        {
            list_remove(&instr->entry);
            hlsl_free_instr(instr);

            /* Yes, we just freed it, but we're comparing pointers. */
            if (instr == return_instr)
                break;
        }
    }
    else if (cf_instr)
    {
        struct list *tail = list_tail(&block->instrs);
        struct hlsl_ir_node *not, *iff;
        struct hlsl_block then_block;
        struct hlsl_ir_load *load;

        /* If we're in a loop, we should have used "break" instead. */
        assert(!in_loop);

        /* Nothing follows the CF instruction; no guard is needed. */
        if (tail == &cf_instr->entry)
            return has_early_return;

        /* Move everything after the CF instruction into a new block, guarded
         * by "!early_return_var", and lower any returns inside it too. */
        hlsl_block_init(&then_block);
        list_move_slice_tail(&then_block.instrs, list_next(&block->instrs, &cf_instr->entry), tail);
        lower_return(ctx, func, &then_block, in_loop);

        if (!(load = hlsl_new_var_load(ctx, func->early_return_var, &cf_instr->loc)))
            return false;
        hlsl_block_add_instr(block, &load->node);

        if (!(not = hlsl_new_unary_expr(ctx, HLSL_OP1_LOGIC_NOT, &load->node, &cf_instr->loc)))
            return false;
        hlsl_block_add_instr(block, not);

        if (!(iff = hlsl_new_if(ctx, not, &then_block, NULL, &cf_instr->loc)))
            return false;
        list_add_tail(&block->instrs, &iff->entry);
    }

    return has_early_return;
}
|
|
|
|
|
2021-09-11 14:56:04 -07:00
|
|
|
/* Remove HLSL_IR_CALL instructions by inlining them. */
|
|
|
|
static bool lower_calls(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
|
|
|
|
{
|
|
|
|
const struct hlsl_ir_function_decl *decl;
|
|
|
|
struct hlsl_ir_call *call;
|
|
|
|
struct hlsl_block block;
|
|
|
|
|
|
|
|
if (instr->type != HLSL_IR_CALL)
|
|
|
|
return false;
|
|
|
|
call = hlsl_ir_call(instr);
|
|
|
|
decl = call->decl;
|
|
|
|
|
|
|
|
if (!decl->has_body)
|
|
|
|
hlsl_error(ctx, &call->node.loc, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED,
|
|
|
|
"Function \"%s\" is not defined.", decl->func->name);
|
|
|
|
|
|
|
|
if (!hlsl_clone_block(ctx, &block, &decl->body))
|
|
|
|
return false;
|
|
|
|
list_move_before(&call->node.entry, &block.instrs);
|
|
|
|
|
|
|
|
list_remove(&call->node.entry);
|
|
|
|
hlsl_free_instr(&call->node);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-04-28 10:03:36 -07:00
|
|
|
/* Build a (dim_count + 1)-component uint coordinate vector from "index" with a
 * zero appended as the last component (the mipmap level for resource loads).
 * The new instructions are linked into the list right after "index"; returns
 * the load of the extended coordinates, or NULL on allocation failure. */
static struct hlsl_ir_node *add_zero_mipmap_level(struct hlsl_ctx *ctx, struct hlsl_ir_node *index,
        const struct vkd3d_shader_location *loc)
{
    unsigned int dim_count = index->data_type->dimx;
    struct hlsl_ir_node *store, *zero;
    struct hlsl_ir_load *coords_load;
    struct hlsl_deref coords_deref;
    struct hlsl_ir_var *coords;

    /* At most 3 spatial dimensions, leaving room for the mip level. */
    assert(dim_count < 4);

    if (!(coords = hlsl_new_synthetic_var(ctx, "coords",
            hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, dim_count + 1), loc)))
        return NULL;

    /* Store the original coordinates into the first dim_count components. */
    hlsl_init_simple_deref_from_var(&coords_deref, coords);
    if (!(store = hlsl_new_store_index(ctx, &coords_deref, NULL, index, (1u << dim_count) - 1, loc)))
        return NULL;
    list_add_after(&index->entry, &store->entry);

    if (!(zero = hlsl_new_uint_constant(ctx, 0, loc)))
        return NULL;
    list_add_after(&store->entry, &zero->entry);

    /* Store zero into the last component (writemask 1u << dim_count). */
    if (!(store = hlsl_new_store_index(ctx, &coords_deref, NULL, zero, 1u << dim_count, loc)))
        return NULL;
    list_add_after(&zero->entry, &store->entry);

    if (!(coords_load = hlsl_new_var_load(ctx, coords, loc)))
        return NULL;
    list_add_after(&store->entry, &coords_load->node.entry);

    return &coords_load->node;
}
|
|
|
|
|
2023-09-06 17:01:49 -07:00
|
|
|
/* hlsl_ir_swizzle nodes that directly point to a matrix value are only a parse-time construct that
 * represents matrix swizzles (e.g. mat._m01_m23) before we know if they will be used in the lhs of
 * an assignment or as a value made from different components of the matrix. The former cases should
 * have already been split into several separate assignments, but the latter are lowered by this
 * pass. */
static bool lower_matrix_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_swizzle *swizzle;
    struct hlsl_ir_load *var_load;
    struct hlsl_deref var_deref;
    struct hlsl_type *matrix_type;
    struct hlsl_ir_var *var;
    unsigned int x, y, k, i;

    if (instr->type != HLSL_IR_SWIZZLE)
        return false;
    swizzle = hlsl_ir_swizzle(instr);
    matrix_type = swizzle->val.node->data_type;
    if (matrix_type->class != HLSL_CLASS_MATRIX)
        return false;

    /* Synthesize a variable holding the swizzle result, one component at a
     * time. */
    if (!(var = hlsl_new_synthetic_var(ctx, "matrix-swizzle", instr->data_type, &instr->loc)))
        return false;
    hlsl_init_simple_deref_from_var(&var_deref, var);

    for (i = 0; i < instr->data_type->dimx; ++i)
    {
        struct hlsl_block store_block;
        struct hlsl_ir_node *load;

        /* Each swizzle component packs a (row, column) pair in two nibbles;
         * "k" is the flat component index within the matrix. */
        y = (swizzle->swizzle >> (8 * i + 4)) & 0xf;
        x = (swizzle->swizzle >> 8 * i) & 0xf;
        k = y * matrix_type->dimx + x;

        if (!(load = hlsl_add_load_component(ctx, block, swizzle->val.node, k, &instr->loc)))
            return false;

        if (!hlsl_new_store_component(ctx, &store_block, &var_deref, i, load))
            return false;
        hlsl_block_add_block(block, &store_block);
    }

    /* Replace the swizzle with a load of the synthesized variable. */
    if (!(var_load = hlsl_new_var_load(ctx, var, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, &var_load->node);

    return true;
}
|
|
|
|
|
2023-03-10 17:09:58 -08:00
|
|
|
/* hlsl_ir_index nodes are a parse-time construct used to represent array indexing and struct
|
|
|
|
* record access before knowing if they will be used in the lhs of an assignment --in which case
|
|
|
|
* they are lowered into a deref-- or as the load of an element within a larger value.
|
|
|
|
* For the latter case, this pass takes care of lowering hlsl_ir_indexes into individual
|
2023-03-10 12:23:49 -08:00
|
|
|
* hlsl_ir_loads, or individual hlsl_ir_resource_loads, in case the indexing is a
|
|
|
|
* resource access. */
|
2023-06-25 16:46:10 -07:00
|
|
|
static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
|
2023-03-10 17:09:58 -08:00
|
|
|
{
|
2022-11-10 18:55:03 -08:00
|
|
|
struct hlsl_ir_node *val, *store;
|
2023-03-10 17:09:58 -08:00
|
|
|
struct hlsl_deref var_deref;
|
|
|
|
struct hlsl_ir_index *index;
|
|
|
|
struct hlsl_ir_load *load;
|
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
|
|
|
|
if (instr->type != HLSL_IR_INDEX)
|
|
|
|
return false;
|
|
|
|
index = hlsl_ir_index(instr);
|
|
|
|
val = index->val.node;
|
|
|
|
|
2023-03-10 12:23:49 -08:00
|
|
|
if (hlsl_index_is_resource_access(index))
|
|
|
|
{
|
2023-04-28 10:03:36 -07:00
|
|
|
unsigned int dim_count = hlsl_sampler_dim_count(val->data_type->sampler_dim);
|
|
|
|
struct hlsl_ir_node *coords = index->idx.node;
|
2023-03-10 12:23:49 -08:00
|
|
|
struct hlsl_resource_load_params params = {0};
|
|
|
|
struct hlsl_ir_node *load;
|
|
|
|
|
2023-04-28 10:03:36 -07:00
|
|
|
assert(coords->data_type->class == HLSL_CLASS_VECTOR);
|
|
|
|
assert(coords->data_type->base_type == HLSL_TYPE_UINT);
|
|
|
|
assert(coords->data_type->dimx == dim_count);
|
|
|
|
|
|
|
|
if (!(coords = add_zero_mipmap_level(ctx, coords, &instr->loc)))
|
|
|
|
return false;
|
|
|
|
|
2023-03-10 12:23:49 -08:00
|
|
|
params.type = HLSL_RESOURCE_LOAD;
|
|
|
|
params.resource = val;
|
2023-04-28 10:03:36 -07:00
|
|
|
params.coords = coords;
|
2023-03-10 12:23:49 -08:00
|
|
|
params.format = val->data_type->e.resource_format;
|
|
|
|
|
|
|
|
if (!(load = hlsl_new_resource_load(ctx, ¶ms, &instr->loc)))
|
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, load);
|
2023-03-10 12:23:49 -08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-03-10 17:09:58 -08:00
|
|
|
if (!(var = hlsl_new_synthetic_var(ctx, "index-val", val->data_type, &instr->loc)))
|
|
|
|
return false;
|
|
|
|
hlsl_init_simple_deref_from_var(&var_deref, var);
|
|
|
|
|
|
|
|
if (!(store = hlsl_new_simple_store(ctx, var, val)))
|
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, store);
|
2023-03-10 17:09:58 -08:00
|
|
|
|
|
|
|
if (hlsl_index_is_noncontiguous(index))
|
|
|
|
{
|
|
|
|
struct hlsl_ir_node *mat = index->val.node;
|
|
|
|
struct hlsl_deref row_deref;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
assert(!hlsl_type_is_row_major(mat->data_type));
|
|
|
|
|
|
|
|
if (!(var = hlsl_new_synthetic_var(ctx, "row", instr->data_type, &instr->loc)))
|
|
|
|
return false;
|
|
|
|
hlsl_init_simple_deref_from_var(&row_deref, var);
|
|
|
|
|
|
|
|
for (i = 0; i < mat->data_type->dimx; ++i)
|
|
|
|
{
|
2022-11-10 19:06:04 -08:00
|
|
|
struct hlsl_ir_node *c;
|
2023-03-10 17:09:58 -08:00
|
|
|
|
|
|
|
if (!(c = hlsl_new_uint_constant(ctx, i, &instr->loc)))
|
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, c);
|
2023-03-10 17:09:58 -08:00
|
|
|
|
2022-11-10 19:06:04 -08:00
|
|
|
if (!(load = hlsl_new_load_index(ctx, &var_deref, c, &instr->loc)))
|
2023-03-10 17:09:58 -08:00
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, &load->node);
|
2023-03-10 17:09:58 -08:00
|
|
|
|
|
|
|
if (!(load = hlsl_new_load_index(ctx, &load->src, index->idx.node, &instr->loc)))
|
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, &load->node);
|
2023-03-10 17:09:58 -08:00
|
|
|
|
2022-11-10 19:06:04 -08:00
|
|
|
if (!(store = hlsl_new_store_index(ctx, &row_deref, c, &load->node, 0, &instr->loc)))
|
2023-03-10 17:09:58 -08:00
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, store);
|
2023-03-10 17:09:58 -08:00
|
|
|
}
|
|
|
|
|
2023-04-14 00:02:14 -07:00
|
|
|
if (!(load = hlsl_new_var_load(ctx, var, &instr->loc)))
|
2023-03-10 17:09:58 -08:00
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, &load->node);
|
2023-03-10 17:09:58 -08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (!(load = hlsl_new_load_index(ctx, &var_deref, index->idx.node, &instr->loc)))
|
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, &load->node);
|
2023-03-10 17:09:58 -08:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-12-01 08:14:55 -08:00
|
|
|
/* Lower casts from vec1 to vecN to swizzles. */
static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    const struct hlsl_type *src_type, *dst_type;
    struct hlsl_type *dst_scalar_type;
    struct hlsl_ir_expr *cast;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    cast = hlsl_ir_expr(instr);
    if (cast->op != HLSL_OP1_CAST)
        return false;
    src_type = cast->operands[0].node->data_type;
    dst_type = cast->node.data_type;

    /* Only scalar/vector (class <= HLSL_CLASS_VECTOR) casts from a
     * single-component source are broadcasts. */
    if (src_type->class <= HLSL_CLASS_VECTOR && dst_type->class <= HLSL_CLASS_VECTOR && src_type->dimx == 1)
    {
        struct hlsl_ir_node *new_cast, *swizzle;

        dst_scalar_type = hlsl_get_scalar_type(ctx, dst_type->base_type);
        /* We need to preserve the cast since it might be doing more than just
         * turning the scalar into a vector. */
        if (!(new_cast = hlsl_new_cast(ctx, cast->operands[0].node, dst_scalar_type, &cast->node.loc)))
            return false;
        hlsl_block_add_instr(block, new_cast);

        if (dst_type->dimx != 1)
        {
            /* Replicate the scalar across all destination components. */
            if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), dst_type->dimx, new_cast, &cast->node.loc)))
                return false;
            hlsl_block_add_instr(block, swizzle);
        }

        return true;
    }

    return false;
}
|
|
|
|
|
vkd3d-shader/hlsl: Replace loads with constants in copy prop.
If a hlsl_ir_load loads a variable whose components are stored from different
instructions, copy propagation doesn't replace it.
But if all these instructions are constants (which currently is the case
for value constructors), the load could be replaced with a constant value.
Which is expected in some other instructions, e.g. texel_offsets when
using aoffimmi modifiers.
For instance, this shader:
```
sampler s;
Texture2D t;
float4 main() : sv_target
{
return t.Gather(s, float2(0.6, 0.6), int2(0, 0));
}
```
results in the following IR before applying the patch:
```
float | 6.00000024e-01
float | 6.00000024e-01
uint | 0
| = (<constructor-2>[@4].x @2)
uint | 1
| = (<constructor-2>[@6].x @3)
float2 | <constructor-2>
int | 0
int | 0
uint | 0
| = (<constructor-5>[@11].x @9)
uint | 1
| = (<constructor-5>[@13].x @10)
int2 | <constructor-5>
float4 | gather_red(resource = t, sampler = s, coords = @8, offset = @15)
| return
| = (<output-sv_target0> @16)
```
and this IR afterwards:
```
float2 | {6.00000024e-01 6.00000024e-01 }
int2 | {0 0 }
float4 | gather_red(resource = t, sampler = s, coords = @2, offset = @3)
| return
| = (<output-sv_target0> @4)
```
2022-11-17 12:49:28 -08:00
|
|
|
/*
|
|
|
|
* Copy propagation. The basic idea is to recognize instruction sequences of the
|
|
|
|
* form:
|
|
|
|
*
|
|
|
|
* 2: <any instruction>
|
|
|
|
* 3: v = @2
|
|
|
|
* 4: load(v)
|
|
|
|
*
|
|
|
|
* and replace the load (@4) with the original instruction (@2).
|
|
|
|
* This works for multiple components, even if they're written using separate
|
|
|
|
* store instructions, as long as the rhs is the same in every case. This basic
|
|
|
|
* detection is implemented by copy_propagation_replace_with_single_instr().
|
|
|
|
*
|
2023-01-12 15:26:03 -08:00
|
|
|
* In some cases, the load itself might not have a single source, but a
|
|
|
|
* subsequent swizzle might; hence we also try to replace swizzles of loads.
|
|
|
|
*
|
vkd3d-shader/hlsl: Replace loads with constants in copy prop.
If a hlsl_ir_load loads a variable whose components are stored from different
instructions, copy propagation doesn't replace it.
But if all these instructions are constants (which currently is the case
for value constructors), the load could be replaced with a constant value.
Which is expected in some other instructions, e.g. texel_offsets when
using aoffimmi modifiers.
For instance, this shader:
```
sampler s;
Texture2D t;
float4 main() : sv_target
{
return t.Gather(s, float2(0.6, 0.6), int2(0, 0));
}
```
results in the following IR before applying the patch:
```
float | 6.00000024e-01
float | 6.00000024e-01
uint | 0
| = (<constructor-2>[@4].x @2)
uint | 1
| = (<constructor-2>[@6].x @3)
float2 | <constructor-2>
int | 0
int | 0
uint | 0
| = (<constructor-5>[@11].x @9)
uint | 1
| = (<constructor-5>[@13].x @10)
int2 | <constructor-5>
float4 | gather_red(resource = t, sampler = s, coords = @8, offset = @15)
| return
| = (<output-sv_target0> @16)
```
and this IR afterwards:
```
float2 | {6.00000024e-01 6.00000024e-01 }
int2 | {0 0 }
float4 | gather_red(resource = t, sampler = s, coords = @2, offset = @3)
| return
| = (<output-sv_target0> @4)
```
2022-11-17 12:49:28 -08:00
|
|
|
* We use the same infrastructure to implement a more specialized
|
|
|
|
* transformation. We recognize sequences of the form:
|
|
|
|
*
|
|
|
|
* 2: 123
|
|
|
|
* 3: var.x = @2
|
|
|
|
* 4: 345
|
|
|
|
* 5: var.y = @4
|
|
|
|
* 6: load(var.xy)
|
|
|
|
*
|
|
|
|
* where the load (@6) originates from different sources but that are constant,
|
|
|
|
* and transform it into a single constant vector. This latter pass is done
|
|
|
|
* by copy_propagation_replace_with_constant_vector().
|
|
|
|
*
|
|
|
|
* This is a specialized form of vectorization, and begs the question: why does
|
|
|
|
* the load need to be involved? Can we just vectorize the stores into a single
|
|
|
|
* instruction, and then use "normal" copy-prop to convert that into a single
|
|
|
|
* vector?
|
|
|
|
*
|
|
|
|
* In general, the answer is yes, but there is a special case which necessitates
|
|
|
|
* the use of this transformation: non-uniform control flow. Copy-prop can act
|
|
|
|
* across some control flow, and in cases like the following:
|
|
|
|
*
|
|
|
|
* 2: 123
|
|
|
|
* 3: var.x = @2
|
|
|
|
* 4: if (...)
|
|
|
|
* 5: 456
|
|
|
|
* 6: var.y = @5
|
|
|
|
* 7: load(var.xy)
|
|
|
|
*
|
|
|
|
* we can copy-prop the load (@7) into a constant vector {123, 456}, but we
|
|
|
|
* cannot easily vectorize the stores @3 and @6.
|
|
|
|
*/
|
|
|
|
|
2022-04-28 06:32:05 -07:00
|
|
|
/* Tracking state of a single variable component during copy propagation. */
enum copy_propagation_value_state
{
    /* Not written in this scope; lookups fall through to the parent state. */
    VALUE_STATE_NOT_WRITTEN = 0,
    /* Written from a known instruction; the stored value can be propagated. */
    VALUE_STATE_STATICALLY_WRITTEN,
    /* Written in a way that cannot be tracked; propagation is blocked. */
    VALUE_STATE_DYNAMICALLY_WRITTEN,
};
|
|
|
|
|
2021-12-01 08:14:50 -08:00
|
|
|
/* Last known value of one component of a variable. */
struct copy_propagation_value
{
    enum copy_propagation_value_state state;
    /* Instruction that produced the value, and which of its components
     * corresponds to this variable component. */
    struct hlsl_ir_node *node;
    unsigned int component;
};
|
|
|
|
|
|
|
|
/* Per-variable record in the copy-propagation state, keyed by "var" in an
 * rb-tree; "values" holds one entry per component of the variable's type. */
struct copy_propagation_var_def
{
    struct rb_entry entry;
    struct hlsl_ir_var *var;
    /* Flexible array; sized via hlsl_type_component_count(var->data_type). */
    struct copy_propagation_value values[];
};
|
|
|
|
|
|
|
|
/* Copy-propagation state for one scope; "parent" links to the enclosing
 * scope's state, which lookups fall back to for unwritten components. */
struct copy_propagation_state
{
    struct rb_tree var_defs;
    struct copy_propagation_state *parent;
};
|
|
|
|
|
|
|
|
static int copy_propagation_var_def_compare(const void *key, const struct rb_entry *entry)
|
|
|
|
{
|
|
|
|
struct copy_propagation_var_def *var_def = RB_ENTRY_VALUE(entry, struct copy_propagation_var_def, entry);
|
|
|
|
uintptr_t key_int = (uintptr_t)key, entry_int = (uintptr_t)var_def->var;
|
|
|
|
|
|
|
|
return (key_int > entry_int) - (key_int < entry_int);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void copy_propagation_var_def_destroy(struct rb_entry *entry, void *context)
|
|
|
|
{
|
|
|
|
struct copy_propagation_var_def *var_def = RB_ENTRY_VALUE(entry, struct copy_propagation_var_def, entry);
|
|
|
|
|
|
|
|
vkd3d_free(var_def);
|
|
|
|
}
|
|
|
|
|
2022-04-28 06:32:05 -07:00
|
|
|
static struct copy_propagation_value *copy_propagation_get_value(const struct copy_propagation_state *state,
|
2022-07-20 12:37:07 -07:00
|
|
|
const struct hlsl_ir_var *var, unsigned int component)
|
2021-12-01 08:14:50 -08:00
|
|
|
{
|
2022-04-28 06:32:05 -07:00
|
|
|
for (; state; state = state->parent)
|
|
|
|
{
|
|
|
|
struct rb_entry *entry = rb_get(&state->var_defs, var);
|
|
|
|
if (entry)
|
|
|
|
{
|
|
|
|
struct copy_propagation_var_def *var_def = RB_ENTRY_VALUE(entry, struct copy_propagation_var_def, entry);
|
2022-07-20 12:37:07 -07:00
|
|
|
unsigned int component_count = hlsl_type_component_count(var->data_type);
|
|
|
|
enum copy_propagation_value_state state;
|
2022-04-28 06:32:05 -07:00
|
|
|
|
2022-07-20 12:37:07 -07:00
|
|
|
assert(component < component_count);
|
|
|
|
state = var_def->values[component].state;
|
2022-04-28 06:32:05 -07:00
|
|
|
|
|
|
|
switch (state)
|
|
|
|
{
|
|
|
|
case VALUE_STATE_STATICALLY_WRITTEN:
|
|
|
|
return &var_def->values[component];
|
|
|
|
case VALUE_STATE_DYNAMICALLY_WRITTEN:
|
|
|
|
return NULL;
|
|
|
|
case VALUE_STATE_NOT_WRITTEN:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-12-01 08:14:50 -08:00
|
|
|
|
2022-04-28 06:32:05 -07:00
|
|
|
return NULL;
|
2021-12-01 08:14:50 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Get or create the var_def record for "var" in the current scope's state.
 * Newly created records are zero-initialized by hlsl_alloc(), i.e. every
 * component starts as VALUE_STATE_NOT_WRITTEN. Returns NULL on OOM. */
static struct copy_propagation_var_def *copy_propagation_create_var_def(struct hlsl_ctx *ctx,
        struct copy_propagation_state *state, struct hlsl_ir_var *var)
{
    struct rb_entry *entry = rb_get(&state->var_defs, var);
    struct copy_propagation_var_def *var_def;
    unsigned int component_count = hlsl_type_component_count(var->data_type);
    int res;

    if (entry)
        return RB_ENTRY_VALUE(entry, struct copy_propagation_var_def, entry);

    /* Allocate the flexible "values" array with one slot per component. */
    if (!(var_def = hlsl_alloc(ctx, offsetof(struct copy_propagation_var_def, values[component_count]))))
        return NULL;

    var_def->var = var;

    res = rb_put(&state->var_defs, var, &var_def->entry);
    /* Insertion cannot fail: we just checked that the key is absent. */
    assert(!res);

    return var_def;
}
|
|
|
|
|
2022-04-28 06:32:05 -07:00
|
|
|
static void copy_propagation_invalidate_variable(struct copy_propagation_var_def *var_def,
|
2022-07-20 12:37:07 -07:00
|
|
|
unsigned int comp, unsigned char writemask)
|
2022-04-28 06:32:05 -07:00
|
|
|
{
|
|
|
|
unsigned i;
|
|
|
|
|
2022-07-20 12:37:07 -07:00
|
|
|
TRACE("Invalidate variable %s[%u]%s.\n", var_def->var->name, comp, debug_hlsl_writemask(writemask));
|
2022-04-28 06:32:05 -07:00
|
|
|
|
|
|
|
for (i = 0; i < 4; ++i)
|
|
|
|
{
|
|
|
|
if (writemask & (1u << i))
|
2022-07-20 12:37:07 -07:00
|
|
|
var_def->values[comp + i].state = VALUE_STATE_DYNAMICALLY_WRITTEN;
|
2022-04-28 06:32:05 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-20 14:42:13 -07:00
|
|
|
/* Recursive helper for copy_propagation_invalidate_variable_from_deref():
 * walk the deref path, accumulating the flat component offset, and invalidate
 * the components the deref may write. A non-constant path index invalidates
 * every element it could address (conservative). */
static void copy_propagation_invalidate_variable_from_deref_recurse(struct hlsl_ctx *ctx,
        struct copy_propagation_var_def *var_def, const struct hlsl_deref *deref,
        struct hlsl_type *type, unsigned int depth, unsigned int comp_start, unsigned char writemask)
{
    unsigned int i, subtype_comp_count;
    struct hlsl_ir_node *path_node;
    struct hlsl_type *subtype;

    /* End of the path: invalidate at the accumulated offset. */
    if (depth == deref->path_len)
    {
        copy_propagation_invalidate_variable(var_def, comp_start, writemask);
        return;
    }

    path_node = deref->path[depth].node;
    subtype = hlsl_get_element_type_from_path_index(ctx, type, path_node);

    if (type->class == HLSL_CLASS_STRUCT)
    {
        /* Struct field indices are always constants. */
        unsigned int idx = hlsl_ir_constant(path_node)->value.u[0].u;

        /* Skip past the components of the preceding fields. */
        for (i = 0; i < idx; ++i)
            comp_start += hlsl_type_component_count(type->e.record.fields[i].type);

        copy_propagation_invalidate_variable_from_deref_recurse(ctx, var_def, deref, subtype,
                depth + 1, comp_start, writemask);
    }
    else
    {
        subtype_comp_count = hlsl_type_component_count(subtype);

        /* NOTE(review): unlike the struct branch, both array branches below
         * compute the child offset without adding the incoming comp_start —
         * verify whether comp_start can be nonzero here (e.g. an array nested
         * inside a struct) and whether it should be included. */
        if (path_node->type == HLSL_IR_CONSTANT)
        {
            copy_propagation_invalidate_variable_from_deref_recurse(ctx, var_def, deref, subtype,
                    depth + 1, hlsl_ir_constant(path_node)->value.u[0].u * subtype_comp_count, writemask);
        }
        else
        {
            /* Non-constant index: conservatively invalidate every element. */
            for (i = 0; i < hlsl_type_element_count(type); ++i)
            {
                copy_propagation_invalidate_variable_from_deref_recurse(ctx, var_def, deref, subtype,
                        depth + 1, i * subtype_comp_count, writemask);
            }
        }
    }
}
|
|
|
|
|
|
|
|
static void copy_propagation_invalidate_variable_from_deref(struct hlsl_ctx *ctx,
|
|
|
|
struct copy_propagation_var_def *var_def, const struct hlsl_deref *deref, unsigned char writemask)
|
|
|
|
{
|
|
|
|
copy_propagation_invalidate_variable_from_deref_recurse(ctx, var_def, deref, deref->var->data_type,
|
|
|
|
0, 0, writemask);
|
2021-12-01 08:14:50 -08:00
|
|
|
}
|
|
|
|
|
2022-07-20 12:37:07 -07:00
|
|
|
static void copy_propagation_set_value(struct copy_propagation_var_def *var_def, unsigned int comp,
|
|
|
|
unsigned char writemask, struct hlsl_ir_node *instr)
|
2021-12-01 08:14:50 -08:00
|
|
|
{
|
|
|
|
unsigned int i, j = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < 4; ++i)
|
|
|
|
{
|
|
|
|
if (writemask & (1u << i))
|
|
|
|
{
|
|
|
|
TRACE("Variable %s[%u] is written by instruction %p%s.\n",
|
2022-07-20 12:37:07 -07:00
|
|
|
var_def->var->name, comp + i, instr, debug_hlsl_writemask(1u << i));
|
|
|
|
var_def->values[comp + i].state = VALUE_STATE_STATICALLY_WRITTEN;
|
|
|
|
var_def->values[comp + i].node = instr;
|
|
|
|
var_def->values[comp + i].component = j++;
|
2021-12-01 08:14:50 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-12 14:28:44 -08:00
|
|
|
/* Try to replace "instr" (a load, possibly viewed through "swizzle") with the
 * single instruction that wrote all of the loaded components, plus a swizzle
 * rearranging that instruction's components as needed. Fails if the
 * components come from more than one source instruction, if any component was
 * dynamically written, or if the deref's component range cannot be determined
 * statically. Returns true if the replacement was performed. */
static bool copy_propagation_replace_with_single_instr(struct hlsl_ctx *ctx,
        const struct copy_propagation_state *state, const struct hlsl_deref *deref,
        unsigned int swizzle, struct hlsl_ir_node *instr)
{
    const unsigned int instr_component_count = hlsl_type_component_count(instr->data_type);
    const struct hlsl_ir_var *var = deref->var;
    struct hlsl_ir_node *new_instr = NULL;
    unsigned int start, count, i;
    unsigned int ret_swizzle = 0;

    /* Bail out if the deref uses a dynamic (non-constant) offset. */
    if (!hlsl_component_index_range_from_deref(ctx, deref, &start, &count))
        return false;

    for (i = 0; i < instr_component_count; ++i)
    {
        struct copy_propagation_value *value;

        /* No statically-written value recorded for this component. */
        if (!(value = copy_propagation_get_value(state, var, start + hlsl_swizzle_get_component(swizzle, i))))
            return false;

        if (!new_instr)
        {
            new_instr = value->node;
        }
        else if (new_instr != value->node)
        {
            /* All components must originate from the same instruction. */
            TRACE("No single source for propagating load from %s[%u-%u]%s\n",
                    var->name, start, start + count, debug_hlsl_swizzle(swizzle, instr_component_count));
            return false;
        }
        /* Remember which source component feeds destination component i. */
        ret_swizzle |= value->component << HLSL_SWIZZLE_SHIFT(i);
    }

    TRACE("Load from %s[%u-%u]%s propagated as instruction %p%s.\n",
            var->name, start, start + count, debug_hlsl_swizzle(swizzle, instr_component_count),
            new_instr, debug_hlsl_swizzle(ret_swizzle, instr_component_count));

    /* Objects cannot be swizzled; numeric results get an explicit swizzle
     * node inserted before "instr" to reorder the source's components. */
    if (instr->data_type->class != HLSL_CLASS_OBJECT)
    {
        struct hlsl_ir_node *swizzle_node;

        if (!(swizzle_node = hlsl_new_swizzle(ctx, ret_swizzle, instr_component_count, new_instr, &instr->loc)))
            return false;
        list_add_before(&instr->entry, &swizzle_node->entry);
        new_instr = swizzle_node;
    }

    hlsl_replace_node(instr, new_instr);
    return true;
}
|
|
|
|
|
vkd3d-shader/hlsl: Replace loads with constants in copy prop.
If a hlsl_ir_load loads a variable whose components are stored from different
instructions, copy propagation doesn't replace it.
But if all these instructions are constants (which currently is the case
for value constructors), the load could be replaced with a constant value.
Which is expected in some other instructions, e.g. texel_offsets when
using aoffimmi modifiers.
For instance, this shader:
```
sampler s;
Texture2D t;
float4 main() : sv_target
{
return t.Gather(s, float2(0.6, 0.6), int2(0, 0));
}
```
results in the following IR before applying the patch:
```
float | 6.00000024e-01
float | 6.00000024e-01
uint | 0
| = (<constructor-2>[@4].x @2)
uint | 1
| = (<constructor-2>[@6].x @3)
float2 | <constructor-2>
int | 0
int | 0
uint | 0
| = (<constructor-5>[@11].x @9)
uint | 1
| = (<constructor-5>[@13].x @10)
int2 | <constructor-5>
float4 | gather_red(resource = t, sampler = s, coords = @8, offset = @15)
| return
| = (<output-sv_target0> @16)
```
and this IR afterwards:
```
float2 | {6.00000024e-01 6.00000024e-01 }
int2 | {0 0 }
float4 | gather_red(resource = t, sampler = s, coords = @2, offset = @3)
| return
| = (<output-sv_target0> @4)
```
2022-11-17 12:49:28 -08:00
|
|
|
/* Try to replace "instr" (a load, possibly viewed through "swizzle") with a
 * single constant vector, when every loaded component was statically written
 * by an HLSL_IR_CONSTANT instruction. Unlike
 * copy_propagation_replace_with_single_instr(), the constant components may
 * come from different source instructions, since their values are folded into
 * one new constant. Returns true if the replacement was performed. */
static bool copy_propagation_replace_with_constant_vector(struct hlsl_ctx *ctx,
        const struct copy_propagation_state *state, const struct hlsl_deref *deref,
        unsigned int swizzle, struct hlsl_ir_node *instr)
{
    const unsigned int instr_component_count = hlsl_type_component_count(instr->data_type);
    const struct hlsl_ir_var *var = deref->var;
    struct hlsl_constant_value values = {0};
    unsigned int start, count, i;
    struct hlsl_ir_node *cons;

    /* Bail out if the deref uses a dynamic (non-constant) offset. */
    if (!hlsl_component_index_range_from_deref(ctx, deref, &start, &count))
        return false;

    for (i = 0; i < instr_component_count; ++i)
    {
        struct copy_propagation_value *value;

        /* Every component must have a recorded value, and it must be a
         * constant. */
        if (!(value = copy_propagation_get_value(state, var, start + hlsl_swizzle_get_component(swizzle, i)))
                || value->node->type != HLSL_IR_CONSTANT)
            return false;

        values.u[i] = hlsl_ir_constant(value->node)->value.u[value->component];
    }

    if (!(cons = hlsl_new_constant(ctx, instr->data_type, &values, &instr->loc)))
        return false;
    list_add_before(&instr->entry, &cons->entry);

    TRACE("Load from %s[%u-%u]%s turned into a constant %p.\n",
            var->name, start, start + count, debug_hlsl_swizzle(swizzle, instr_component_count), cons);

    hlsl_replace_node(instr, cons);
    return true;
}
|
|
|
|
|
2022-01-21 13:22:29 -08:00
|
|
|
static bool copy_propagation_transform_load(struct hlsl_ctx *ctx,
|
|
|
|
struct hlsl_ir_load *load, struct copy_propagation_state *state)
|
2021-12-01 08:14:50 -08:00
|
|
|
{
|
2023-01-12 14:28:44 -08:00
|
|
|
struct hlsl_type *type = load->node.data_type;
|
2021-12-01 08:14:50 -08:00
|
|
|
|
2022-11-11 17:31:55 -08:00
|
|
|
switch (type->class)
|
2022-01-21 13:22:26 -08:00
|
|
|
{
|
|
|
|
case HLSL_CLASS_SCALAR:
|
|
|
|
case HLSL_CLASS_VECTOR:
|
|
|
|
case HLSL_CLASS_OBJECT:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case HLSL_CLASS_MATRIX:
|
|
|
|
case HLSL_CLASS_ARRAY:
|
|
|
|
case HLSL_CLASS_STRUCT:
|
|
|
|
/* FIXME: Actually we shouldn't even get here, but we don't split
|
|
|
|
* matrices yet. */
|
|
|
|
return false;
|
|
|
|
}
|
2021-12-01 08:14:50 -08:00
|
|
|
|
2023-01-12 15:26:03 -08:00
|
|
|
if (copy_propagation_replace_with_constant_vector(ctx, state, &load->src, HLSL_SWIZZLE(X, Y, Z, W), &load->node))
|
vkd3d-shader/hlsl: Replace loads with constants in copy prop.
If a hlsl_ir_load loads a variable whose components are stored from different
instructions, copy propagation doesn't replace it.
But if all these instructions are constants (which currently is the case
for value constructors), the load could be replaced with a constant value.
Which is expected in some other instructions, e.g. texel_offsets when
using aoffimmi modifiers.
For instance, this shader:
```
sampler s;
Texture2D t;
float4 main() : sv_target
{
return t.Gather(s, float2(0.6, 0.6), int2(0, 0));
}
```
results in the following IR before applying the patch:
```
float | 6.00000024e-01
float | 6.00000024e-01
uint | 0
| = (<constructor-2>[@4].x @2)
uint | 1
| = (<constructor-2>[@6].x @3)
float2 | <constructor-2>
int | 0
int | 0
uint | 0
| = (<constructor-5>[@11].x @9)
uint | 1
| = (<constructor-5>[@13].x @10)
int2 | <constructor-5>
float4 | gather_red(resource = t, sampler = s, coords = @8, offset = @15)
| return
| = (<output-sv_target0> @16)
```
and this IR afterwards:
```
float2 | {6.00000024e-01 6.00000024e-01 }
int2 | {0 0 }
float4 | gather_red(resource = t, sampler = s, coords = @2, offset = @3)
| return
| = (<output-sv_target0> @4)
```
2022-11-17 12:49:28 -08:00
|
|
|
return true;
|
|
|
|
|
2023-01-12 15:26:03 -08:00
|
|
|
if (copy_propagation_replace_with_single_instr(ctx, state, &load->src, HLSL_SWIZZLE(X, Y, Z, W), &load->node))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool copy_propagation_transform_swizzle(struct hlsl_ctx *ctx,
|
|
|
|
struct hlsl_ir_swizzle *swizzle, struct copy_propagation_state *state)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_load *load;
|
|
|
|
|
|
|
|
if (swizzle->val.node->type != HLSL_IR_LOAD)
|
|
|
|
return false;
|
|
|
|
load = hlsl_ir_load(swizzle->val.node);
|
|
|
|
|
|
|
|
if (copy_propagation_replace_with_constant_vector(ctx, state, &load->src, swizzle->swizzle, &swizzle->node))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (copy_propagation_replace_with_single_instr(ctx, state, &load->src, swizzle->swizzle, &swizzle->node))
|
2023-01-12 14:28:44 -08:00
|
|
|
return true;
|
2021-12-01 08:14:50 -08:00
|
|
|
|
2023-01-12 14:28:44 -08:00
|
|
|
return false;
|
2021-12-01 08:14:50 -08:00
|
|
|
}
|
|
|
|
|
2022-01-21 13:22:29 -08:00
|
|
|
/* Try to redirect an object deref (e.g. a resource or sampler reference) to
 * the uniform variable it was loaded from, replacing "deref" in place.
 * Returns true if the deref was redirected. */
static bool copy_propagation_transform_object_load(struct hlsl_ctx *ctx,
        struct hlsl_deref *deref, struct copy_propagation_state *state)
{
    struct copy_propagation_value *value;
    struct hlsl_ir_load *load;
    unsigned int start, count;

    if (!hlsl_component_index_range_from_deref(ctx, deref, &start, &count))
        return false;
    /* An object always occupies exactly one component. */
    assert(count == 1);

    if (!(value = copy_propagation_get_value(state, deref->var, start)))
        return false;
    assert(value->component == 0);

    /* Only HLSL_IR_LOAD can produce an object. */
    load = hlsl_ir_load(value->node);

    /* As we are replacing the instruction's deref (with the one in the hlsl_ir_load) and not the
     * instruction itself, we won't be able to rely on the value retrieved by
     * copy_propagation_get_value() for the new deref in subsequent iterations of copy propagation.
     * This is because another value may be written to that deref between the hlsl_ir_load and
     * this instruction.
     *
     * For this reason, we only replace the new deref when it corresponds to a uniform variable,
     * which cannot be written to.
     *
     * In a valid shader, all object references must resolve statically to a single uniform object.
     * If this is the case, we can expect copy propagation on regular store/loads and the other
     * compilation passes to replace all hlsl_ir_loads with loads to uniform objects, so this
     * implementation is complete, even with this restriction.
     */
    if (!load->src.var->is_uniform)
    {
        TRACE("Ignoring load from non-uniform object variable %s\n", load->src.var->name);
        return false;
    }

    /* Release the old deref's references before overwriting it with a copy of
     * the load's source deref. */
    hlsl_cleanup_deref(deref);
    hlsl_copy_deref(ctx, deref, &load->src);

    return true;
}
|
|
|
|
|
|
|
|
static bool copy_propagation_transform_resource_load(struct hlsl_ctx *ctx,
|
|
|
|
struct hlsl_ir_resource_load *load, struct copy_propagation_state *state)
|
|
|
|
{
|
|
|
|
bool progress = false;
|
|
|
|
|
|
|
|
progress |= copy_propagation_transform_object_load(ctx, &load->resource, state);
|
|
|
|
if (load->sampler.var)
|
|
|
|
progress |= copy_propagation_transform_object_load(ctx, &load->sampler, state);
|
|
|
|
return progress;
|
|
|
|
}
|
|
|
|
|
2022-04-05 16:26:22 -07:00
|
|
|
static bool copy_propagation_transform_resource_store(struct hlsl_ctx *ctx,
|
|
|
|
struct hlsl_ir_resource_store *store, struct copy_propagation_state *state)
|
|
|
|
{
|
|
|
|
bool progress = false;
|
|
|
|
|
|
|
|
progress |= copy_propagation_transform_object_load(ctx, &store->resource, state);
|
|
|
|
return progress;
|
|
|
|
}
|
|
|
|
|
2021-12-01 08:14:50 -08:00
|
|
|
static void copy_propagation_record_store(struct hlsl_ctx *ctx, struct hlsl_ir_store *store,
|
|
|
|
struct copy_propagation_state *state)
|
|
|
|
{
|
|
|
|
struct copy_propagation_var_def *var_def;
|
|
|
|
struct hlsl_deref *lhs = &store->lhs;
|
|
|
|
struct hlsl_ir_var *var = lhs->var;
|
2022-07-20 12:37:07 -07:00
|
|
|
unsigned int start, count;
|
2021-12-01 08:14:50 -08:00
|
|
|
|
|
|
|
if (!(var_def = copy_propagation_create_var_def(ctx, state, var)))
|
|
|
|
return;
|
|
|
|
|
2022-07-20 12:37:07 -07:00
|
|
|
if (hlsl_component_index_range_from_deref(ctx, lhs, &start, &count))
|
2022-01-21 13:22:26 -08:00
|
|
|
{
|
|
|
|
unsigned int writemask = store->writemask;
|
|
|
|
|
2022-11-11 17:31:55 -08:00
|
|
|
if (store->rhs.node->data_type->class == HLSL_CLASS_OBJECT)
|
2022-01-21 13:22:26 -08:00
|
|
|
writemask = VKD3DSP_WRITEMASK_0;
|
2022-07-20 12:37:07 -07:00
|
|
|
copy_propagation_set_value(var_def, start, writemask, store->rhs.node);
|
2022-01-21 13:22:26 -08:00
|
|
|
}
|
2021-12-01 08:14:50 -08:00
|
|
|
else
|
2022-01-21 13:22:26 -08:00
|
|
|
{
|
2022-07-20 14:42:13 -07:00
|
|
|
copy_propagation_invalidate_variable_from_deref(ctx, var_def, lhs, store->writemask);
|
2022-01-21 13:22:26 -08:00
|
|
|
}
|
2021-12-01 08:14:50 -08:00
|
|
|
}
|
|
|
|
|
2022-04-28 06:32:05 -07:00
|
|
|
static void copy_propagation_state_init(struct hlsl_ctx *ctx, struct copy_propagation_state *state,
|
|
|
|
struct copy_propagation_state *parent)
|
|
|
|
{
|
|
|
|
rb_init(&state->var_defs, copy_propagation_var_def_compare);
|
|
|
|
state->parent = parent;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Free all variable definitions recorded in this state's tree. */
static void copy_propagation_state_destroy(struct copy_propagation_state *state)
{
    rb_destroy(&state->var_defs, copy_propagation_var_def_destroy, NULL);
}
|
|
|
|
|
|
|
|
/* Invalidate, in "state", every variable component that could be written by
 * any instruction in "block", recursing into nested if/loop bodies. Used to
 * discard values across control flow whose stores may or may not execute. */
static void copy_propagation_invalidate_from_block(struct hlsl_ctx *ctx, struct copy_propagation_state *state,
        struct hlsl_block *block)
{
    struct hlsl_ir_node *instr;

    LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry)
    {
        switch (instr->type)
        {
            case HLSL_IR_STORE:
            {
                struct hlsl_ir_store *store = hlsl_ir_store(instr);
                struct copy_propagation_var_def *var_def;
                struct hlsl_deref *lhs = &store->lhs;
                struct hlsl_ir_var *var = lhs->var;

                /* Allocation failure: skip this store rather than abort. */
                if (!(var_def = copy_propagation_create_var_def(ctx, state, var)))
                    continue;

                copy_propagation_invalidate_variable_from_deref(ctx, var_def, lhs, store->writemask);

                break;
            }

            case HLSL_IR_IF:
            {
                struct hlsl_ir_if *iff = hlsl_ir_if(instr);

                /* Either branch may execute; invalidate stores from both. */
                copy_propagation_invalidate_from_block(ctx, state, &iff->then_block);
                copy_propagation_invalidate_from_block(ctx, state, &iff->else_block);

                break;
            }

            case HLSL_IR_LOOP:
            {
                struct hlsl_ir_loop *loop = hlsl_ir_loop(instr);

                copy_propagation_invalidate_from_block(ctx, state, &loop->body);

                break;
            }

            default:
                break;
        }
    }
}
|
|
|
|
|
|
|
|
static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_block *block,
|
|
|
|
struct copy_propagation_state *state);
|
|
|
|
|
|
|
|
/* Run copy propagation over both branches of an "if", each with its own
 * child state so that definitions made inside a branch do not leak out.
 * Returns whether any transformation was performed. */
static bool copy_propagation_process_if(struct hlsl_ctx *ctx, struct hlsl_ir_if *iff,
        struct copy_propagation_state *state)
{
    struct copy_propagation_state inner_state;
    bool progress = false;

    copy_propagation_state_init(ctx, &inner_state, state);
    progress |= copy_propagation_transform_block(ctx, &iff->then_block, &inner_state);
    copy_propagation_state_destroy(&inner_state);

    copy_propagation_state_init(ctx, &inner_state, state);
    progress |= copy_propagation_transform_block(ctx, &iff->else_block, &inner_state);
    copy_propagation_state_destroy(&inner_state);

    /* Ideally we'd invalidate the outer state looking at what was
     * touched in the two inner states, but this doesn't work for
     * loops (because we need to know what is invalidated in advance),
     * so we need copy_propagation_invalidate_from_block() anyway. */
    copy_propagation_invalidate_from_block(ctx, state, &iff->then_block);
    copy_propagation_invalidate_from_block(ctx, state, &iff->else_block);

    return progress;
}
|
|
|
|
|
2022-05-03 02:57:21 -07:00
|
|
|
/* Run copy propagation over a loop body. The body may execute repeatedly,
 * so anything it writes must be invalidated in the outer state BEFORE
 * transforming the body; a child state is then used for the body itself.
 * Returns whether any transformation was performed. */
static bool copy_propagation_process_loop(struct hlsl_ctx *ctx, struct hlsl_ir_loop *loop,
        struct copy_propagation_state *state)
{
    struct copy_propagation_state inner_state;
    bool progress = false;

    /* Invalidate up front: values written on a previous iteration must not
     * be propagated into the next one. */
    copy_propagation_invalidate_from_block(ctx, state, &loop->body);

    copy_propagation_state_init(ctx, &inner_state, state);
    progress |= copy_propagation_transform_block(ctx, &loop->body, &inner_state);
    copy_propagation_state_destroy(&inner_state);

    return progress;
}
|
|
|
|
|
2021-12-01 08:14:50 -08:00
|
|
|
/* Walk the instructions of "block", replacing loads whose value is known in
 * "state" and recording/invalidating definitions as stores are seen.
 * Returns whether any transformation was performed. Uses the _SAFE iterator
 * because transformed instructions may be replaced in place. */
static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_block *block,
        struct copy_propagation_state *state)
{
    struct hlsl_ir_node *instr, *next;
    bool progress = false;

    LIST_FOR_EACH_ENTRY_SAFE(instr, next, &block->instrs, struct hlsl_ir_node, entry)
    {
        switch (instr->type)
        {
            case HLSL_IR_LOAD:
                progress |= copy_propagation_transform_load(ctx, hlsl_ir_load(instr), state);
                break;

            case HLSL_IR_RESOURCE_LOAD:
                progress |= copy_propagation_transform_resource_load(ctx, hlsl_ir_resource_load(instr), state);
                break;

            case HLSL_IR_RESOURCE_STORE:
                progress |= copy_propagation_transform_resource_store(ctx, hlsl_ir_resource_store(instr), state);
                break;

            case HLSL_IR_STORE:
                /* Stores don't transform anything themselves; they update the
                 * tracked state for subsequent loads. */
                copy_propagation_record_store(ctx, hlsl_ir_store(instr), state);
                break;

            case HLSL_IR_SWIZZLE:
                progress |= copy_propagation_transform_swizzle(ctx, hlsl_ir_swizzle(instr), state);
                break;

            case HLSL_IR_IF:
                progress |= copy_propagation_process_if(ctx, hlsl_ir_if(instr), state);
                break;

            case HLSL_IR_LOOP:
                progress |= copy_propagation_process_loop(ctx, hlsl_ir_loop(instr), state);
                break;

            default:
                break;
        }
    }

    return progress;
}
|
|
|
|
|
2023-06-09 06:39:38 -07:00
|
|
|
/* Entry point of the copy-propagation pass: run it over "block" with a
 * fresh root state (no parent scope). Returns whether anything changed,
 * so callers can iterate the pass to a fixed point. */
bool hlsl_copy_propagation_execute(struct hlsl_ctx *ctx, struct hlsl_block *block)
{
    struct copy_propagation_state state;
    bool progress;

    copy_propagation_state_init(ctx, &state, NULL);

    progress = copy_propagation_transform_block(ctx, block, &state);

    copy_propagation_state_destroy(&state);

    return progress;
}
|
|
|
|
|
2022-07-22 08:40:24 -07:00
|
|
|
static void note_non_static_deref_expressions(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
|
|
|
|
const char *usage)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < deref->path_len; ++i)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_node *path_node = deref->path[i].node;
|
|
|
|
|
|
|
|
assert(path_node);
|
|
|
|
if (path_node->type != HLSL_IR_CONSTANT)
|
|
|
|
hlsl_note(ctx, &path_node->loc, VKD3D_SHADER_LOG_ERROR,
|
|
|
|
"Expression for %s within \"%s\" cannot be resolved statically.",
|
|
|
|
usage, deref->var->name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Validation callback: report errors for resource/sampler accesses that
 * cannot be resolved to a single uniform object at compile time. Always
 * returns false (it never transforms the IR, only diagnoses). */
static bool validate_static_object_references(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
        void *context)
{
    unsigned int start, count;

    if (instr->type == HLSL_IR_RESOURCE_LOAD)
    {
        struct hlsl_ir_resource_load *load = hlsl_ir_resource_load(instr);

        if (!load->resource.var->is_uniform)
        {
            hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF,
                    "Loaded resource must have a single uniform source.");
        }
        else if (!hlsl_component_index_range_from_deref(ctx, &load->resource, &start, &count))
        {
            /* The deref path contains non-constant indices; point them out. */
            hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF,
                    "Loaded resource from \"%s\" must be determinable at compile time.",
                    load->resource.var->name);
            note_non_static_deref_expressions(ctx, &load->resource, "loaded resource");
        }

        /* The sampler deref is optional (absent for non-sampling loads). */
        if (load->sampler.var)
        {
            if (!load->sampler.var->is_uniform)
            {
                hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF,
                        "Resource load sampler must have a single uniform source.");
            }
            else if (!hlsl_component_index_range_from_deref(ctx, &load->sampler, &start, &count))
            {
                hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF,
                        "Resource load sampler from \"%s\" must be determinable at compile time.",
                        load->sampler.var->name);
                note_non_static_deref_expressions(ctx, &load->sampler, "resource load sampler");
            }
        }
    }
    else if (instr->type == HLSL_IR_RESOURCE_STORE)
    {
        struct hlsl_ir_resource_store *store = hlsl_ir_resource_store(instr);

        if (!store->resource.var->is_uniform)
        {
            hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF,
                    "Accessed resource must have a single uniform source.");
        }
        else if (!hlsl_component_index_range_from_deref(ctx, &store->resource, &start, &count))
        {
            hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NON_STATIC_OBJECT_REF,
                    "Accessed resource from \"%s\" must be determinable at compile time.",
                    store->resource.var->name);
            note_non_static_deref_expressions(ctx, &store->resource, "accessed resource");
        }
    }

    return false;
}
|
|
|
|
|
2021-03-16 14:31:56 -07:00
|
|
|
static bool is_vec1(const struct hlsl_type *type)
|
|
|
|
{
|
2022-11-11 17:31:55 -08:00
|
|
|
return (type->class == HLSL_CLASS_SCALAR) || (type->class == HLSL_CLASS_VECTOR && type->dimx == 1);
|
2021-03-16 14:31:56 -07:00
|
|
|
}
|
|
|
|
|
2021-03-16 14:31:55 -07:00
|
|
|
/* Transform callback: remove casts that don't change the value, i.e. casts
 * between equal types, or between single-component types of the same base
 * type (scalar <-> vec1). Returns whether the instruction was replaced. */
static bool fold_redundant_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    if (instr->type == HLSL_IR_EXPR)
    {
        struct hlsl_ir_expr *expr = hlsl_ir_expr(instr);
        const struct hlsl_type *dst_type = expr->node.data_type;
        const struct hlsl_type *src_type;

        if (expr->op != HLSL_OP1_CAST)
            return false;

        src_type = expr->operands[0].node->data_type;

        if (hlsl_types_are_equal(src_type, dst_type)
                || (src_type->base_type == dst_type->base_type && is_vec1(src_type) && is_vec1(dst_type)))
        {
            /* Redirect all uses of the cast to its operand. */
            hlsl_replace_node(&expr->node, expr->operands[0].node);
            return true;
        }
    }

    return false;
}
|
|
|
|
|
2022-04-22 03:25:01 -07:00
|
|
|
/* Copy an element of a complex variable. Helper for
 * split_array_copies(), split_struct_copies() and
 * split_matrix_copies(). Inserts new instructions right before
 * "store". */
static bool split_copy(struct hlsl_ctx *ctx, struct hlsl_ir_store *store,
        const struct hlsl_ir_load *load, const unsigned int idx, struct hlsl_type *type)
{
    /* NOTE(review): "type" is not referenced in this body; presumably the
     * element type is derived by hlsl_new_load_index()/hlsl_new_store_index()
     * instead — confirm whether the parameter can be dropped at the callers. */
    struct hlsl_ir_node *split_store, *c;
    struct hlsl_ir_load *split_load;

    /* Constant index node selecting element "idx". */
    if (!(c = hlsl_new_uint_constant(ctx, idx, &store->node.loc)))
        return false;
    list_add_before(&store->node.entry, &c->entry);

    /* Load element idx of the source... */
    if (!(split_load = hlsl_new_load_index(ctx, &load->src, c, &store->node.loc)))
        return false;
    list_add_before(&store->node.entry, &split_load->node.entry);

    /* ...and store it into element idx of the destination. */
    if (!(split_store = hlsl_new_store_index(ctx, &store->lhs, c, &split_load->node, 0, &store->node.loc)))
        return false;
    list_add_before(&store->node.entry, &split_store->entry);

    return true;
}
|
|
|
|
|
|
|
|
/* Transform callback: replace a whole-array store with one store per array
 * element, then delete the original store. Returns whether the instruction
 * was split. */
static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    const struct hlsl_ir_node *rhs;
    struct hlsl_type *element_type;
    const struct hlsl_type *type;
    struct hlsl_ir_store *store;
    unsigned int i;

    if (instr->type != HLSL_IR_STORE)
        return false;

    store = hlsl_ir_store(instr);
    rhs = store->rhs.node;
    type = rhs->data_type;
    if (type->class != HLSL_CLASS_ARRAY)
        return false;
    element_type = type->e.array.type;

    /* Only load sources are handled; anything else still needs support. */
    if (rhs->type != HLSL_IR_LOAD)
    {
        hlsl_fixme(ctx, &instr->loc, "Array store rhs is not HLSL_IR_LOAD. Broadcast may be missing.");
        return false;
    }

    for (i = 0; i < type->e.array.elements_count; ++i)
    {
        if (!split_copy(ctx, store, hlsl_ir_load(rhs), i, element_type))
            return false;
    }

    /* Remove the store instruction, so that we can split structs which contain
     * other structs. Although assignments produce a value, we don't allow
     * HLSL_IR_STORE to be used as a source. */
    list_remove(&store->node.entry);
    hlsl_free_instr(&store->node);
    return true;
}
|
2021-03-17 22:22:21 -07:00
|
|
|
|
2021-08-12 17:36:14 -07:00
|
|
|
/* Transform callback: replace a whole-struct store with one store per
 * field, then delete the original store. Returns whether the instruction
 * was split. */
static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    const struct hlsl_ir_node *rhs;
    const struct hlsl_type *type;
    struct hlsl_ir_store *store;
    size_t i;

    if (instr->type != HLSL_IR_STORE)
        return false;

    store = hlsl_ir_store(instr);
    rhs = store->rhs.node;
    type = rhs->data_type;
    if (type->class != HLSL_CLASS_STRUCT)
        return false;

    /* Only load sources are handled; anything else still needs support. */
    if (rhs->type != HLSL_IR_LOAD)
    {
        hlsl_fixme(ctx, &instr->loc, "Struct store rhs is not HLSL_IR_LOAD. Broadcast may be missing.");
        return false;
    }

    for (i = 0; i < type->e.record.field_count; ++i)
    {
        const struct hlsl_struct_field *field = &type->e.record.fields[i];

        if (!split_copy(ctx, store, hlsl_ir_load(rhs), i, field->type))
            return false;
    }

    /* Remove the store instruction, so that we can split structs which contain
     * other structs. Although assignments produce a value, we don't allow
     * HLSL_IR_STORE to be used as a source. */
    list_remove(&store->node.entry);
    hlsl_free_instr(&store->node);
    return true;
}
|
|
|
|
|
2022-04-17 23:34:02 -07:00
|
|
|
/* Transform callback: replace a whole-matrix store with one store per
 * major-axis row/column vector, then delete the original store. Returns
 * whether the instruction was split. */
static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    const struct hlsl_ir_node *rhs;
    struct hlsl_type *element_type;
    const struct hlsl_type *type;
    unsigned int i;
    struct hlsl_ir_store *store;

    if (instr->type != HLSL_IR_STORE)
        return false;

    store = hlsl_ir_store(instr);
    rhs = store->rhs.node;
    type = rhs->data_type;
    if (type->class != HLSL_CLASS_MATRIX)
        return false;
    /* Each copied element is a vector of the matrix's minor dimension. */
    element_type = hlsl_get_vector_type(ctx, type->base_type, hlsl_type_minor_size(type));

    if (rhs->type != HLSL_IR_LOAD)
    {
        hlsl_fixme(ctx, &instr->loc, "Copying from unsupported node type.");
        return false;
    }

    for (i = 0; i < hlsl_type_major_size(type); ++i)
    {
        if (!split_copy(ctx, store, hlsl_ir_load(rhs), i, element_type))
            return false;
    }

    list_remove(&store->node.entry);
    hlsl_free_instr(&store->node);
    return true;
}
|
|
|
|
|
2023-06-25 16:46:10 -07:00
|
|
|
/* Lowering callback: turn a cast that also narrows a vector into a cast of
 * the full source width followed by a swizzle that drops the trailing
 * components. New instructions are appended to "block"; returns whether the
 * instruction was lowered. */
static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    const struct hlsl_type *src_type, *dst_type;
    struct hlsl_type *dst_vector_type;
    struct hlsl_ir_expr *cast;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    cast = hlsl_ir_expr(instr);
    if (cast->op != HLSL_OP1_CAST)
        return false;
    src_type = cast->operands[0].node->data_type;
    dst_type = cast->node.data_type;

    /* "<= HLSL_CLASS_VECTOR" covers scalars and vectors only (numeric
     * single-register types). */
    if (src_type->class <= HLSL_CLASS_VECTOR && dst_type->class <= HLSL_CLASS_VECTOR && dst_type->dimx < src_type->dimx)
    {
        struct hlsl_ir_node *new_cast, *swizzle;

        dst_vector_type = hlsl_get_vector_type(ctx, dst_type->base_type, src_type->dimx);
        /* We need to preserve the cast since it might be doing more than just
         * narrowing the vector. */
        if (!(new_cast = hlsl_new_cast(ctx, cast->operands[0].node, dst_vector_type, &cast->node.loc)))
            return false;
        hlsl_block_add_instr(block, new_cast);

        /* Identity swizzle truncated to the destination width. */
        if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, Y, Z, W), dst_type->dimx, new_cast, &cast->node.loc)))
            return false;
        hlsl_block_add_instr(block, swizzle);

        return true;
    }

    return false;
}
|
|
|
|
|
2022-12-09 14:23:41 -08:00
|
|
|
/* Transform callback: collapse a swizzle of a swizzle into a single
 * combined swizzle of the innermost value. Returns whether the instruction
 * was replaced. */
static bool fold_swizzle_chains(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    struct hlsl_ir_swizzle *swizzle;
    struct hlsl_ir_node *next_instr;

    if (instr->type != HLSL_IR_SWIZZLE)
        return false;
    swizzle = hlsl_ir_swizzle(instr);

    next_instr = swizzle->val.node;

    if (next_instr->type == HLSL_IR_SWIZZLE)
    {
        struct hlsl_ir_node *new_swizzle;
        unsigned int combined_swizzle;

        /* Compose the inner and outer component selections. */
        combined_swizzle = hlsl_combine_swizzles(hlsl_ir_swizzle(next_instr)->swizzle,
                swizzle->swizzle, instr->data_type->dimx);
        next_instr = hlsl_ir_swizzle(next_instr)->val.node;

        if (!(new_swizzle = hlsl_new_swizzle(ctx, combined_swizzle, instr->data_type->dimx, next_instr, &instr->loc)))
            return false;

        list_add_before(&instr->entry, &new_swizzle->entry);
        hlsl_replace_node(instr, new_swizzle);
        return true;
    }

    return false;
}
|
|
|
|
|
2021-11-17 00:47:24 -08:00
|
|
|
/* Transform callback: remove identity swizzles (same width as the source
 * and every component mapping to itself). Returns whether the instruction
 * was replaced by its operand. */
static bool remove_trivial_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    struct hlsl_ir_swizzle *swizzle;
    unsigned int i;

    if (instr->type != HLSL_IR_SWIZZLE)
        return false;
    swizzle = hlsl_ir_swizzle(instr);

    if (instr->data_type->dimx != swizzle->val.node->data_type->dimx)
        return false;

    for (i = 0; i < instr->data_type->dimx; ++i)
        if (hlsl_swizzle_get_component(swizzle->swizzle, i) != i)
            return false;

    hlsl_replace_node(instr, swizzle->val.node);

    return true;
}
|
|
|
|
|
2023-06-25 16:46:10 -07:00
|
|
|
/* Lowering callback: replace a load of a vector component through a
 * non-constant index with a dot product of the whole vector against a mask
 * computed by comparing {0,1,2,3} with the index. New instructions are
 * appended to "block"; returns whether the instruction was lowered. */
static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *idx;
    struct hlsl_deref *deref;
    struct hlsl_type *type;
    unsigned int i;

    if (instr->type != HLSL_IR_LOAD)
        return false;

    deref = &hlsl_ir_load(instr)->src;
    assert(deref->var);

    if (deref->path_len == 0)
        return false;

    /* Walk the path down to the type being indexed by the last component. */
    type = deref->var->data_type;
    for (i = 0; i < deref->path_len - 1; ++i)
        type = hlsl_get_element_type_from_path_index(ctx, type, deref->path[i].node);

    idx = deref->path[deref->path_len - 1].node;

    if (type->class == HLSL_CLASS_VECTOR && idx->type != HLSL_IR_CONSTANT)
    {
        struct hlsl_ir_node *eq, *swizzle, *dot, *c, *operands[HLSL_MAX_OPERANDS] = {0};
        struct hlsl_constant_value value;
        struct hlsl_ir_load *vector_load;
        enum hlsl_ir_expr_op op;

        /* Load the full vector (the deref minus its last path component). */
        if (!(vector_load = hlsl_new_load_parent(ctx, deref, &instr->loc)))
            return false;
        hlsl_block_add_instr(block, &vector_load->node);

        /* Broadcast the index to the vector's width. */
        if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), type->dimx, idx, &instr->loc)))
            return false;
        hlsl_block_add_instr(block, swizzle);

        /* Constant {0, 1, 2, 3}, truncated to the vector width. */
        value.u[0].u = 0;
        value.u[1].u = 1;
        value.u[2].u = 2;
        value.u[3].u = 3;
        if (!(c = hlsl_new_constant(ctx, hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, type->dimx), &value, &instr->loc)))
            return false;
        hlsl_block_add_instr(block, c);

        /* Per-component mask: true exactly where the lane equals the index. */
        operands[0] = swizzle;
        operands[1] = c;
        if (!(eq = hlsl_new_expr(ctx, HLSL_OP2_EQUAL, operands,
                hlsl_get_vector_type(ctx, HLSL_TYPE_BOOL, type->dimx), &instr->loc)))
            return false;
        hlsl_block_add_instr(block, eq);

        /* Cast the bool mask to the vector's own type for the multiply/dot. */
        if (!(eq = hlsl_new_cast(ctx, eq, type, &instr->loc)))
            return false;
        hlsl_block_add_instr(block, eq);

        op = HLSL_OP2_DOT;
        if (type->dimx == 1)
            op = type->base_type == HLSL_TYPE_BOOL ? HLSL_OP2_LOGIC_AND : HLSL_OP2_MUL;

        /* Note: We may be creating a DOT for bool vectors here, which we need to lower to
         * LOGIC_OR + LOGIC_AND. */
        operands[0] = &vector_load->node;
        operands[1] = eq;
        if (!(dot = hlsl_new_expr(ctx, op, operands, instr->data_type, &instr->loc)))
            return false;
        hlsl_block_add_instr(block, dot);

        return true;
    }

    return false;
}
|
|
|
|
|
2023-05-29 18:59:17 -07:00
|
|
|
/* Lower combined samples and sampler variables to synthesized separated textures and samplers.
 * That is, translate SM1-style samples in the source to SM4-style samples in the bytecode. */
static bool lower_combined_samples(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    struct hlsl_ir_resource_load *load;
    struct vkd3d_string_buffer *name;
    struct hlsl_ir_var *var;
    unsigned int i;

    if (instr->type != HLSL_IR_RESOURCE_LOAD)
        return false;
    load = hlsl_ir_resource_load(instr);

    /* Only plain/LOD/bias samples can come from SM1-style combined samplers;
     * everything else is already separated. */
    switch (load->load_type)
    {
        case HLSL_RESOURCE_LOAD:
        case HLSL_RESOURCE_GATHER_RED:
        case HLSL_RESOURCE_GATHER_GREEN:
        case HLSL_RESOURCE_GATHER_BLUE:
        case HLSL_RESOURCE_GATHER_ALPHA:
        case HLSL_RESOURCE_RESINFO:
        case HLSL_RESOURCE_SAMPLE_CMP:
        case HLSL_RESOURCE_SAMPLE_CMP_LZ:
        case HLSL_RESOURCE_SAMPLE_GRAD:
        case HLSL_RESOURCE_SAMPLE_INFO:
            return false;

        case HLSL_RESOURCE_SAMPLE:
        case HLSL_RESOURCE_SAMPLE_LOD:
        case HLSL_RESOURCE_SAMPLE_LOD_BIAS:
            break;
    }
    /* A sampler deref already present means the load is already separated. */
    if (load->sampler.var)
        return false;

    if (!hlsl_type_is_resource(load->resource.var->data_type))
    {
        hlsl_fixme(ctx, &instr->loc, "Lower combined samplers within structs.");
        return false;
    }

    assert(hlsl_type_get_regset(load->resource.var->data_type) == HLSL_REGSET_SAMPLERS);

    if (!(name = hlsl_get_string_buffer(ctx)))
        return false;
    vkd3d_string_buffer_printf(name, "<resource>%s", load->resource.var->name);

    TRACE("Lowering to separate resource %s.\n", debugstr_a(name->buffer));

    /* Reuse a previously synthesized texture var of the same name, if any. */
    if (!(var = hlsl_get_var(ctx->globals, name->buffer)))
    {
        struct hlsl_type *texture_array_type = hlsl_new_texture_type(ctx, load->sampling_dim,
                hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, 4), 0);

        /* Create (possibly multi-dimensional) texture array type with the same dims as the sampler array. */
        struct hlsl_type *arr_type = load->resource.var->data_type;
        for (i = 0; i < load->resource.path_len; ++i)
        {
            assert(arr_type->class == HLSL_CLASS_ARRAY);
            texture_array_type = hlsl_new_array_type(ctx, texture_array_type, arr_type->e.array.elements_count);
            arr_type = arr_type->e.array.type;
        }

        if (!(var = hlsl_new_synthetic_var_named(ctx, name->buffer, texture_array_type, &instr->loc, false)))
        {
            hlsl_release_string_buffer(ctx, name);
            return false;
        }
        var->is_uniform = 1;
        var->is_separated_resource = true;

        list_add_tail(&ctx->extern_vars, &var->extern_entry);
    }
    hlsl_release_string_buffer(ctx, name);

    if (load->sampling_dim != var->data_type->sampler_dim)
    {
        hlsl_error(ctx, &load->node.loc, VKD3D_SHADER_ERROR_HLSL_INCONSISTENT_SAMPLER,
                "Cannot split combined samplers from \"%s\" if they have different usage dimensions.",
                load->resource.var->name);
        hlsl_note(ctx, &var->loc, VKD3D_SHADER_LOG_ERROR, "First use as combined sampler is here.");
        return false;

    }

    /* The old combined var becomes the sampler; the synthesized texture var
     * becomes the resource. */
    hlsl_copy_deref(ctx, &load->sampler, &load->resource);
    load->resource.var = var;
    assert(hlsl_deref_get_type(ctx, &load->resource)->base_type == HLSL_TYPE_TEXTURE);
    assert(hlsl_deref_get_type(ctx, &load->sampler)->base_type == HLSL_TYPE_SAMPLER);

    return true;
}
|
|
|
|
|
2023-08-04 12:02:39 -07:00
|
|
|
static void insert_ensuring_decreasing_bind_count(struct list *list, struct hlsl_ir_var *to_add,
|
|
|
|
enum hlsl_regset regset)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var, list, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
|
|
|
if (var->bind_count[regset] < to_add->bind_count[regset])
|
|
|
|
{
|
|
|
|
list_add_before(&var->extern_entry, &to_add->extern_entry);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
list_add_tail(list, &to_add->extern_entry);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Move all synthesized separated-resource variables to the head of
 * ctx->extern_vars, ordered among themselves by decreasing texture bind
 * count. Always returns false (list reordering only, no IR change). */
static bool sort_synthetic_separated_samplers_first(struct hlsl_ctx *ctx)
{
    struct list separated_resources;
    struct hlsl_ir_var *var, *next;

    list_init(&separated_resources);

    /* _SAFE iteration: entries are removed while walking. */
    LIST_FOR_EACH_ENTRY_SAFE(var, next, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
    {
        if (var->is_separated_resource)
        {
            list_remove(&var->extern_entry);
            insert_ensuring_decreasing_bind_count(&separated_resources, var, HLSL_REGSET_TEXTURES);
        }
    }

    list_move_head(&ctx->extern_vars, &separated_resources);

    return false;
}
|
|
|
|
|
2021-05-20 22:32:24 -07:00
|
|
|
/* Lower DIV to RCP + MUL. */
static bool lower_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *rcp, *mul;
    struct hlsl_ir_expr *expr;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);
    if (expr->op != HLSL_OP2_DIV)
        return false;

    /* a / b  ->  a * rcp(b) */
    if (!(rcp = hlsl_new_unary_expr(ctx, HLSL_OP1_RCP, expr->operands[1].node, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, rcp);

    if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, expr->operands[0].node, rcp)))
        return false;
    hlsl_block_add_instr(block, mul);

    return true;
}
|
|
|
|
|
2023-01-24 04:44:39 -08:00
|
|
|
/* Lower SQRT to RSQ + RCP. */
static bool lower_sqrt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *rsq, *rcp;
    struct hlsl_ir_expr *expr;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);
    if (expr->op != HLSL_OP1_SQRT)
        return false;

    /* sqrt(x)  ->  rcp(rsq(x)) */
    if (!(rsq = hlsl_new_unary_expr(ctx, HLSL_OP1_RSQ, expr->operands[0].node, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, rsq);

    if (!(rcp = hlsl_new_unary_expr(ctx, HLSL_OP1_RCP, rsq, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, rcp);
    return true;
}
|
|
|
|
|
2023-01-26 12:56:00 -08:00
|
|
|
/* Lower DP2 to MUL + ADD */
static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *arg1, *arg2, *mul, *replacement, *zero, *add_x, *add_y;
    struct hlsl_ir_expr *expr;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);
    arg1 = expr->operands[0].node;
    arg2 = expr->operands[1].node;
    if (expr->op != HLSL_OP2_DOT)
        return false;
    /* Only two-component dot products are lowered here. */
    if (arg1->data_type->dimx != 2)
        return false;

    if (ctx->profile->type == VKD3D_SHADER_TYPE_PIXEL)
    {
        struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = { 0 };

        /* Pixel shaders have DP2ADD; use it with a zero addend. */
        if (!(zero = hlsl_new_float_constant(ctx, 0.0f, &expr->node.loc)))
            return false;
        hlsl_block_add_instr(block, zero);

        operands[0] = arg1;
        operands[1] = arg2;
        operands[2] = zero;

        if (!(replacement = hlsl_new_expr(ctx, HLSL_OP3_DP2ADD, operands, instr->data_type, &expr->node.loc)))
            return false;
    }
    else
    {
        /* Otherwise: dot(a, b) = (a*b).x + (a*b).y, broadcast to the
         * instruction's width via swizzles. */
        if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, expr->operands[0].node, expr->operands[1].node)))
            return false;
        hlsl_block_add_instr(block, mul);

        if (!(add_x = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), instr->data_type->dimx, mul, &expr->node.loc)))
            return false;
        hlsl_block_add_instr(block, add_x);

        if (!(add_y = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Y, Y, Y, Y), instr->data_type->dimx, mul, &expr->node.loc)))
            return false;
        hlsl_block_add_instr(block, add_y);

        if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, add_x, add_y)))
            return false;
    }
    hlsl_block_add_instr(block, replacement);

    return true;
}
|
|
|
|
|
2023-02-04 15:30:36 -08:00
|
|
|
/* Lower ABS to MAX */
|
2023-06-25 16:46:10 -07:00
|
|
|
static bool lower_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
|
2023-02-04 15:30:36 -08:00
|
|
|
{
|
|
|
|
struct hlsl_ir_node *arg, *neg, *replacement;
|
|
|
|
struct hlsl_ir_expr *expr;
|
|
|
|
|
|
|
|
if (instr->type != HLSL_IR_EXPR)
|
|
|
|
return false;
|
|
|
|
expr = hlsl_ir_expr(instr);
|
|
|
|
arg = expr->operands[0].node;
|
|
|
|
if (expr->op != HLSL_OP1_ABS)
|
|
|
|
return false;
|
|
|
|
|
2023-04-14 00:02:14 -07:00
|
|
|
if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc)))
|
2023-02-04 15:30:36 -08:00
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, neg);
|
2023-02-04 15:30:36 -08:00
|
|
|
|
|
|
|
if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_MAX, neg, arg)))
|
|
|
|
return false;
|
2023-06-25 16:46:10 -07:00
|
|
|
hlsl_block_add_instr(block, replacement);
|
2023-02-04 15:30:36 -08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-03-16 13:14:12 -07:00
|
|
|
/* Lower ROUND using FRC, ROUND(x) -> ((x + 0.5) - FRC(x + 0.5)). */
static bool lower_round(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *arg, *neg, *sum, *frc, *half, *replacement;
    struct hlsl_type *type = instr->data_type;
    struct hlsl_constant_value half_value;
    unsigned int i, component_count;
    struct hlsl_ir_expr *expr;

    if (instr->type != HLSL_IR_EXPR)
        return false;

    expr = hlsl_ir_expr(instr);
    arg = expr->operands[0].node;
    if (expr->op != HLSL_OP1_ROUND)
        return false;

    /* Build a constant with 0.5 in every component of the result type. */
    component_count = hlsl_type_component_count(type);
    for (i = 0; i < component_count; ++i)
        half_value.u[i].f = 0.5f;
    if (!(half = hlsl_new_constant(ctx, type, &half_value, &expr->node.loc)))
        return false;
    hlsl_block_add_instr(block, half);

    /* sum = x + 0.5 */
    if (!(sum = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg, half)))
        return false;
    hlsl_block_add_instr(block, sum);

    /* frc = frac(x + 0.5) */
    if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, sum, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, frc);

    /* replacement = sum - frc == (x + 0.5) - frac(x + 0.5) */
    if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, frc, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, neg);

    if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, sum, neg)))
        return false;
    hlsl_block_add_instr(block, replacement);

    return true;
}
|
|
|
|
|
2023-07-24 23:46:28 -07:00
|
|
|
/* Use 'movc' for the ternary operator. */
static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS], *replacement;
    struct hlsl_ir_node *zero, *cond, *first, *second;
    struct hlsl_constant_value zero_value = { 0 };
    struct hlsl_ir_expr *expr;
    struct hlsl_type *type;

    if (instr->type != HLSL_IR_EXPR)
        return false;

    expr = hlsl_ir_expr(instr);
    if (expr->op != HLSL_OP3_TERNARY)
        return false;

    cond = expr->operands[0].node;
    first = expr->operands[1].node;
    second = expr->operands[2].node;

    if (cond->data_type->base_type == HLSL_TYPE_FLOAT)
    {
        /* MOVC wants a boolean condition; turn a float condition into
         * "cond != 0" first. NOTE(review): only HLSL_TYPE_FLOAT is converted
         * here — presumably other non-bool base types are already bool or
         * handled elsewhere; confirm against the pass ordering. */
        if (!(zero = hlsl_new_constant(ctx, cond->data_type, &zero_value, &instr->loc)))
            return false;
        hlsl_block_add_instr(block, zero);

        memset(operands, 0, sizeof(operands));
        operands[0] = zero;
        operands[1] = cond;
        type = cond->data_type;
        type = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_BOOL, type->dimx, type->dimy);
        if (!(cond = hlsl_new_expr(ctx, HLSL_OP2_NEQUAL, operands, type, &instr->loc)))
            return false;
        hlsl_block_add_instr(block, cond);
    }

    /* replacement = cond ? first : second, via MOVC. */
    memset(operands, 0, sizeof(operands));
    operands[0] = cond;
    operands[1] = first;
    operands[2] = second;
    if (!(replacement = hlsl_new_expr(ctx, HLSL_OP3_MOVC, operands, first->data_type, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, replacement);
    return true;
}
|
|
|
|
|
2023-06-25 17:03:26 -07:00
|
|
|
/* Lower casts to bool into a "!= 0" comparison against the source type. */
static bool lower_casts_to_bool(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_type *type = instr->data_type, *arg_type;
    static const struct hlsl_constant_value zero_value;
    struct hlsl_ir_node *zero, *neq;
    struct hlsl_ir_expr *expr;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);
    if (expr->op != HLSL_OP1_CAST)
        return false;
    arg_type = expr->operands[0].node->data_type;
    /* Only scalar and vector casts are handled here. */
    if (type->class > HLSL_CLASS_VECTOR || arg_type->class > HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_BOOL)
        return false;

    /* Narrowing casts should have already been lowered. */
    assert(type->dimx == arg_type->dimx);

    /* zero constant in the *source* type, so NEQUAL compares like with like. */
    zero = hlsl_new_constant(ctx, arg_type, &zero_value, &instr->loc);
    if (!zero)
        return false;
    hlsl_block_add_instr(block, zero);

    if (!(neq = hlsl_new_binary_expr(ctx, HLSL_OP2_NEQUAL, expr->operands[0].node, zero)))
        return false;
    /* Force the comparison's result type to match the original cast's type. */
    neq->data_type = expr->node.data_type;
    hlsl_block_add_instr(block, neq);

    return true;
}
|
|
|
|
|
2023-03-06 18:37:41 -08:00
|
|
|
struct hlsl_ir_node *hlsl_add_conditional(struct hlsl_ctx *ctx, struct hlsl_block *instrs,
|
2021-09-21 08:12:31 -07:00
|
|
|
struct hlsl_ir_node *condition, struct hlsl_ir_node *if_true, struct hlsl_ir_node *if_false)
|
|
|
|
{
|
2023-09-09 11:08:52 -07:00
|
|
|
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS];
|
|
|
|
struct hlsl_ir_node *cond;
|
2021-09-21 08:12:31 -07:00
|
|
|
|
|
|
|
assert(hlsl_types_are_equal(if_true->data_type, if_false->data_type));
|
|
|
|
|
2023-09-09 11:08:52 -07:00
|
|
|
operands[0] = condition;
|
|
|
|
operands[1] = if_true;
|
|
|
|
operands[2] = if_false;
|
|
|
|
if (!(cond = hlsl_new_expr(ctx, HLSL_OP3_TERNARY, operands, if_true->data_type, &condition->loc)))
|
|
|
|
return false;
|
|
|
|
hlsl_block_add_instr(instrs, cond);
|
2021-09-21 08:12:31 -07:00
|
|
|
|
2023-09-09 11:08:52 -07:00
|
|
|
return cond;
|
2021-09-21 08:12:31 -07:00
|
|
|
}
|
|
|
|
|
2023-03-06 18:34:10 -08:00
|
|
|
/* Lower signed integer division by performing an unsigned division on the
 * absolute values and then restoring the result's sign: the quotient is
 * negative iff exactly one operand is negative, i.e. iff the sign bit of
 * (arg1 ^ arg2) is set. */
static bool lower_int_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *arg1, *arg2, *xor, *and, *abs1, *abs2, *div, *neg, *cast1, *cast2, *cast3, *high_bit;
    struct hlsl_type *type = instr->data_type, *utype;
    struct hlsl_constant_value high_bit_value;
    struct hlsl_ir_expr *expr;
    unsigned int i;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);
    arg1 = expr->operands[0].node;
    arg2 = expr->operands[1].node;
    if (expr->op != HLSL_OP2_DIV)
        return false;
    if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_INT)
        return false;
    /* Unsigned counterpart of the result type, for the actual division. */
    utype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_UINT, type->dimx, type->dimy);

    /* xor = arg1 ^ arg2; its sign bit tells whether the signs differ. */
    if (!(xor = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_XOR, arg1, arg2)))
        return false;
    hlsl_block_add_instr(block, xor);

    /* Constant with only the sign bit set in every component. */
    for (i = 0; i < type->dimx; ++i)
        high_bit_value.u[i].u = 0x80000000;
    if (!(high_bit = hlsl_new_constant(ctx, type, &high_bit_value, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, high_bit);

    /* and = nonzero iff the result must be negated. */
    if (!(and = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_AND, xor, high_bit)))
        return false;
    hlsl_block_add_instr(block, and);

    /* |arg1| and |arg2|, cast to unsigned for the division. */
    if (!(abs1 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg1, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, abs1);

    if (!(cast1 = hlsl_new_cast(ctx, abs1, utype, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, cast1);

    if (!(abs2 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg2, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, abs2);

    if (!(cast2 = hlsl_new_cast(ctx, abs2, utype, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, cast2);

    /* Unsigned division, then cast back to the signed result type. */
    if (!(div = hlsl_new_binary_expr(ctx, HLSL_OP2_DIV, cast1, cast2)))
        return false;
    hlsl_block_add_instr(block, div);

    if (!(cast3 = hlsl_new_cast(ctx, div, type, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, cast3);

    if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, cast3, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, neg);

    /* Select the negated quotient when the sign bits differed. The pointer
     * result converts to bool: NULL (allocation failure) becomes false. */
    return hlsl_add_conditional(ctx, block, and, neg, cast3);
}
|
|
|
|
|
2023-03-06 18:35:59 -08:00
|
|
|
/* Lower signed integer modulus via an unsigned MOD on the absolute values,
 * then restore the sign: the result takes the sign of the dividend (arg1),
 * matching C-style truncated modulo. */
static bool lower_int_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *arg1, *arg2, *and, *abs1, *abs2, *div, *neg, *cast1, *cast2, *cast3, *high_bit;
    struct hlsl_type *type = instr->data_type, *utype;
    struct hlsl_constant_value high_bit_value;
    struct hlsl_ir_expr *expr;
    unsigned int i;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);
    arg1 = expr->operands[0].node;
    arg2 = expr->operands[1].node;
    if (expr->op != HLSL_OP2_MOD)
        return false;
    if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_INT)
        return false;
    /* Unsigned counterpart of the result type, for the actual MOD. */
    utype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_UINT, type->dimx, type->dimy);

    /* Constant with only the sign bit set in every component. */
    for (i = 0; i < type->dimx; ++i)
        high_bit_value.u[i].u = 0x80000000;
    if (!(high_bit = hlsl_new_constant(ctx, type, &high_bit_value, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, high_bit);

    /* and = nonzero iff the dividend is negative (unlike division, only
     * arg1's sign matters here). */
    if (!(and = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_AND, arg1, high_bit)))
        return false;
    hlsl_block_add_instr(block, and);

    /* |arg1| and |arg2|, cast to unsigned. */
    if (!(abs1 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg1, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, abs1);

    if (!(cast1 = hlsl_new_cast(ctx, abs1, utype, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, cast1);

    if (!(abs2 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg2, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, abs2);

    if (!(cast2 = hlsl_new_cast(ctx, abs2, utype, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, cast2);

    /* Unsigned modulus, then cast back to the signed result type. */
    if (!(div = hlsl_new_binary_expr(ctx, HLSL_OP2_MOD, cast1, cast2)))
        return false;
    hlsl_block_add_instr(block, div);

    if (!(cast3 = hlsl_new_cast(ctx, div, type, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, cast3);

    if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, cast3, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, neg);

    /* Select the negated result for a negative dividend. The pointer result
     * converts to bool: NULL (allocation failure) becomes false. */
    return hlsl_add_conditional(ctx, block, and, neg, cast3);
}
|
|
|
|
|
2023-06-25 17:06:45 -07:00
|
|
|
static bool lower_int_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
|
2021-09-21 08:21:44 -07:00
|
|
|
{
|
|
|
|
struct hlsl_type *type = instr->data_type;
|
2023-06-25 17:06:45 -07:00
|
|
|
struct hlsl_ir_node *arg, *neg, *max;
|
2021-09-21 08:21:44 -07:00
|
|
|
struct hlsl_ir_expr *expr;
|
|
|
|
|
|
|
|
if (instr->type != HLSL_IR_EXPR)
|
|
|
|
return false;
|
|
|
|
expr = hlsl_ir_expr(instr);
|
|
|
|
|
|
|
|
if (expr->op != HLSL_OP1_ABS)
|
|
|
|
return false;
|
2022-11-11 17:31:55 -08:00
|
|
|
if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
|
2021-09-21 08:21:44 -07:00
|
|
|
return false;
|
|
|
|
if (type->base_type != HLSL_TYPE_INT)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
arg = expr->operands[0].node;
|
|
|
|
|
2023-04-14 00:02:14 -07:00
|
|
|
if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc)))
|
2021-09-21 08:21:44 -07:00
|
|
|
return false;
|
2023-06-25 17:06:45 -07:00
|
|
|
hlsl_block_add_instr(block, neg);
|
2021-09-21 08:21:44 -07:00
|
|
|
|
2023-06-25 17:06:45 -07:00
|
|
|
if (!(max = hlsl_new_binary_expr(ctx, HLSL_OP2_MAX, arg, neg)))
|
|
|
|
return false;
|
|
|
|
hlsl_block_add_instr(block, max);
|
2021-09-21 08:21:44 -07:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-06-25 16:46:10 -07:00
|
|
|
/* Lower integer (and bool) dot products to a component-wise multiply (or
 * logical AND for bool) followed by a chain of adds (or logical ORs). */
static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *arg1, *arg2, *mult, *comps[4] = {0}, *res;
    struct hlsl_type *type = instr->data_type;
    struct hlsl_ir_expr *expr;
    unsigned int i, dimx;
    bool is_bool;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);

    if (expr->op != HLSL_OP2_DOT)
        return false;

    if (type->base_type == HLSL_TYPE_INT || type->base_type == HLSL_TYPE_UINT
            || type->base_type == HLSL_TYPE_BOOL)
    {
        arg1 = expr->operands[0].node;
        arg2 = expr->operands[1].node;
        assert(arg1->data_type->dimx == arg2->data_type->dimx);
        dimx = arg1->data_type->dimx;
        is_bool = type->base_type == HLSL_TYPE_BOOL;

        /* Component-wise product; AND plays the role of MUL for bools. */
        if (!(mult = hlsl_new_binary_expr(ctx, is_bool ? HLSL_OP2_LOGIC_AND : HLSL_OP2_MUL, arg1, arg2)))
            return false;
        hlsl_block_add_instr(block, mult);

        /* Extract each scalar component of the product via a swizzle. */
        for (i = 0; i < dimx; ++i)
        {
            unsigned int s = hlsl_swizzle_from_writemask(1 << i);

            if (!(comps[i] = hlsl_new_swizzle(ctx, s, 1, mult, &instr->loc)))
                return false;
            hlsl_block_add_instr(block, comps[i]);
        }

        /* Fold the components together; OR plays the role of ADD for bools. */
        res = comps[0];
        for (i = 1; i < dimx; ++i)
        {
            if (!(res = hlsl_new_binary_expr(ctx, is_bool ? HLSL_OP2_LOGIC_OR : HLSL_OP2_ADD, res, comps[i])))
                return false;
            hlsl_block_add_instr(block, res);
        }

        return true;
    }

    return false;
}
|
|
|
|
|
2023-03-06 18:31:16 -08:00
|
|
|
/* Lower floating-point MOD using FRC:
 * fmod(x, y) == frac(x / |y|') * |y|', where |y|' is y with the sign chosen
 * so that the division is performed against a positive-signed divisor. */
static bool lower_float_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
    struct hlsl_ir_node *arg1, *arg2, *mul1, *neg1, *ge, *neg2, *div, *mul2, *frc, *cond, *one, *mul3;
    struct hlsl_type *type = instr->data_type, *btype;
    struct hlsl_constant_value one_value;
    struct hlsl_ir_expr *expr;
    unsigned int i;

    if (instr->type != HLSL_IR_EXPR)
        return false;
    expr = hlsl_ir_expr(instr);
    arg1 = expr->operands[0].node;
    arg2 = expr->operands[1].node;
    if (expr->op != HLSL_OP2_MOD)
        return false;
    if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
        return false;
    if (type->base_type != HLSL_TYPE_FLOAT)
        return false;
    /* Boolean type of matching dimensions, for the comparison result. */
    btype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_BOOL, type->dimx, type->dimy);

    /* mul1 = arg2 * arg1; its sign tells whether the operands' signs agree. */
    if (!(mul1 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, arg2, arg1)))
        return false;
    hlsl_block_add_instr(block, mul1);

    if (!(neg1 = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, mul1, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, neg1);

    /* ge = (mul1 >= -mul1), i.e. mul1 >= 0. */
    if (!(ge = hlsl_new_binary_expr(ctx, HLSL_OP2_GEQUAL, mul1, neg1)))
        return false;
    ge->data_type = btype;
    hlsl_block_add_instr(block, ge);

    if (!(neg2 = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg2, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, neg2);

    /* cond = ge ? arg2 : -arg2, giving a divisor whose sign matches arg1. */
    if (!(cond = hlsl_add_conditional(ctx, block, ge, arg2, neg2)))
        return false;

    /* one = 1.0 in every component, to compute the reciprocal via DIV. */
    for (i = 0; i < type->dimx; ++i)
        one_value.u[i].f = 1.0f;
    if (!(one = hlsl_new_constant(ctx, type, &one_value, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, one);

    if (!(div = hlsl_new_binary_expr(ctx, HLSL_OP2_DIV, one, cond)))
        return false;
    hlsl_block_add_instr(block, div);

    /* mul2 = arg1 / cond */
    if (!(mul2 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, div, arg1)))
        return false;
    hlsl_block_add_instr(block, mul2);

    /* frc = frac(arg1 / cond) */
    if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, mul2, &instr->loc)))
        return false;
    hlsl_block_add_instr(block, frc);

    /* result = frac(arg1 / cond) * cond */
    if (!(mul3 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, frc, cond)))
        return false;
    hlsl_block_add_instr(block, mul3);

    return true;
}
|
|
|
|
|
2023-06-08 03:47:40 -07:00
|
|
|
static bool lower_discard_neg(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_node *zero, *bool_false, *or, *cmp, *load;
|
|
|
|
static const struct hlsl_constant_value zero_value;
|
|
|
|
struct hlsl_type *arg_type, *cmp_type;
|
|
|
|
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = { 0 };
|
|
|
|
struct hlsl_ir_jump *jump;
|
2023-06-29 21:24:53 -07:00
|
|
|
struct hlsl_block block;
|
2023-06-08 03:47:40 -07:00
|
|
|
unsigned int i, count;
|
|
|
|
|
|
|
|
if (instr->type != HLSL_IR_JUMP)
|
|
|
|
return false;
|
|
|
|
jump = hlsl_ir_jump(instr);
|
|
|
|
if (jump->type != HLSL_IR_JUMP_DISCARD_NEG)
|
|
|
|
return false;
|
|
|
|
|
2023-06-29 21:24:53 -07:00
|
|
|
hlsl_block_init(&block);
|
2023-06-08 03:47:40 -07:00
|
|
|
|
|
|
|
arg_type = jump->condition.node->data_type;
|
|
|
|
if (!(zero = hlsl_new_constant(ctx, arg_type, &zero_value, &instr->loc)))
|
|
|
|
return false;
|
2023-06-29 21:24:53 -07:00
|
|
|
hlsl_block_add_instr(&block, zero);
|
2023-06-08 03:47:40 -07:00
|
|
|
|
|
|
|
operands[0] = jump->condition.node;
|
|
|
|
operands[1] = zero;
|
|
|
|
cmp_type = hlsl_get_numeric_type(ctx, arg_type->class, HLSL_TYPE_BOOL, arg_type->dimx, arg_type->dimy);
|
|
|
|
if (!(cmp = hlsl_new_expr(ctx, HLSL_OP2_LESS, operands, cmp_type, &instr->loc)))
|
|
|
|
return false;
|
2023-06-29 21:24:53 -07:00
|
|
|
hlsl_block_add_instr(&block, cmp);
|
2023-06-08 03:47:40 -07:00
|
|
|
|
|
|
|
if (!(bool_false = hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL), &zero_value, &instr->loc)))
|
|
|
|
return false;
|
2023-06-29 21:24:53 -07:00
|
|
|
hlsl_block_add_instr(&block, bool_false);
|
2023-06-08 03:47:40 -07:00
|
|
|
|
|
|
|
or = bool_false;
|
|
|
|
|
|
|
|
count = hlsl_type_component_count(cmp_type);
|
|
|
|
for (i = 0; i < count; ++i)
|
|
|
|
{
|
2022-11-14 18:44:44 -08:00
|
|
|
if (!(load = hlsl_add_load_component(ctx, &block, cmp, i, &instr->loc)))
|
2023-06-08 03:47:40 -07:00
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!(or = hlsl_new_binary_expr(ctx, HLSL_OP2_LOGIC_OR, or, load)))
|
|
|
|
return NULL;
|
2023-06-29 21:24:53 -07:00
|
|
|
hlsl_block_add_instr(&block, or);
|
2023-06-08 03:47:40 -07:00
|
|
|
}
|
|
|
|
|
2023-06-29 21:24:53 -07:00
|
|
|
list_move_tail(&instr->entry, &block.instrs);
|
2023-06-08 03:47:40 -07:00
|
|
|
hlsl_src_remove(&jump->condition);
|
|
|
|
hlsl_src_from_node(&jump->condition, or);
|
|
|
|
jump->type = HLSL_IR_JUMP_DISCARD_NZ;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-03-16 14:31:54 -07:00
|
|
|
/* Dead-code elimination for a single instruction: remove pure instructions
 * with no uses, and stores to variables that are never read afterwards.
 * Returns true iff the instruction was removed (and freed). */
static bool dce(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    switch (instr->type)
    {
        /* Pure, side-effect-free instructions: safe to drop when unused. */
        case HLSL_IR_CONSTANT:
        case HLSL_IR_EXPR:
        case HLSL_IR_INDEX:
        case HLSL_IR_LOAD:
        case HLSL_IR_RESOURCE_LOAD:
        case HLSL_IR_SWIZZLE:
            if (list_empty(&instr->uses))
            {
                list_remove(&instr->entry);
                hlsl_free_instr(instr);
                return true;
            }
            break;

        case HLSL_IR_STORE:
        {
            struct hlsl_ir_store *store = hlsl_ir_store(instr);
            struct hlsl_ir_var *var = store->lhs.var;

            /* A store is dead if the variable's last read (per liveness
             * indices) precedes this instruction. */
            if (var->last_read < instr->index)
            {
                list_remove(&instr->entry);
                hlsl_free_instr(instr);
                return true;
            }
            break;
        }

        /* Control flow and side-effecting instructions are never removed.
         * No "default" case, so the compiler flags new node types here. */
        case HLSL_IR_CALL:
        case HLSL_IR_IF:
        case HLSL_IR_JUMP:
        case HLSL_IR_LOOP:
        case HLSL_IR_RESOURCE_STORE:
            break;
    }

    return false;
}
|
|
|
|
|
2021-03-02 13:34:46 -08:00
|
|
|
/* Allocate a unique, ordered index to each instruction, which will be used for
|
|
|
|
* computing liveness ranges. */
|
2021-10-15 14:54:10 -07:00
|
|
|
static unsigned int index_instructions(struct hlsl_block *block, unsigned int index)
|
2021-03-02 13:34:46 -08:00
|
|
|
{
|
|
|
|
struct hlsl_ir_node *instr;
|
|
|
|
|
2021-10-15 14:54:10 -07:00
|
|
|
LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry)
|
2021-03-02 13:34:46 -08:00
|
|
|
{
|
|
|
|
instr->index = index++;
|
|
|
|
|
|
|
|
if (instr->type == HLSL_IR_IF)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_if *iff = hlsl_ir_if(instr);
|
2022-11-10 18:04:22 -08:00
|
|
|
index = index_instructions(&iff->then_block, index);
|
|
|
|
index = index_instructions(&iff->else_block, index);
|
2021-03-02 13:34:46 -08:00
|
|
|
}
|
|
|
|
else if (instr->type == HLSL_IR_LOOP)
|
|
|
|
{
|
|
|
|
index = index_instructions(&hlsl_ir_loop(instr)->body, index);
|
|
|
|
hlsl_ir_loop(instr)->next_index = index;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return index;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dump_function_decl(struct rb_entry *entry, void *context)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_function_decl *func = RB_ENTRY_VALUE(entry, struct hlsl_ir_function_decl, entry);
|
2021-05-20 22:32:22 -07:00
|
|
|
struct hlsl_ctx *ctx = context;
|
2021-03-02 13:34:46 -08:00
|
|
|
|
2021-10-15 14:54:09 -07:00
|
|
|
if (func->has_body)
|
2021-05-20 22:32:22 -07:00
|
|
|
hlsl_dump_function(ctx, func);
|
2021-03-02 13:34:46 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void dump_function(struct rb_entry *entry, void *context)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_function *func = RB_ENTRY_VALUE(entry, struct hlsl_ir_function, entry);
|
2021-05-20 22:32:22 -07:00
|
|
|
struct hlsl_ctx *ctx = context;
|
|
|
|
|
|
|
|
rb_for_each_entry(&func->overloads, dump_function_decl, ctx);
|
2021-03-02 13:34:46 -08:00
|
|
|
}
|
|
|
|
|
2023-01-30 13:27:32 -08:00
|
|
|
static char get_regset_name(enum hlsl_regset regset)
|
|
|
|
{
|
|
|
|
switch (regset)
|
|
|
|
{
|
|
|
|
case HLSL_REGSET_SAMPLERS:
|
|
|
|
return 's';
|
|
|
|
case HLSL_REGSET_TEXTURES:
|
|
|
|
return 't';
|
|
|
|
case HLSL_REGSET_UAVS:
|
|
|
|
return 'u';
|
|
|
|
case HLSL_REGSET_NUMERIC:
|
|
|
|
vkd3d_unreachable();
|
|
|
|
}
|
|
|
|
vkd3d_unreachable();
|
|
|
|
}
|
|
|
|
|
2022-11-24 14:36:20 -08:00
|
|
|
/* Apply explicit register() reservations for resource variables: validate that
 * the reserved register type matches the variable's register set, and if so
 * record the reservation as the variable's allocated register range. */
static void allocate_register_reservations(struct hlsl_ctx *ctx)
{
    struct hlsl_ir_var *var;

    LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
    {
        enum hlsl_regset regset;

        /* Only resource variables (textures, samplers, UAVs) can carry these
         * reservations; numeric constants are handled elsewhere. */
        if (!hlsl_type_is_resource(var->data_type))
            continue;
        regset = hlsl_type_get_regset(var->data_type);

        /* A zero allocation_size means the resource is never actually used,
         * in which case no register needs to be claimed for it. */
        if (var->reg_reservation.reg_type && var->regs[regset].allocation_size)
        {
            if (var->reg_reservation.reg_type != get_regset_name(regset))
            {
                struct vkd3d_string_buffer *type_string;

                /* E.g. a texture bound with register(s0): the letter must
                 * match the variable's register set. */
                type_string = hlsl_type_to_string(ctx, var->data_type);
                hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
                        "Object of type '%s' must be bound to register type '%c'.",
                        type_string->buffer, get_regset_name(regset));
                hlsl_release_string_buffer(ctx, type_string);
            }
            else
            {
                var->regs[regset].allocated = true;
                var->regs[regset].id = var->reg_reservation.reg_index;
                /* NOTE(review): the upper bound printed here is
                 * reg_index + allocation_size, i.e. one past the last register
                 * if the range is meant to be inclusive — confirm intended
                 * formatting against debug_register(), which prints "- 1". */
                TRACE("Allocated reserved %s to %c%u-%c%u.\n", var->name, var->reg_reservation.reg_type,
                        var->reg_reservation.reg_index, var->reg_reservation.reg_type,
                        var->reg_reservation.reg_index + var->regs[regset].allocation_size);
            }
        }
    }
}
|
|
|
|
|
2021-03-02 13:34:46 -08:00
|
|
|
/* Compute the earliest and latest liveness for each variable. In the case that
 * a variable is accessed inside of a loop, we promote its liveness to extend
 * to at least the range of the entire loop. We also do this for nodes, so that
 * nodes produced before the loop have their temp register protected from being
 * overridden after the last read within an iteration. */
static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop_first, unsigned int loop_last)
{
    struct hlsl_ir_node *instr;
    struct hlsl_ir_var *var;

    LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry)
    {
        /* Inside a loop (loop_last != 0), every read is extended to the end of
         * the loop, so values defined before the loop survive all iterations. */
        const unsigned int last_read = loop_last ? max(instr->index, loop_last) : instr->index;

        switch (instr->type)
        {
            case HLSL_IR_CALL:
                /* We should have inlined all calls before computing liveness. */
                vkd3d_unreachable();

            case HLSL_IR_STORE:
            {
                struct hlsl_ir_store *store = hlsl_ir_store(instr);

                var = store->lhs.var;
                /* Symmetrically, a first write inside a loop is hoisted back to
                 * the head of the loop. */
                if (!var->first_write)
                    var->first_write = loop_first ? min(instr->index, loop_first) : instr->index;
                store->rhs.node->last_read = last_read;
                if (store->lhs.offset.node)
                    store->lhs.offset.node->last_read = last_read;
                break;
            }
            case HLSL_IR_EXPR:
            {
                struct hlsl_ir_expr *expr = hlsl_ir_expr(instr);
                unsigned int i;

                /* Operand array is NULL-terminated (at most ARRAY_SIZE entries). */
                for (i = 0; i < ARRAY_SIZE(expr->operands) && expr->operands[i].node; ++i)
                    expr->operands[i].node->last_read = last_read;
                break;
            }
            case HLSL_IR_IF:
            {
                struct hlsl_ir_if *iff = hlsl_ir_if(instr);

                /* Branches inherit the enclosing loop bounds, not new ones. */
                compute_liveness_recurse(&iff->then_block, loop_first, loop_last);
                compute_liveness_recurse(&iff->else_block, loop_first, loop_last);
                iff->condition.node->last_read = last_read;
                break;
            }
            case HLSL_IR_LOAD:
            {
                struct hlsl_ir_load *load = hlsl_ir_load(instr);

                var = load->src.var;
                var->last_read = max(var->last_read, last_read);
                if (load->src.offset.node)
                    load->src.offset.node->last_read = last_read;
                break;
            }
            case HLSL_IR_LOOP:
            {
                struct hlsl_ir_loop *loop = hlsl_ir_loop(instr);

                /* For the outermost loop, establish the loop's own index range;
                 * nested loops keep the outermost bounds. */
                compute_liveness_recurse(&loop->body, loop_first ? loop_first : instr->index,
                        loop_last ? loop_last : loop->next_index);
                break;
            }
            case HLSL_IR_RESOURCE_LOAD:
            {
                struct hlsl_ir_resource_load *load = hlsl_ir_resource_load(instr);

                var = load->resource.var;
                var->last_read = max(var->last_read, last_read);
                if (load->resource.offset.node)
                    load->resource.offset.node->last_read = last_read;

                /* Separate sampler deref is only present for some load types. */
                if ((var = load->sampler.var))
                {
                    var->last_read = max(var->last_read, last_read);
                    if (load->sampler.offset.node)
                        load->sampler.offset.node->last_read = last_read;
                }

                if (load->coords.node)
                    load->coords.node->last_read = last_read;
                if (load->texel_offset.node)
                    load->texel_offset.node->last_read = last_read;
                if (load->lod.node)
                    load->lod.node->last_read = last_read;
                if (load->ddx.node)
                    load->ddx.node->last_read = last_read;
                if (load->ddy.node)
                    load->ddy.node->last_read = last_read;
                if (load->sample_index.node)
                    load->sample_index.node->last_read = last_read;
                if (load->cmp.node)
                    load->cmp.node->last_read = last_read;
                break;
            }
            case HLSL_IR_RESOURCE_STORE:
            {
                struct hlsl_ir_resource_store *store = hlsl_ir_resource_store(instr);

                var = store->resource.var;
                var->last_read = max(var->last_read, last_read);
                if (store->resource.offset.node)
                    store->resource.offset.node->last_read = last_read;
                store->coords.node->last_read = last_read;
                store->value.node->last_read = last_read;
                break;
            }
            case HLSL_IR_SWIZZLE:
            {
                struct hlsl_ir_swizzle *swizzle = hlsl_ir_swizzle(instr);

                swizzle->val.node->last_read = last_read;
                break;
            }
            case HLSL_IR_INDEX:
            {
                struct hlsl_ir_index *index = hlsl_ir_index(instr);

                index->val.node->last_read = last_read;
                index->idx.node->last_read = last_read;
                break;
            }
            case HLSL_IR_JUMP:
            {
                struct hlsl_ir_jump *jump = hlsl_ir_jump(instr);

                /* Only conditional jumps (e.g. discard) carry a condition. */
                if (jump->condition.node)
                    jump->condition.node->last_read = last_read;
                break;
            }
            case HLSL_IR_CONSTANT:
                /* Constants have no source operands to mark. */
                break;
        }
    }
}
|
|
|
|
|
|
|
|
static void compute_liveness(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func)
|
|
|
|
{
|
2021-03-17 22:22:22 -07:00
|
|
|
struct hlsl_scope *scope;
|
2021-03-02 13:34:46 -08:00
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
|
2021-03-17 22:22:22 -07:00
|
|
|
/* Index 0 means unused; index 1 means function entry, so start at 2. */
|
2021-10-15 14:54:09 -07:00
|
|
|
index_instructions(&entry_func->body, 2);
|
2021-03-17 22:22:22 -07:00
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(scope, &ctx->scopes, struct hlsl_scope, entry)
|
|
|
|
{
|
|
|
|
LIST_FOR_EACH_ENTRY(var, &scope->vars, struct hlsl_ir_var, scope_entry)
|
|
|
|
var->first_write = var->last_read = 0;
|
|
|
|
}
|
|
|
|
|
2021-04-15 17:03:44 -07:00
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
2021-03-02 13:34:46 -08:00
|
|
|
{
|
2021-04-27 10:14:20 -07:00
|
|
|
if (var->is_uniform || var->is_input_semantic)
|
2021-03-22 15:02:40 -07:00
|
|
|
var->first_write = 1;
|
2021-04-27 10:14:20 -07:00
|
|
|
else if (var->is_output_semantic)
|
2021-03-28 12:46:59 -07:00
|
|
|
var->last_read = UINT_MAX;
|
2021-03-02 13:34:46 -08:00
|
|
|
}
|
|
|
|
|
2021-10-15 14:54:09 -07:00
|
|
|
compute_liveness_recurse(&entry_func->body, 0, 0);
|
2021-03-02 13:34:46 -08:00
|
|
|
}
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
/* Tracks which (register, writemask, liveness interval) triples have been
 * handed out, so that non-overlapping lifetimes can share registers. */
struct register_allocator
{
    /* Number of valid entries in 'allocations' and the array's capacity. */
    size_t count, capacity;

    /* Highest register index that has been allocated.
     * Used to declare sm4 temp count. */
    uint32_t max_reg;

    struct allocation
    {
        uint32_t reg;                 /* Register index this entry occupies. */
        unsigned int writemask;       /* Components of that register in use. */
        unsigned int first_write, last_read;  /* Liveness interval. */
    } *allocations;
};
|
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
static unsigned int get_available_writemask(const struct register_allocator *allocator,
|
|
|
|
unsigned int first_write, unsigned int last_read, uint32_t reg_idx)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 12:09:16 -07:00
|
|
|
unsigned int writemask = VKD3DSP_WRITEMASK_ALL;
|
|
|
|
size_t i;
|
2021-04-08 21:38:23 -07:00
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
for (i = 0; i < allocator->count; ++i)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 12:09:16 -07:00
|
|
|
const struct allocation *allocation = &allocator->allocations[i];
|
|
|
|
|
|
|
|
/* We do not overlap if first write == last read:
|
|
|
|
* this is the case where we are allocating the result of that
|
|
|
|
* expression, e.g. "add r0, r0, r1". */
|
|
|
|
|
|
|
|
if (allocation->reg == reg_idx
|
|
|
|
&& first_write < allocation->last_read && last_read > allocation->first_write)
|
|
|
|
writemask &= ~allocation->writemask;
|
|
|
|
|
|
|
|
if (!writemask)
|
|
|
|
break;
|
2021-04-08 21:38:23 -07:00
|
|
|
}
|
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
return writemask;
|
2021-04-08 21:38:23 -07:00
|
|
|
}
|
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
static void record_allocation(struct hlsl_ctx *ctx, struct register_allocator *allocator,
|
|
|
|
uint32_t reg_idx, unsigned int writemask, unsigned int first_write, unsigned int last_read)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 12:09:16 -07:00
|
|
|
struct allocation *allocation;
|
2021-04-08 21:38:23 -07:00
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
if (!hlsl_array_reserve(ctx, (void **)&allocator->allocations, &allocator->capacity,
|
|
|
|
allocator->count + 1, sizeof(*allocator->allocations)))
|
|
|
|
return;
|
2021-04-08 21:38:23 -07:00
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
allocation = &allocator->allocations[allocator->count++];
|
|
|
|
allocation->reg = reg_idx;
|
|
|
|
allocation->writemask = writemask;
|
|
|
|
allocation->first_write = first_write;
|
|
|
|
allocation->last_read = last_read;
|
|
|
|
|
|
|
|
allocator->max_reg = max(allocator->max_reg, reg_idx);
|
2021-04-08 21:38:23 -07:00
|
|
|
}
|
|
|
|
|
2023-02-06 08:17:32 -08:00
|
|
|
/* reg_size is the number of register components to be reserved, while component_count is the number
|
|
|
|
* of components for the register's writemask. In SM1, floats and vectors allocate the whole
|
|
|
|
* register, even if they don't use it completely. */
|
2023-04-05 13:15:37 -07:00
|
|
|
static struct hlsl_reg allocate_register(struct hlsl_ctx *ctx, struct register_allocator *allocator,
|
2023-02-06 08:17:32 -08:00
|
|
|
unsigned int first_write, unsigned int last_read, unsigned int reg_size,
|
|
|
|
unsigned int component_count)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
|
|
|
struct hlsl_reg ret = {0};
|
2023-04-05 12:09:16 -07:00
|
|
|
unsigned int writemask;
|
|
|
|
uint32_t reg_idx;
|
2021-04-08 21:38:23 -07:00
|
|
|
|
2023-02-06 08:17:32 -08:00
|
|
|
assert(component_count <= reg_size);
|
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
for (reg_idx = 0;; ++reg_idx)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 12:09:16 -07:00
|
|
|
writemask = get_available_writemask(allocator, first_write, last_read, reg_idx);
|
|
|
|
|
|
|
|
if (vkd3d_popcount(writemask) >= reg_size)
|
|
|
|
{
|
|
|
|
writemask = hlsl_combine_writemasks(writemask, (1u << reg_size) - 1);
|
2021-04-08 21:38:23 -07:00
|
|
|
break;
|
2023-04-05 12:09:16 -07:00
|
|
|
}
|
2021-04-08 21:38:23 -07:00
|
|
|
}
|
2023-04-05 12:09:16 -07:00
|
|
|
|
|
|
|
record_allocation(ctx, allocator, reg_idx, writemask, first_write, last_read);
|
|
|
|
|
|
|
|
ret.id = reg_idx;
|
vkd3d-shader/hlsl: Rename hlsl_reg.bind_count to hlsl_reg.allocation_size.
We have to distinguish between the "bind count" and the "allocation size"
of variables.
The "allocation size" affects the starting register id for the resource to
be allocated next, while the "bind count" is determined by the last field
actually used. The former may be larger than the latter.
What we are currently calling hlsl_reg.bind_count is actually the
"allocation size", so a rename is in order.
The real "bind count", which will be introduced in following patches,
is important because it is what should be shown in the RDEF table and
some resource allocation rules depend on it.
For instance, for this shader:
texture2D texs[3];
texture2D tex;
float4 main() : sv_target
{
return texs[0].Load(int3(0, 0, 0)) + tex.Load(int3(0, 0, 0));
}
the variable "texs" has a "bind count" of 1, but an "allocation size" of
3:
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// texs texture float4 2d t0 1
// tex texture float4 2d t3 1
2023-08-04 10:21:27 -07:00
|
|
|
ret.allocation_size = 1;
|
2023-02-06 08:17:32 -08:00
|
|
|
ret.writemask = hlsl_combine_writemasks(writemask, (1u << component_count) - 1);
|
2021-04-08 21:38:23 -07:00
|
|
|
ret.allocated = true;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
static bool is_range_available(const struct register_allocator *allocator,
|
|
|
|
unsigned int first_write, unsigned int last_read, uint32_t reg_idx, unsigned int reg_size)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 12:09:16 -07:00
|
|
|
uint32_t i;
|
2021-04-08 21:38:23 -07:00
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
for (i = 0; i < (reg_size / 4); ++i)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 12:09:16 -07:00
|
|
|
if (get_available_writemask(allocator, first_write, last_read, reg_idx + i) != VKD3DSP_WRITEMASK_ALL)
|
2021-04-08 21:38:23 -07:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
static struct hlsl_reg allocate_range(struct hlsl_ctx *ctx, struct register_allocator *allocator,
|
2023-02-06 07:05:32 -08:00
|
|
|
unsigned int first_write, unsigned int last_read, unsigned int reg_size)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
|
|
|
struct hlsl_reg ret = {0};
|
2023-04-05 12:09:16 -07:00
|
|
|
uint32_t reg_idx;
|
|
|
|
unsigned int i;
|
2021-04-08 21:38:23 -07:00
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
for (reg_idx = 0;; ++reg_idx)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 12:09:16 -07:00
|
|
|
if (is_range_available(allocator, first_write, last_read, reg_idx, reg_size))
|
2021-04-08 21:38:23 -07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
for (i = 0; i < reg_size / 4; ++i)
|
|
|
|
record_allocation(ctx, allocator, reg_idx + i, VKD3DSP_WRITEMASK_ALL, first_write, last_read);
|
|
|
|
|
|
|
|
ret.id = reg_idx;
|
vkd3d-shader/hlsl: Rename hlsl_reg.bind_count to hlsl_reg.allocation_size.
We have to distinguish between the "bind count" and the "allocation size"
of variables.
The "allocation size" affects the starting register id for the resource to
be allocated next, while the "bind count" is determined by the last field
actually used. The former may be larger than the latter.
What we are currently calling hlsl_reg.bind_count is actually the
"allocation size", so a rename is in order.
The real "bind count", which will be introduced in following patches,
is important because it is what should be shown in the RDEF table and
some resource allocation rules depend on it.
For instance, for this shader:
texture2D texs[3];
texture2D tex;
float4 main() : sv_target
{
return texs[0].Load(int3(0, 0, 0)) + tex.Load(int3(0, 0, 0));
}
the variable "texs" has a "bind count" of 1, but an "allocation size" of
3:
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// texs texture float4 2d t0 1
// tex texture float4 2d t3 1
2023-08-04 10:21:27 -07:00
|
|
|
ret.allocation_size = align(reg_size, 4) / 4;
|
2021-04-08 21:38:23 -07:00
|
|
|
ret.allocated = true;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
static struct hlsl_reg allocate_numeric_registers_for_type(struct hlsl_ctx *ctx, struct register_allocator *allocator,
|
2023-02-02 09:41:13 -08:00
|
|
|
unsigned int first_write, unsigned int last_read, const struct hlsl_type *type)
|
|
|
|
{
|
2022-10-28 08:23:05 -07:00
|
|
|
unsigned int reg_size = type->reg_size[HLSL_REGSET_NUMERIC];
|
|
|
|
|
2022-11-11 17:31:55 -08:00
|
|
|
if (type->class <= HLSL_CLASS_VECTOR)
|
2023-04-05 13:15:37 -07:00
|
|
|
return allocate_register(ctx, allocator, first_write, last_read, reg_size, type->dimx);
|
2023-02-02 09:41:13 -08:00
|
|
|
else
|
2023-04-05 13:15:37 -07:00
|
|
|
return allocate_range(ctx, allocator, first_write, last_read, reg_size);
|
2023-02-02 09:41:13 -08:00
|
|
|
}
|
|
|
|
|
2021-04-08 21:38:23 -07:00
|
|
|
/* Return a debug string such as "r0.xyz" or "c2-c4" describing an allocated
 * register (or register range) of the given type. 'class' is the register
 * file letter ('r', 'c', 't', ...). */
static const char *debug_register(char class, struct hlsl_reg reg, const struct hlsl_type *type)
{
    /* Indexed by (reg_size & 3), i.e. the component count of the trailing
     * partial register: 1 -> 'x', 2 -> 'y', 3 -> 'z' name the last used
     * component. Index 0 ('w') is never reached since that case is handled
     * by the no-remainder branch below. */
    static const char writemask_offset[] = {'w','x','y','z'};
    unsigned int reg_size = type->reg_size[HLSL_REGSET_NUMERIC];

    if (reg_size > 4)
    {
        /* Range with a partially-used final register, e.g. "c0-c2.y". */
        if (reg_size & 3)
            return vkd3d_dbg_sprintf("%c%u-%c%u.%c", class, reg.id, class, reg.id + (reg_size / 4),
                    writemask_offset[reg_size & 3]);

        /* Range of fully-used registers; the end index is inclusive. */
        return vkd3d_dbg_sprintf("%c%u-%c%u", class, reg.id, class, reg.id + (reg_size / 4) - 1);
    }
    /* Single register: show the allocated writemask. */
    return vkd3d_dbg_sprintf("%c%u%s", class, reg.id, debug_hlsl_writemask(reg.writemask));
}
|
|
|
|
|
2023-05-25 14:38:31 -07:00
|
|
|
/* IR-walk callback: record the sampler dimension each sampler component is
 * used with, and report an error if a generic sampler is used with two
 * different dimensions. Always returns false (the walk makes no IR changes). */
static bool track_object_components_sampler_dim(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
    struct hlsl_ir_resource_load *load;
    struct hlsl_ir_var *var;
    enum hlsl_regset regset;
    unsigned int index;

    if (instr->type != HLSL_IR_RESOURCE_LOAD)
        return false;

    load = hlsl_ir_resource_load(instr);
    var = load->resource.var;

    regset = hlsl_type_get_regset(hlsl_deref_get_type(ctx, &load->resource));
    /* Non-constant derefs have no single register index; skip them. */
    if (!hlsl_regset_index_from_deref(ctx, &load->resource, regset, &index))
        return false;

    if (regset == HLSL_REGSET_SAMPLERS)
    {
        enum hlsl_sampler_dim dim;

        /* Sampler loads use the resource deref itself, never a separate
         * sampler deref. */
        assert(!load->sampler.var);

        dim = var->objects_usage[regset][index].sampler_dim;
        if (dim != load->sampling_dim)
        {
            /* First concrete use of a generic sampler: remember where, so a
             * later conflicting use can point back to it. */
            if (dim == HLSL_SAMPLER_DIM_GENERIC)
            {
                var->objects_usage[regset][index].first_sampler_dim_loc = instr->loc;
            }
            else
            {
                hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INCONSISTENT_SAMPLER,
                        "Inconsistent generic sampler usage dimension.");
                hlsl_note(ctx, &var->objects_usage[regset][index].first_sampler_dim_loc,
                        VKD3D_SHADER_LOG_ERROR, "First use is here.");
                return false;
            }
        }
    }
    /* Record (or overwrite with the same value) the dimension in use. */
    var->objects_usage[regset][index].sampler_dim = load->sampling_dim;

    return false;
}
|
2022-11-25 14:47:56 -08:00
|
|
|
|
2023-05-25 14:38:31 -07:00
|
|
|
static bool track_object_components_usage(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_resource_load *load;
|
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
enum hlsl_regset regset;
|
|
|
|
unsigned int index;
|
2022-11-25 14:47:56 -08:00
|
|
|
|
2023-05-25 14:38:31 -07:00
|
|
|
if (instr->type != HLSL_IR_RESOURCE_LOAD)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
load = hlsl_ir_resource_load(instr);
|
|
|
|
var = load->resource.var;
|
|
|
|
|
|
|
|
regset = hlsl_type_get_regset(hlsl_deref_get_type(ctx, &load->resource));
|
|
|
|
if (!hlsl_regset_index_from_deref(ctx, &load->resource, regset, &index))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
var->objects_usage[regset][index].used = true;
|
2023-08-03 18:02:11 -07:00
|
|
|
var->bind_count[regset] = max(var->bind_count[regset], index + 1);
|
2023-05-25 14:38:31 -07:00
|
|
|
if (load->sampler.var)
|
|
|
|
{
|
|
|
|
var = load->sampler.var;
|
|
|
|
if (!hlsl_regset_index_from_deref(ctx, &load->sampler, HLSL_REGSET_SAMPLERS, &index))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
var->objects_usage[HLSL_REGSET_SAMPLERS][index].used = true;
|
2023-08-03 18:02:11 -07:00
|
|
|
var->bind_count[HLSL_REGSET_SAMPLERS] = max(var->bind_count[HLSL_REGSET_SAMPLERS], index + 1);
|
2022-11-25 14:47:56 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void calculate_resource_register_counts(struct hlsl_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
struct hlsl_type *type;
|
2023-08-07 15:22:10 -07:00
|
|
|
unsigned int k;
|
2022-11-25 14:47:56 -08:00
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
|
|
|
type = var->data_type;
|
|
|
|
|
|
|
|
for (k = 0; k <= HLSL_REGSET_LAST_OBJECT; ++k)
|
|
|
|
{
|
2023-08-07 15:22:10 -07:00
|
|
|
bool is_separated = var->is_separated_resource;
|
2022-12-01 15:17:08 -08:00
|
|
|
|
2023-08-07 15:22:10 -07:00
|
|
|
if (var->bind_count[k] > 0)
|
|
|
|
var->regs[k].allocation_size = (k == HLSL_REGSET_SAMPLERS || is_separated) ? var->bind_count[k] : type->reg_size[k];
|
2022-11-25 14:47:56 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
static void allocate_variable_temp_register(struct hlsl_ctx *ctx,
|
|
|
|
struct hlsl_ir_var *var, struct register_allocator *allocator)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2021-04-27 10:14:20 -07:00
|
|
|
if (var->is_input_semantic || var->is_output_semantic || var->is_uniform)
|
2021-04-08 21:38:23 -07:00
|
|
|
return;
|
|
|
|
|
2022-11-24 12:03:54 -08:00
|
|
|
if (!var->regs[HLSL_REGSET_NUMERIC].allocated && var->last_read)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 13:15:37 -07:00
|
|
|
var->regs[HLSL_REGSET_NUMERIC] = allocate_numeric_registers_for_type(ctx, allocator,
|
2022-11-24 12:03:54 -08:00
|
|
|
var->first_write, var->last_read, var->data_type);
|
2023-02-02 09:41:13 -08:00
|
|
|
|
2022-11-24 12:03:54 -08:00
|
|
|
TRACE("Allocated %s to %s (liveness %u-%u).\n", var->name, debug_register('r',
|
|
|
|
var->regs[HLSL_REGSET_NUMERIC], var->data_type), var->first_write, var->last_read);
|
2021-04-08 21:38:23 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
/* Walk the block, giving a temp register to every live instruction result,
 * and to every variable at its first load/store; recurses into control flow. */
static void allocate_temp_registers_recurse(struct hlsl_ctx *ctx,
        struct hlsl_block *block, struct register_allocator *allocator)
{
    struct hlsl_ir_node *instr;

    LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry)
    {
        /* In SM4 all constants are inlined. */
        if (ctx->profile->major_version >= 4 && instr->type == HLSL_IR_CONSTANT)
            continue;

        /* Anonymous node results get a register sized for their own type,
         * live from definition to last read. */
        if (!instr->reg.allocated && instr->last_read)
        {
            instr->reg = allocate_numeric_registers_for_type(ctx, allocator, instr->index, instr->last_read,
                    instr->data_type);
            TRACE("Allocated anonymous expression @%u to %s (liveness %u-%u).\n", instr->index,
                    debug_register('r', instr->reg, instr->data_type), instr->index, instr->last_read);
        }

        switch (instr->type)
        {
            case HLSL_IR_IF:
            {
                struct hlsl_ir_if *iff = hlsl_ir_if(instr);
                allocate_temp_registers_recurse(ctx, &iff->then_block, allocator);
                allocate_temp_registers_recurse(ctx, &iff->else_block, allocator);
                break;
            }

            case HLSL_IR_LOAD:
            {
                struct hlsl_ir_load *load = hlsl_ir_load(instr);
                /* We need to at least allocate a variable for undefs.
                 * FIXME: We should probably find a way to remove them instead. */
                allocate_variable_temp_register(ctx, load->src.var, allocator);
                break;
            }

            case HLSL_IR_LOOP:
            {
                struct hlsl_ir_loop *loop = hlsl_ir_loop(instr);
                allocate_temp_registers_recurse(ctx, &loop->body, allocator);
                break;
            }

            case HLSL_IR_STORE:
            {
                struct hlsl_ir_store *store = hlsl_ir_store(instr);
                allocate_variable_temp_register(ctx, store->lhs.var, allocator);
                break;
            }

            default:
                break;
        }
    }
}
|
|
|
|
|
2023-02-24 14:42:26 -08:00
|
|
|
static void record_constant(struct hlsl_ctx *ctx, unsigned int component_index, float f)
|
|
|
|
{
|
|
|
|
struct hlsl_constant_defs *defs = &ctx->constant_defs;
|
|
|
|
struct hlsl_constant_register *reg;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < defs->count; ++i)
|
|
|
|
{
|
|
|
|
reg = &defs->regs[i];
|
|
|
|
if (reg->index == (component_index / 4))
|
|
|
|
{
|
|
|
|
reg->value.f[component_index % 4] = f;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!hlsl_array_reserve(ctx, (void **)&defs->regs, &defs->size, defs->count + 1, sizeof(*defs->regs)))
|
|
|
|
return;
|
|
|
|
reg = &defs->regs[defs->count++];
|
|
|
|
memset(reg, 0, sizeof(*reg));
|
|
|
|
reg->index = component_index / 4;
|
|
|
|
reg->value.f[component_index % 4] = f;
|
|
|
|
}
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
/* Walk the block allocating 'c' registers for HLSL_IR_CONSTANT nodes and
 * recording their component values (converted to float, as SM1 constant
 * registers are float4) into the constant-defs table. */
static void allocate_const_registers_recurse(struct hlsl_ctx *ctx,
        struct hlsl_block *block, struct register_allocator *allocator)
{
    struct hlsl_ir_node *instr;

    LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry)
    {
        switch (instr->type)
        {
            case HLSL_IR_CONSTANT:
            {
                struct hlsl_ir_constant *constant = hlsl_ir_constant(instr);
                const struct hlsl_type *type = instr->data_type;
                unsigned int x, i;

                /* Constants are always live (liveness 1..UINT_MAX) since they
                 * are loaded once into the constant file. */
                constant->reg = allocate_numeric_registers_for_type(ctx, allocator, 1, UINT_MAX, type);
                TRACE("Allocated constant @%u to %s.\n", instr->index, debug_register('c', constant->reg, type));

                assert(type->class <= HLSL_CLASS_LAST_NUMERIC);
                assert(type->dimy == 1);
                assert(constant->reg.writemask);

                /* Scatter the packed constant values into the components
                 * selected by the allocated writemask. */
                for (x = 0, i = 0; x < 4; ++x)
                {
                    const union hlsl_constant_value_component *value;
                    float f;

                    if (!(constant->reg.writemask & (1u << x)))
                        continue;
                    value = &constant->value.u[i++];

                    /* Convert to float, the representation of 'c' registers. */
                    switch (type->base_type)
                    {
                        case HLSL_TYPE_BOOL:
                            f = !!value->u;
                            break;

                        case HLSL_TYPE_FLOAT:
                        case HLSL_TYPE_HALF:
                            f = value->f;
                            break;

                        case HLSL_TYPE_INT:
                            f = value->i;
                            break;

                        case HLSL_TYPE_UINT:
                            f = value->u;
                            break;

                        case HLSL_TYPE_DOUBLE:
                            FIXME("Double constant.\n");
                            return;

                        default:
                            vkd3d_unreachable();
                    }

                    record_constant(ctx, constant->reg.id * 4 + x, f);
                }

                break;
            }

            case HLSL_IR_IF:
            {
                struct hlsl_ir_if *iff = hlsl_ir_if(instr);
                allocate_const_registers_recurse(ctx, &iff->then_block, allocator);
                allocate_const_registers_recurse(ctx, &iff->else_block, allocator);
                break;
            }

            case HLSL_IR_LOOP:
            {
                struct hlsl_ir_loop *loop = hlsl_ir_loop(instr);
                allocate_const_registers_recurse(ctx, &loop->body, allocator);
                break;
            }

            default:
                break;
        }
    }
}
|
|
|
|
|
2021-04-08 21:38:27 -07:00
|
|
|
static void allocate_const_registers(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func)
|
2021-04-08 21:38:26 -07:00
|
|
|
{
|
2023-04-05 13:15:37 -07:00
|
|
|
struct register_allocator allocator = {0};
|
2021-04-08 21:38:27 -07:00
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
|
2021-04-15 17:03:44 -07:00
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
2021-04-08 21:38:27 -07:00
|
|
|
{
|
|
|
|
if (var->is_uniform && var->last_read)
|
|
|
|
{
|
2022-11-24 12:03:54 -08:00
|
|
|
unsigned int reg_size = var->data_type->reg_size[HLSL_REGSET_NUMERIC];
|
|
|
|
|
|
|
|
if (reg_size == 0)
|
2022-09-07 13:51:56 -07:00
|
|
|
continue;
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
var->regs[HLSL_REGSET_NUMERIC] = allocate_numeric_registers_for_type(ctx, &allocator,
|
2022-11-24 12:03:54 -08:00
|
|
|
1, UINT_MAX, var->data_type);
|
|
|
|
TRACE("Allocated %s to %s.\n", var->name,
|
|
|
|
debug_register('c', var->regs[HLSL_REGSET_NUMERIC], var->data_type));
|
2021-04-08 21:38:27 -07:00
|
|
|
}
|
|
|
|
}
|
2023-04-05 13:16:00 -07:00
|
|
|
|
2022-11-07 15:45:35 -08:00
|
|
|
allocate_const_registers_recurse(ctx, &entry_func->body, &allocator);
|
|
|
|
|
2023-04-05 12:09:16 -07:00
|
|
|
vkd3d_free(allocator.allocations);
|
2021-04-08 21:38:26 -07:00
|
|
|
}
|
|
|
|
|
2021-04-08 21:38:23 -07:00
|
|
|
/* Simple greedy temporary register allocation pass that just assigns a unique
|
|
|
|
* index to all (simultaneously live) variables or intermediate values. Agnostic
|
|
|
|
* as to how many registers are actually available for the current backend, and
|
|
|
|
* does not handle constants. */
|
2021-05-20 22:32:20 -07:00
|
|
|
static void allocate_temp_registers(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func)
|
2021-04-08 21:38:23 -07:00
|
|
|
{
|
2023-04-05 13:15:37 -07:00
|
|
|
struct register_allocator allocator = {0};
|
2023-04-05 14:18:25 -07:00
|
|
|
|
|
|
|
/* ps_1_* outputs are special and go in temp register 0. */
|
|
|
|
if (ctx->profile->major_version == 1 && ctx->profile->type == VKD3D_SHADER_TYPE_PIXEL)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < entry_func->parameters.count; ++i)
|
|
|
|
{
|
|
|
|
const struct hlsl_ir_var *var = entry_func->parameters.vars[i];
|
|
|
|
|
|
|
|
if (var->is_output_semantic)
|
|
|
|
{
|
|
|
|
record_allocation(ctx, &allocator, 0, VKD3DSP_WRITEMASK_ALL, var->first_write, var->last_read);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-05 13:15:37 -07:00
|
|
|
allocate_temp_registers_recurse(ctx, &entry_func->body, &allocator);
|
2023-04-05 12:09:16 -07:00
|
|
|
ctx->temp_count = allocator.max_reg + 1;
|
|
|
|
vkd3d_free(allocator.allocations);
|
2021-04-08 21:38:23 -07:00
|
|
|
}
|
|
|
|
|
2021-04-27 10:14:21 -07:00
|
|
|
static void allocate_semantic_register(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, unsigned int *counter, bool output)
|
|
|
|
{
|
2023-04-05 13:22:03 -07:00
|
|
|
static const char *const shader_names[] =
|
2021-08-19 16:44:27 -07:00
|
|
|
{
|
|
|
|
[VKD3D_SHADER_TYPE_PIXEL] = "Pixel",
|
|
|
|
[VKD3D_SHADER_TYPE_VERTEX] = "Vertex",
|
|
|
|
[VKD3D_SHADER_TYPE_GEOMETRY] = "Geometry",
|
|
|
|
[VKD3D_SHADER_TYPE_HULL] = "Hull",
|
|
|
|
[VKD3D_SHADER_TYPE_DOMAIN] = "Domain",
|
|
|
|
[VKD3D_SHADER_TYPE_COMPUTE] = "Compute",
|
|
|
|
};
|
|
|
|
|
|
|
|
unsigned int type;
|
|
|
|
uint32_t reg;
|
|
|
|
bool builtin;
|
|
|
|
|
2021-04-27 10:14:21 -07:00
|
|
|
assert(var->semantic.name);
|
|
|
|
|
|
|
|
if (ctx->profile->major_version < 4)
|
|
|
|
{
|
2021-05-10 21:36:07 -07:00
|
|
|
D3DDECLUSAGE usage;
|
2021-08-19 16:44:27 -07:00
|
|
|
uint32_t usage_idx;
|
2021-05-10 21:36:07 -07:00
|
|
|
|
2023-04-05 14:18:25 -07:00
|
|
|
/* ps_1_* outputs are special and go in temp register 0. */
|
|
|
|
if (ctx->profile->major_version == 1 && output && ctx->profile->type == VKD3D_SHADER_TYPE_PIXEL)
|
|
|
|
return;
|
|
|
|
|
2023-02-24 11:11:06 -08:00
|
|
|
builtin = hlsl_sm1_register_from_semantic(ctx, &var->semantic, output, &type, ®);
|
|
|
|
if (!builtin && !hlsl_sm1_usage_from_semantic(&var->semantic, &usage, &usage_idx))
|
2021-05-10 21:36:07 -07:00
|
|
|
{
|
2021-12-01 08:14:57 -08:00
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SEMANTIC,
|
2021-05-10 21:36:07 -07:00
|
|
|
"Invalid semantic '%s'.", var->semantic.name);
|
|
|
|
return;
|
|
|
|
}
|
2021-04-27 10:14:21 -07:00
|
|
|
|
2021-08-19 16:44:27 -07:00
|
|
|
if ((!output && !var->last_read) || (output && !var->first_write))
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
D3D_NAME usage;
|
2021-08-19 16:44:29 -07:00
|
|
|
bool has_idx;
|
2021-08-19 16:44:27 -07:00
|
|
|
|
|
|
|
if (!hlsl_sm4_usage_from_semantic(ctx, &var->semantic, output, &usage))
|
2021-04-27 10:14:21 -07:00
|
|
|
{
|
2021-12-01 08:14:57 -08:00
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SEMANTIC,
|
2021-08-19 16:44:27 -07:00
|
|
|
"Invalid semantic '%s'.", var->semantic.name);
|
|
|
|
return;
|
2021-04-27 10:14:21 -07:00
|
|
|
}
|
2021-11-15 10:40:04 -08:00
|
|
|
if ((builtin = hlsl_sm4_register_from_semantic(ctx, &var->semantic, output, &type, NULL, &has_idx)))
|
2021-08-19 16:44:29 -07:00
|
|
|
reg = has_idx ? var->semantic.index : 0;
|
2021-08-19 16:44:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (builtin)
|
|
|
|
{
|
|
|
|
TRACE("%s %s semantic %s[%u] matches predefined register %#x[%u].\n", shader_names[ctx->profile->type],
|
|
|
|
output ? "output" : "input", var->semantic.name, var->semantic.index, type, reg);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-11-24 12:03:54 -08:00
|
|
|
var->regs[HLSL_REGSET_NUMERIC].allocated = true;
|
|
|
|
var->regs[HLSL_REGSET_NUMERIC].id = (*counter)++;
|
vkd3d-shader/hlsl: Rename hlsl_reg.bind_count to hlsl_reg.allocation_size.
We have to distinguish between the "bind count" and the "allocation size"
of variables.
The "allocation size" affects the starting register id for the resource to
be allocated next, while the "bind count" is determined by the last field
actually used. The former may be larger than the latter.
What we are currently calling hlsl_reg.bind_count is actually the
"allocation size", so a rename is in order.
The real "bind count", which will be introduced in following patches,
is important because it is what should be shown in the RDEF table and
some resource allocation rules depend on it.
For instance, for this shader:
texture2D texs[3];
texture2D tex;
float4 main() : sv_target
{
return texs[0].Load(int3(0, 0, 0)) + tex.Load(int3(0, 0, 0));
}
the variable "texs" has a "bind count" of 1, but an "allocation size" of
3:
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// texs texture float4 2d t0 1
// tex texture float4 2d t3 1
2023-08-04 10:21:27 -07:00
|
|
|
var->regs[HLSL_REGSET_NUMERIC].allocation_size = 1;
|
2022-11-24 12:03:54 -08:00
|
|
|
var->regs[HLSL_REGSET_NUMERIC].writemask = (1 << var->data_type->dimx) - 1;
|
|
|
|
TRACE("Allocated %s to %s.\n", var->name, debug_register(output ? 'o' : 'v',
|
|
|
|
var->regs[HLSL_REGSET_NUMERIC], var->data_type));
|
2021-04-27 10:14:21 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void allocate_semantic_registers(struct hlsl_ctx *ctx)
|
|
|
|
{
|
|
|
|
unsigned int input_counter = 0, output_counter = 0;
|
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
2021-08-19 16:44:27 -07:00
|
|
|
if (var->is_input_semantic)
|
2021-04-27 10:14:21 -07:00
|
|
|
allocate_semantic_register(ctx, var, &input_counter, false);
|
2021-08-19 16:44:27 -07:00
|
|
|
if (var->is_output_semantic)
|
2021-04-27 10:14:21 -07:00
|
|
|
allocate_semantic_register(ctx, var, &output_counter, true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-23 21:57:35 -07:00
|
|
|
static const struct hlsl_buffer *get_reserved_buffer(struct hlsl_ctx *ctx, uint32_t index)
|
|
|
|
{
|
|
|
|
const struct hlsl_buffer *buffer;
|
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(buffer, &ctx->buffers, const struct hlsl_buffer, entry)
|
|
|
|
{
|
2023-03-06 14:53:29 -08:00
|
|
|
if (buffer->used_size && buffer->reservation.reg_type == 'b' && buffer->reservation.reg_index == index)
|
2021-06-23 21:57:35 -07:00
|
|
|
return buffer;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2023-02-21 12:08:39 -08:00
|
|
|
static void calculate_buffer_offset(struct hlsl_ctx *ctx, struct hlsl_ir_var *var)
|
2021-06-23 21:57:35 -07:00
|
|
|
{
|
2023-02-21 12:08:39 -08:00
|
|
|
unsigned int var_reg_size = var->data_type->reg_size[HLSL_REGSET_NUMERIC];
|
|
|
|
enum hlsl_type_class var_class = var->data_type->class;
|
2021-06-23 21:57:35 -07:00
|
|
|
struct hlsl_buffer *buffer = var->buffer;
|
|
|
|
|
2023-02-21 12:08:39 -08:00
|
|
|
if (var->reg_reservation.offset_type == 'c')
|
|
|
|
{
|
|
|
|
if (var->reg_reservation.offset_index % 4)
|
|
|
|
{
|
|
|
|
if (var_class == HLSL_CLASS_MATRIX)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
|
|
|
|
"packoffset() reservations with matrix types must be aligned with the beginning of a register.");
|
|
|
|
}
|
|
|
|
else if (var_class == HLSL_CLASS_ARRAY)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
|
|
|
|
"packoffset() reservations with array types must be aligned with the beginning of a register.");
|
|
|
|
}
|
|
|
|
else if (var_class == HLSL_CLASS_STRUCT)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
|
|
|
|
"packoffset() reservations with struct types must be aligned with the beginning of a register.");
|
|
|
|
}
|
|
|
|
else if (var_class == HLSL_CLASS_VECTOR)
|
|
|
|
{
|
|
|
|
unsigned int aligned_offset = hlsl_type_get_sm4_offset(var->data_type, var->reg_reservation.offset_index);
|
|
|
|
|
|
|
|
if (var->reg_reservation.offset_index != aligned_offset)
|
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
|
|
|
|
"packoffset() reservations with vector types cannot span multiple registers.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
var->buffer_offset = var->reg_reservation.offset_index;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
var->buffer_offset = hlsl_type_get_sm4_offset(var->data_type, buffer->size);
|
|
|
|
}
|
2021-06-23 21:57:35 -07:00
|
|
|
|
|
|
|
TRACE("Allocated buffer offset %u to %s.\n", var->buffer_offset, var->name);
|
2023-02-21 12:08:39 -08:00
|
|
|
buffer->size = max(buffer->size, var->buffer_offset + var_reg_size);
|
2021-06-23 21:57:35 -07:00
|
|
|
if (var->last_read)
|
2023-02-21 12:08:39 -08:00
|
|
|
buffer->used_size = max(buffer->used_size, var->buffer_offset + var_reg_size);
|
2021-06-23 21:57:35 -07:00
|
|
|
}
|
|
|
|
|
2023-02-21 16:53:32 -08:00
|
|
|
static void validate_buffer_offsets(struct hlsl_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_var *var1, *var2;
|
|
|
|
struct hlsl_buffer *buffer;
|
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var1, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
2023-05-29 18:59:17 -07:00
|
|
|
if (!var1->is_uniform || hlsl_type_is_resource(var1->data_type))
|
2023-02-21 16:53:32 -08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
buffer = var1->buffer;
|
|
|
|
if (!buffer->used_size)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var2, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
|
|
|
unsigned int var1_reg_size, var2_reg_size;
|
|
|
|
|
2023-05-29 18:59:17 -07:00
|
|
|
if (!var2->is_uniform || hlsl_type_is_resource(var2->data_type))
|
2023-02-21 16:53:32 -08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (var1 == var2 || var1->buffer != var2->buffer)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* This is to avoid reporting the error twice for the same pair of overlapping variables. */
|
|
|
|
if (strcmp(var1->name, var2->name) >= 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
var1_reg_size = var1->data_type->reg_size[HLSL_REGSET_NUMERIC];
|
|
|
|
var2_reg_size = var2->data_type->reg_size[HLSL_REGSET_NUMERIC];
|
|
|
|
|
|
|
|
if (var1->buffer_offset < var2->buffer_offset + var2_reg_size
|
|
|
|
&& var2->buffer_offset < var1->buffer_offset + var1_reg_size)
|
|
|
|
hlsl_error(ctx, &buffer->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
|
|
|
|
"Invalid packoffset() reservation: Variables %s and %s overlap.",
|
|
|
|
var1->name, var2->name);
|
|
|
|
}
|
|
|
|
}
|
2023-02-21 17:39:24 -08:00
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var1, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
|
|
|
buffer = var1->buffer;
|
2023-03-01 10:57:31 -08:00
|
|
|
if (!buffer || buffer == ctx->globals_buffer)
|
2023-02-21 17:39:24 -08:00
|
|
|
continue;
|
|
|
|
|
2023-03-01 10:57:31 -08:00
|
|
|
if (var1->reg_reservation.offset_type
|
|
|
|
|| (var1->data_type->class == HLSL_CLASS_OBJECT && var1->reg_reservation.reg_type))
|
2023-02-21 17:39:24 -08:00
|
|
|
buffer->manually_packed_elements = true;
|
|
|
|
else
|
|
|
|
buffer->automatically_packed_elements = true;
|
|
|
|
|
|
|
|
if (buffer->manually_packed_elements && buffer->automatically_packed_elements)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &buffer->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
|
|
|
|
"packoffset() must be specified for all the buffer elements, or none of them.");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2023-02-21 16:53:32 -08:00
|
|
|
}
|
|
|
|
|
2021-06-23 21:57:35 -07:00
|
|
|
static void allocate_buffers(struct hlsl_ctx *ctx)
|
|
|
|
{
|
2021-07-08 19:13:18 -07:00
|
|
|
struct hlsl_buffer *buffer;
|
2021-06-23 21:57:35 -07:00
|
|
|
struct hlsl_ir_var *var;
|
|
|
|
uint32_t index = 0;
|
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
2023-05-29 18:59:17 -07:00
|
|
|
if (var->is_uniform && !hlsl_type_is_resource(var->data_type))
|
2021-06-23 21:57:35 -07:00
|
|
|
{
|
|
|
|
if (var->is_param)
|
2021-07-08 19:13:18 -07:00
|
|
|
var->buffer = ctx->params_buffer;
|
2021-06-23 21:57:35 -07:00
|
|
|
|
2023-02-21 12:08:39 -08:00
|
|
|
calculate_buffer_offset(ctx, var);
|
2021-06-23 21:57:35 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-21 16:53:32 -08:00
|
|
|
validate_buffer_offsets(ctx);
|
|
|
|
|
2021-06-23 21:57:35 -07:00
|
|
|
LIST_FOR_EACH_ENTRY(buffer, &ctx->buffers, struct hlsl_buffer, entry)
|
|
|
|
{
|
|
|
|
if (!buffer->used_size)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (buffer->type == HLSL_BUFFER_CONSTANT)
|
|
|
|
{
|
2023-03-06 14:53:29 -08:00
|
|
|
if (buffer->reservation.reg_type == 'b')
|
2021-06-23 21:57:35 -07:00
|
|
|
{
|
2023-03-06 14:53:29 -08:00
|
|
|
const struct hlsl_buffer *reserved_buffer = get_reserved_buffer(ctx, buffer->reservation.reg_index);
|
2021-06-23 21:57:35 -07:00
|
|
|
|
|
|
|
if (reserved_buffer && reserved_buffer != buffer)
|
|
|
|
{
|
2021-12-01 08:14:57 -08:00
|
|
|
hlsl_error(ctx, &buffer->loc, VKD3D_SHADER_ERROR_HLSL_OVERLAPPING_RESERVATIONS,
|
2023-03-06 14:53:29 -08:00
|
|
|
"Multiple buffers bound to cb%u.", buffer->reservation.reg_index);
|
2021-12-01 08:14:57 -08:00
|
|
|
hlsl_note(ctx, &reserved_buffer->loc, VKD3D_SHADER_LOG_ERROR,
|
2023-03-06 14:53:29 -08:00
|
|
|
"Buffer %s is already bound to cb%u.", reserved_buffer->name, buffer->reservation.reg_index);
|
2021-06-23 21:57:35 -07:00
|
|
|
}
|
|
|
|
|
2023-03-06 14:53:29 -08:00
|
|
|
buffer->reg.id = buffer->reservation.reg_index;
|
vkd3d-shader/hlsl: Rename hlsl_reg.bind_count to hlsl_reg.allocation_size.
We have to distinguish between the "bind count" and the "allocation size"
of variables.
The "allocation size" affects the starting register id for the resource to
be allocated next, while the "bind count" is determined by the last field
actually used. The former may be larger than the latter.
What we are currently calling hlsl_reg.bind_count is actually the
"allocation size", so a rename is in order.
The real "bind count", which will be introduced in following patches,
is important because it is what should be shown in the RDEF table and
some resource allocation rules depend on it.
For instance, for this shader:
texture2D texs[3];
texture2D tex;
float4 main() : sv_target
{
return texs[0].Load(int3(0, 0, 0)) + tex.Load(int3(0, 0, 0));
}
the variable "texs" has a "bind count" of 1, but an "allocation size" of
3:
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// texs texture float4 2d t0 1
// tex texture float4 2d t3 1
2023-08-04 10:21:27 -07:00
|
|
|
buffer->reg.allocation_size = 1;
|
2021-06-23 21:57:35 -07:00
|
|
|
buffer->reg.allocated = true;
|
|
|
|
TRACE("Allocated reserved %s to cb%u.\n", buffer->name, index);
|
|
|
|
}
|
2023-03-06 14:53:29 -08:00
|
|
|
else if (!buffer->reservation.reg_type)
|
2021-06-23 21:57:35 -07:00
|
|
|
{
|
|
|
|
while (get_reserved_buffer(ctx, index))
|
|
|
|
++index;
|
|
|
|
|
|
|
|
buffer->reg.id = index;
|
vkd3d-shader/hlsl: Rename hlsl_reg.bind_count to hlsl_reg.allocation_size.
We have to distinguish between the "bind count" and the "allocation size"
of variables.
The "allocation size" affects the starting register id for the resource to
be allocated next, while the "bind count" is determined by the last field
actually used. The former may be larger than the latter.
What we are currently calling hlsl_reg.bind_count is actually the
"allocation size", so a rename is in order.
The real "bind count", which will be introduced in following patches,
is important because it is what should be shown in the RDEF table and
some resource allocation rules depend on it.
For instance, for this shader:
texture2D texs[3];
texture2D tex;
float4 main() : sv_target
{
return texs[0].Load(int3(0, 0, 0)) + tex.Load(int3(0, 0, 0));
}
the variable "texs" has a "bind count" of 1, but an "allocation size" of
3:
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// texs texture float4 2d t0 1
// tex texture float4 2d t3 1
2023-08-04 10:21:27 -07:00
|
|
|
buffer->reg.allocation_size = 1;
|
2021-06-23 21:57:35 -07:00
|
|
|
buffer->reg.allocated = true;
|
|
|
|
TRACE("Allocated %s to cb%u.\n", buffer->name, index);
|
|
|
|
++index;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2021-12-01 08:14:57 -08:00
|
|
|
hlsl_error(ctx, &buffer->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RESERVATION,
|
2021-06-23 21:57:35 -07:00
|
|
|
"Constant buffers must be allocated to register type 'b'.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
FIXME("Allocate registers for texture buffers.\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-24 14:36:20 -08:00
|
|
|
static const struct hlsl_ir_var *get_allocated_object(struct hlsl_ctx *ctx, enum hlsl_regset regset,
|
2023-06-27 11:14:37 -07:00
|
|
|
uint32_t index, bool allocated_only)
|
2021-10-11 19:58:46 -07:00
|
|
|
{
|
|
|
|
const struct hlsl_ir_var *var;
|
2023-04-25 09:41:38 -07:00
|
|
|
unsigned int start, count;
|
2021-10-11 19:58:46 -07:00
|
|
|
|
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, const struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
2023-06-21 10:00:39 -07:00
|
|
|
if (var->reg_reservation.reg_type == get_regset_name(regset)
|
|
|
|
&& var->data_type->reg_size[regset])
|
|
|
|
{
|
|
|
|
/* Vars with a reservation prevent non-reserved vars from being
|
|
|
|
* bound there even if the reserved vars aren't used. */
|
|
|
|
start = var->reg_reservation.reg_index;
|
|
|
|
count = var->data_type->reg_size[regset];
|
2023-06-27 11:14:37 -07:00
|
|
|
|
|
|
|
if (!var->regs[regset].allocated && allocated_only)
|
|
|
|
continue;
|
2023-06-21 10:00:39 -07:00
|
|
|
}
|
|
|
|
else if (var->regs[regset].allocated)
|
|
|
|
{
|
|
|
|
start = var->regs[regset].id;
|
vkd3d-shader/hlsl: Rename hlsl_reg.bind_count to hlsl_reg.allocation_size.
We have to distinguish between the "bind count" and the "allocation size"
of variables.
The "allocation size" affects the starting register id for the resource to
be allocated next, while the "bind count" is determined by the last field
actually used. The former may be larger than the latter.
What we are currently calling hlsl_reg.bind_count is actually the
"allocation size", so a rename is in order.
The real "bind count", which will be introduced in following patches,
is important because it is what should be shown in the RDEF table and
some resource allocation rules depend on it.
For instance, for this shader:
texture2D texs[3];
texture2D tex;
float4 main() : sv_target
{
return texs[0].Load(int3(0, 0, 0)) + tex.Load(int3(0, 0, 0));
}
the variable "texs" has a "bind count" of 1, but an "allocation size" of
3:
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// texs texture float4 2d t0 1
// tex texture float4 2d t3 1
2023-08-04 10:21:27 -07:00
|
|
|
count = var->regs[regset].allocation_size;
|
2023-06-21 10:00:39 -07:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2022-11-24 14:36:20 -08:00
|
|
|
continue;
|
2023-06-21 10:00:39 -07:00
|
|
|
}
|
2023-04-25 09:41:38 -07:00
|
|
|
|
|
|
|
if (start <= index && index < start + count)
|
2021-10-11 19:58:46 -07:00
|
|
|
return var;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2023-01-30 13:27:32 -08:00
|
|
|
static void allocate_objects(struct hlsl_ctx *ctx, enum hlsl_regset regset)
|
2021-11-19 07:44:48 -08:00
|
|
|
{
|
2023-01-30 13:27:32 -08:00
|
|
|
char regset_name = get_regset_name(regset);
|
2021-10-11 19:58:46 -07:00
|
|
|
struct hlsl_ir_var *var;
|
2021-08-12 19:26:15 -07:00
|
|
|
uint32_t min_index = 0;
|
|
|
|
|
2023-01-30 13:27:32 -08:00
|
|
|
if (regset == HLSL_REGSET_UAVS)
|
2021-08-12 19:26:15 -07:00
|
|
|
{
|
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
|
|
|
if (var->semantic.name && (!ascii_strcasecmp(var->semantic.name, "color")
|
|
|
|
|| !ascii_strcasecmp(var->semantic.name, "sv_target")))
|
|
|
|
min_index = max(min_index, var->semantic.index + 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-11 19:58:46 -07:00
|
|
|
LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
|
|
|
|
{
|
vkd3d-shader/hlsl: Rename hlsl_reg.bind_count to hlsl_reg.allocation_size.
We have to distinguish between the "bind count" and the "allocation size"
of variables.
The "allocation size" affects the starting register id for the resource to
be allocated next, while the "bind count" is determined by the last field
actually used. The former may be larger than the latter.
What we are currently calling hlsl_reg.bind_count is actually the
"allocation size", so a rename is in order.
The real "bind count", which will be introduced in following patches,
is important because it is what should be shown in the RDEF table and
some resource allocation rules depend on it.
For instance, for this shader:
texture2D texs[3];
texture2D tex;
float4 main() : sv_target
{
return texs[0].Load(int3(0, 0, 0)) + tex.Load(int3(0, 0, 0));
}
the variable "texs" has a "bind count" of 1, but an "allocation size" of
3:
// Resource Bindings:
//
// Name Type Format Dim HLSL Bind Count
// ------------------------------ ---------- ------- ----------- -------------- ------
// texs texture float4 2d t0 1
// tex texture float4 2d t3 1
2023-08-04 10:21:27 -07:00
|
|
|
unsigned int count = var->regs[regset].allocation_size;
|
2023-04-25 09:41:38 -07:00
|
|
|
|
|
|
|
if (count == 0)
|
2021-10-11 19:58:46 -07:00
|
|
|
continue;
|
|
|
|
|
2023-06-27 11:14:37 -07:00
|
|
|
/* The variable was already allocated if it has a reservation. */
|
2022-11-24 14:36:20 -08:00
|
|
|
if (var->regs[regset].allocated)
|
2021-10-11 19:58:46 -07:00
|
|
|
{
|
2023-04-25 09:41:38 -07:00
|
|
|
const struct hlsl_ir_var *reserved_object, *last_reported = NULL;
|
|
|
|
unsigned int index, i;
|
2022-11-24 14:36:20 -08:00
|
|
|
|
|
|
|
if (var->regs[regset].id < min_index)
|
2021-08-12 19:26:15 -07:00
|
|
|
{
|
2023-01-30 13:27:32 -08:00
|
|
|
assert(regset == HLSL_REGSET_UAVS);
|
2021-08-12 19:26:15 -07:00
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_OVERLAPPING_RESERVATIONS,
|
|
|
|
"UAV index (%u) must be higher than the maximum render target index (%u).",
|
2022-11-24 14:36:20 -08:00
|
|
|
var->regs[regset].id, min_index - 1);
|
2023-04-25 09:41:38 -07:00
|
|
|
continue;
|
2021-08-12 19:26:15 -07:00
|
|
|
}
|
2023-04-25 09:41:38 -07:00
|
|
|
|
|
|
|
for (i = 0; i < count; ++i)
|
2021-10-11 19:58:46 -07:00
|
|
|
{
|
2023-04-25 09:41:38 -07:00
|
|
|
index = var->regs[regset].id + i;
|
2021-10-11 19:58:46 -07:00
|
|
|
|
2023-06-27 11:14:37 -07:00
|
|
|
/* get_allocated_object() may return "var" itself, but we
|
|
|
|
* actually want that, otherwise we'll end up reporting the
|
|
|
|
* same conflict between the same two variables twice. */
|
|
|
|
reserved_object = get_allocated_object(ctx, regset, index, true);
|
2023-04-25 09:41:38 -07:00
|
|
|
if (reserved_object && reserved_object != var && reserved_object != last_reported)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_OVERLAPPING_RESERVATIONS,
|
|
|
|
"Multiple variables bound to %c%u.", regset_name, index);
|
|
|
|
hlsl_note(ctx, &reserved_object->loc, VKD3D_SHADER_LOG_ERROR,
|
|
|
|
"Variable '%s' is already bound to %c%u.", reserved_object->name,
|
|
|
|
regset_name, index);
|
|
|
|
last_reported = reserved_object;
|
|
|
|
}
|
|
|
|
}
|
2021-10-11 19:58:46 -07:00
|
|
|
}
|
2022-11-24 14:36:20 -08:00
|
|
|
else
|
2021-10-11 19:58:46 -07:00
|
|
|
{
|
2023-04-25 09:41:38 -07:00
|
|
|
unsigned int index = min_index;
|
|
|
|
unsigned int available = 0;
|
|
|
|
|
|
|
|
while (available < count)
|
|
|
|
{
|
2023-06-27 11:14:37 -07:00
|
|
|
if (get_allocated_object(ctx, regset, index, false))
|
2023-04-25 09:41:38 -07:00
|
|
|
available = 0;
|
|
|
|
else
|
|
|
|
++available;
|
2021-10-11 19:58:46 -07:00
|
|
|
++index;
|
2023-04-25 09:41:38 -07:00
|
|
|
}
|
|
|
|
index -= count;
|
2021-10-11 19:58:46 -07:00
|
|
|
|
2022-11-24 12:03:54 -08:00
|
|
|
var->regs[regset].id = index;
|
|
|
|
var->regs[regset].allocated = true;
|
2023-04-25 09:41:38 -07:00
|
|
|
TRACE("Allocated variable %s to %c%u-%c%u.\n", var->name, regset_name, index, regset_name,
|
|
|
|
index + count);
|
2021-10-11 19:58:46 -07:00
|
|
|
++index;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-20 12:37:07 -07:00
|
|
|
bool hlsl_component_index_range_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
|
|
|
|
unsigned int *start, unsigned int *count)
|
|
|
|
{
|
|
|
|
struct hlsl_type *type = deref->var->data_type;
|
|
|
|
unsigned int i, k;
|
|
|
|
|
|
|
|
*start = 0;
|
|
|
|
*count = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < deref->path_len; ++i)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_node *path_node = deref->path[i].node;
|
|
|
|
unsigned int idx = 0;
|
|
|
|
|
|
|
|
assert(path_node);
|
|
|
|
if (path_node->type != HLSL_IR_CONSTANT)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* We should always have generated a cast to UINT. */
|
2022-11-11 17:31:55 -08:00
|
|
|
assert(path_node->data_type->class == HLSL_CLASS_SCALAR
|
2022-07-20 12:37:07 -07:00
|
|
|
&& path_node->data_type->base_type == HLSL_TYPE_UINT);
|
|
|
|
|
2022-11-11 16:39:55 -08:00
|
|
|
idx = hlsl_ir_constant(path_node)->value.u[0].u;
|
2022-07-20 12:37:07 -07:00
|
|
|
|
2022-11-11 17:31:55 -08:00
|
|
|
switch (type->class)
|
2022-07-20 12:37:07 -07:00
|
|
|
{
|
|
|
|
case HLSL_CLASS_VECTOR:
|
|
|
|
if (idx >= type->dimx)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &path_node->loc, VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS,
|
|
|
|
"Vector index is out of bounds. %u/%u", idx, type->dimx);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
*start += idx;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case HLSL_CLASS_MATRIX:
|
|
|
|
if (idx >= hlsl_type_major_size(type))
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &path_node->loc, VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS,
|
|
|
|
"Matrix index is out of bounds. %u/%u", idx, hlsl_type_major_size(type));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (hlsl_type_is_row_major(type))
|
|
|
|
*start += idx * type->dimx;
|
|
|
|
else
|
|
|
|
*start += idx * type->dimy;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case HLSL_CLASS_ARRAY:
|
|
|
|
if (idx >= type->e.array.elements_count)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &path_node->loc, VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS,
|
|
|
|
"Array index is out of bounds. %u/%u", idx, type->e.array.elements_count);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
*start += idx * hlsl_type_component_count(type->e.array.type);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case HLSL_CLASS_STRUCT:
|
|
|
|
for (k = 0; k < idx; ++k)
|
|
|
|
*start += hlsl_type_component_count(type->e.record.fields[k].type);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2022-08-31 04:25:24 -07:00
|
|
|
vkd3d_unreachable();
|
2022-07-20 12:37:07 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
type = hlsl_get_element_type_from_path_index(ctx, type, path_node);
|
|
|
|
}
|
|
|
|
|
|
|
|
*count = hlsl_type_component_count(type);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-11-25 14:47:56 -08:00
|
|
|
bool hlsl_regset_index_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
|
|
|
|
enum hlsl_regset regset, unsigned int *index)
|
|
|
|
{
|
|
|
|
struct hlsl_type *type = deref->var->data_type;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
assert(regset <= HLSL_REGSET_LAST_OBJECT);
|
|
|
|
|
|
|
|
*index = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < deref->path_len; ++i)
|
|
|
|
{
|
|
|
|
struct hlsl_ir_node *path_node = deref->path[i].node;
|
|
|
|
unsigned int idx = 0;
|
|
|
|
|
|
|
|
assert(path_node);
|
|
|
|
if (path_node->type != HLSL_IR_CONSTANT)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* We should always have generated a cast to UINT. */
|
|
|
|
assert(path_node->data_type->class == HLSL_CLASS_SCALAR
|
|
|
|
&& path_node->data_type->base_type == HLSL_TYPE_UINT);
|
|
|
|
|
|
|
|
idx = hlsl_ir_constant(path_node)->value.u[0].u;
|
|
|
|
|
|
|
|
switch (type->class)
|
|
|
|
{
|
|
|
|
case HLSL_CLASS_ARRAY:
|
|
|
|
if (idx >= type->e.array.elements_count)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
*index += idx * type->e.array.type->reg_size[regset];
|
|
|
|
break;
|
|
|
|
|
|
|
|
case HLSL_CLASS_STRUCT:
|
|
|
|
*index += type->e.record.fields[idx].reg_offset[regset];
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
vkd3d_unreachable();
|
|
|
|
}
|
|
|
|
|
|
|
|
type = hlsl_get_element_type_from_path_index(ctx, type, path_node);
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(type->reg_size[regset] == 1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-02-24 06:06:15 -08:00
|
|
|
bool hlsl_offset_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref, unsigned int *offset)
|
2021-05-10 21:36:08 -07:00
|
|
|
{
|
|
|
|
struct hlsl_ir_node *offset_node = deref->offset.node;
|
2023-05-29 14:34:03 -07:00
|
|
|
enum hlsl_regset regset;
|
2022-10-28 08:23:05 -07:00
|
|
|
unsigned int size;
|
2021-09-23 06:42:50 -07:00
|
|
|
|
|
|
|
if (!offset_node)
|
2021-11-17 00:47:26 -08:00
|
|
|
{
|
|
|
|
*offset = 0;
|
|
|
|
return true;
|
|
|
|
}
|
2021-05-10 21:36:08 -07:00
|
|
|
|
2021-05-16 10:47:53 -07:00
|
|
|
/* We should always have generated a cast to UINT. */
|
2022-11-11 17:31:55 -08:00
|
|
|
assert(offset_node->data_type->class == HLSL_CLASS_SCALAR
|
2021-09-23 06:42:50 -07:00
|
|
|
&& offset_node->data_type->base_type == HLSL_TYPE_UINT);
|
2021-05-16 10:47:53 -07:00
|
|
|
|
2021-09-23 06:42:50 -07:00
|
|
|
if (offset_node->type != HLSL_IR_CONSTANT)
|
2021-11-17 00:47:26 -08:00
|
|
|
return false;
|
2021-05-10 21:36:08 -07:00
|
|
|
|
2022-11-11 16:39:55 -08:00
|
|
|
*offset = hlsl_ir_constant(offset_node)->value.u[0].u;
|
2023-05-29 14:34:03 -07:00
|
|
|
regset = hlsl_type_get_regset(deref->data_type);
|
2022-02-24 06:06:15 -08:00
|
|
|
|
2023-05-29 14:34:03 -07:00
|
|
|
size = deref->var->data_type->reg_size[regset];
|
2022-10-28 08:23:05 -07:00
|
|
|
if (*offset >= size)
|
2022-02-24 06:06:15 -08:00
|
|
|
{
|
|
|
|
hlsl_error(ctx, &deref->offset.node->loc, VKD3D_SHADER_ERROR_HLSL_OFFSET_OUT_OF_BOUNDS,
|
2022-10-28 08:23:05 -07:00
|
|
|
"Dereference is out of bounds. %u/%u", *offset, size);
|
2022-02-24 06:06:15 -08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-11-17 00:47:26 -08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned int hlsl_offset_from_deref_safe(struct hlsl_ctx *ctx, const struct hlsl_deref *deref)
|
|
|
|
{
|
|
|
|
unsigned int offset;
|
|
|
|
|
2022-02-24 06:06:15 -08:00
|
|
|
if (hlsl_offset_from_deref(ctx, deref, &offset))
|
2021-11-17 00:47:26 -08:00
|
|
|
return offset;
|
|
|
|
|
2021-12-01 08:14:57 -08:00
|
|
|
hlsl_fixme(ctx, &deref->offset.node->loc, "Dereference with non-constant offset of type %s.",
|
2021-11-17 00:47:26 -08:00
|
|
|
hlsl_node_type_to_string(deref->offset.node->type));
|
|
|
|
|
|
|
|
return 0;
|
2021-09-23 06:42:50 -07:00
|
|
|
}
|
|
|
|
|
2022-03-10 07:14:05 -08:00
|
|
|
struct hlsl_reg hlsl_reg_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref)
|
2021-09-23 06:42:50 -07:00
|
|
|
{
|
|
|
|
const struct hlsl_ir_var *var = deref->var;
|
2022-11-24 12:03:54 -08:00
|
|
|
struct hlsl_reg ret = var->regs[HLSL_REGSET_NUMERIC];
|
2021-11-17 00:47:26 -08:00
|
|
|
unsigned int offset = hlsl_offset_from_deref_safe(ctx, deref);
|
2021-05-10 21:36:08 -07:00
|
|
|
|
2023-05-29 14:34:03 -07:00
|
|
|
assert(deref->data_type);
|
|
|
|
assert(deref->data_type->class <= HLSL_CLASS_LAST_NUMERIC);
|
2022-10-28 08:23:05 -07:00
|
|
|
|
2021-05-10 21:36:08 -07:00
|
|
|
ret.id += offset / 4;
|
|
|
|
|
2022-03-10 07:14:05 -08:00
|
|
|
ret.writemask = 0xf & (0xf << (offset % 4));
|
2022-11-24 12:03:54 -08:00
|
|
|
if (var->regs[HLSL_REGSET_NUMERIC].writemask)
|
|
|
|
ret.writemask = hlsl_combine_writemasks(var->regs[HLSL_REGSET_NUMERIC].writemask, ret.writemask);
|
2022-03-10 07:14:05 -08:00
|
|
|
|
2021-05-10 21:36:08 -07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-08-16 15:29:34 -07:00
|
|
|
static void parse_numthreads_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
ctx->found_numthreads = 1;
|
|
|
|
|
|
|
|
if (attr->args_count != 3)
|
|
|
|
{
|
|
|
|
hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
|
|
|
|
"Expected 3 parameters for [numthreads] attribute, but got %u.", attr->args_count);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < attr->args_count; ++i)
|
|
|
|
{
|
|
|
|
const struct hlsl_ir_node *instr = attr->args[i].node;
|
|
|
|
const struct hlsl_type *type = instr->data_type;
|
|
|
|
const struct hlsl_ir_constant *constant;
|
|
|
|
|
2022-11-11 17:31:55 -08:00
|
|
|
if (type->class != HLSL_CLASS_SCALAR
|
2021-08-16 15:29:34 -07:00
|
|
|
|| (type->base_type != HLSL_TYPE_INT && type->base_type != HLSL_TYPE_UINT))
|
|
|
|
{
|
|
|
|
struct vkd3d_string_buffer *string;
|
|
|
|
|
|
|
|
if ((string = hlsl_type_to_string(ctx, type)))
|
|
|
|
hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
|
|
|
|
"Wrong type for argument %u of [numthreads]: expected int or uint, but got %s.",
|
|
|
|
i, string->buffer);
|
|
|
|
hlsl_release_string_buffer(ctx, string);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (instr->type != HLSL_IR_CONSTANT)
|
|
|
|
{
|
|
|
|
hlsl_fixme(ctx, &instr->loc, "Non-constant expression in [numthreads] initializer.");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
constant = hlsl_ir_constant(instr);
|
|
|
|
|
2022-11-11 16:39:55 -08:00
|
|
|
if ((type->base_type == HLSL_TYPE_INT && constant->value.u[0].i <= 0)
|
|
|
|
|| (type->base_type == HLSL_TYPE_UINT && !constant->value.u[0].u))
|
2021-08-16 15:29:34 -07:00
|
|
|
hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_THREAD_COUNT,
|
|
|
|
"Thread count must be a positive integer.");
|
|
|
|
|
2022-11-11 16:39:55 -08:00
|
|
|
ctx->thread_count[i] = constant->value.u[0].u;
|
2021-08-16 15:29:34 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-17 13:27:15 -07:00
|
|
|
static bool type_has_object_components(struct hlsl_type *type)
|
|
|
|
{
|
|
|
|
if (type->class == HLSL_CLASS_OBJECT)
|
|
|
|
return true;
|
|
|
|
if (type->class == HLSL_CLASS_ARRAY)
|
|
|
|
return type_has_object_components(type->e.array.type);
|
|
|
|
if (type->class == HLSL_CLASS_STRUCT)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < type->e.record.field_count; ++i)
|
|
|
|
{
|
|
|
|
if (type_has_object_components(type->e.record.fields[i].type))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2022-02-28 03:23:43 -08:00
|
|
|
/* Run the whole HLSL middle end on an entry point and emit target bytecode.
 *
 * Pipeline: inline calls into a single body, expand uniform/input/output
 * semantics into copies, lower constructs the target profile lacks, run
 * folding/copy-propagation to a fixpoint, allocate registers, and finally
 * dispatch to the sm1 or sm4 backend.
 *
 * NOTE(review): the pass ordering below is load-bearing (e.g. derefs must be
 * rewritten to offsets before register allocation); do not reorder casually.
 *
 * Returns ctx->result on failure, otherwise the backend's return value. */
int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func,
        enum vkd3d_shader_target_type target_type, struct vkd3d_shader_code *out)
{
    const struct hlsl_profile_info *profile = ctx->profile;
    struct hlsl_block *const body = &entry_func->body;
    struct recursive_call_ctx recursive_call_ctx;
    struct hlsl_ir_var *var;
    unsigned int i;
    bool progress;

    /* Global initializers run before the entry point's own instructions. */
    list_move_head(&body->instrs, &ctx->static_initializers.instrs);

    /* Diagnose recursion up front; recursive_call_ctx tracks the call chain. */
    memset(&recursive_call_ctx, 0, sizeof(recursive_call_ctx));
    hlsl_transform_ir(ctx, find_recursive_calls, body, &recursive_call_ctx);
    vkd3d_free(recursive_call_ctx.backtrace);

    /* Avoid going into an infinite loop when processing call instructions.
     * lower_return() recurses into inferior calls. */
    if (ctx->result)
        return ctx->result;

    lower_return(ctx, entry_func, body, false);

    /* Inline calls repeatedly until none remain. */
    while (hlsl_transform_ir(ctx, lower_calls, body, NULL));

    lower_ir(ctx, lower_matrix_swizzles, body);
    lower_ir(ctx, lower_index_loads, body);

    /* Uniform globals are read through synthesized copies of their values. */
    LIST_FOR_EACH_ENTRY(var, &ctx->globals->vars, struct hlsl_ir_var, scope_entry)
    {
        if (var->storage_modifiers & HLSL_STORAGE_UNIFORM)
            prepend_uniform_copy(ctx, body, var);
    }

    /* Entry-point parameters become uniforms or semantic in/out variables. */
    for (i = 0; i < entry_func->parameters.count; ++i)
    {
        var = entry_func->parameters.vars[i];

        if (hlsl_type_is_resource(var->data_type) || (var->storage_modifiers & HLSL_STORAGE_UNIFORM))
        {
            prepend_uniform_copy(ctx, body, var);
        }
        else
        {
            if (type_has_object_components(var->data_type))
                hlsl_fixme(ctx, &var->loc, "Prepend uniform copies for object components within structs.");

            /* Non-struct varyings require a semantic; remember that the error
             * was already reported to avoid duplicates later. */
            if (hlsl_get_multiarray_element_type(var->data_type)->class != HLSL_CLASS_STRUCT
                    && !var->semantic.name)
            {
                hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC,
                        "Parameter \"%s\" is missing a semantic.", var->name);
                var->semantic.reported_missing = true;
            }

            if (var->storage_modifiers & HLSL_STORAGE_IN)
                prepend_input_var_copy(ctx, body, var);
            if (var->storage_modifiers & HLSL_STORAGE_OUT)
                append_output_var_copy(ctx, body, var);
        }
    }
    if (entry_func->return_var)
    {
        if (entry_func->return_var->data_type->class != HLSL_CLASS_STRUCT && !entry_func->return_var->semantic.name)
            hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC,
                    "Entry point \"%s\" is missing a return value semantic.", entry_func->func->name);

        append_output_var_copy(ctx, body, entry_func->return_var);
    }

    /* Entry-point attributes: only [numthreads] (compute) is understood. */
    for (i = 0; i < entry_func->attr_count; ++i)
    {
        const struct hlsl_attribute *attr = entry_func->attrs[i];

        if (!strcmp(attr->name, "numthreads") && profile->type == VKD3D_SHADER_TYPE_COMPUTE)
            parse_numthreads_attribute(ctx, attr);
        else
            hlsl_warning(ctx, &entry_func->attrs[i]->loc, VKD3D_SHADER_WARNING_HLSL_UNKNOWN_ATTRIBUTE,
                    "Ignoring unknown attribute \"%s\".", entry_func->attrs[i]->name);
    }

    if (profile->type == VKD3D_SHADER_TYPE_COMPUTE && !ctx->found_numthreads)
        hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE,
                "Entry point \"%s\" is missing a [numthreads] attribute.", entry_func->func->name);

    /* Lowering passes: rewrite constructs the target model cannot express. */
    if (profile->major_version >= 4)
    {
        hlsl_transform_ir(ctx, lower_discard_neg, body, NULL);
    }
    lower_ir(ctx, lower_broadcasts, body);
    while (hlsl_transform_ir(ctx, fold_redundant_casts, body, NULL));
    do
    {
        progress = hlsl_transform_ir(ctx, split_array_copies, body, NULL);
        progress |= hlsl_transform_ir(ctx, split_struct_copies, body, NULL);
    }
    while (progress);
    hlsl_transform_ir(ctx, split_matrix_copies, body, NULL);

    lower_ir(ctx, lower_narrowing_casts, body);
    lower_ir(ctx, lower_casts_to_bool, body);
    lower_ir(ctx, lower_int_dot, body);
    lower_ir(ctx, lower_int_division, body);
    lower_ir(ctx, lower_int_modulus, body);
    lower_ir(ctx, lower_int_abs, body);
    lower_ir(ctx, lower_float_modulus, body);
    hlsl_transform_ir(ctx, fold_redundant_casts, body, NULL);
    /* Optimization fixpoint: constant folding, copy propagation and swizzle
     * simplification enable each other, so iterate until nothing changes. */
    do
    {
        progress = hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, body, NULL);
        progress |= hlsl_transform_ir(ctx, hlsl_fold_constant_swizzles, body, NULL);
        progress |= hlsl_copy_propagation_execute(ctx, body);
        progress |= hlsl_transform_ir(ctx, fold_swizzle_chains, body, NULL);
        progress |= hlsl_transform_ir(ctx, remove_trivial_swizzles, body, NULL);
    }
    while (progress);

    lower_ir(ctx, lower_nonconstant_vector_derefs, body);
    lower_ir(ctx, lower_casts_to_bool, body);
    lower_ir(ctx, lower_int_dot, body);

    /* Resource bookkeeping: validate references and record usage/dimensions
     * so the allocators below know what to reserve. */
    hlsl_transform_ir(ctx, validate_static_object_references, body, NULL);
    hlsl_transform_ir(ctx, track_object_components_sampler_dim, body, NULL);
    if (profile->major_version >= 4)
        hlsl_transform_ir(ctx, lower_combined_samples, body, NULL);
    hlsl_transform_ir(ctx, track_object_components_usage, body, NULL);
    sort_synthetic_separated_samplers_first(ctx);

    if (profile->major_version >= 4)
        lower_ir(ctx, lower_ternary, body);
    if (profile->major_version < 4)
    {
        lower_ir(ctx, lower_division, body);
        lower_ir(ctx, lower_sqrt, body);
        lower_ir(ctx, lower_dot, body);
        lower_ir(ctx, lower_round, body);
    }

    if (profile->major_version < 2)
    {
        lower_ir(ctx, lower_abs, body);
    }

    /* TODO: move forward, remove when no longer needed */
    transform_derefs(ctx, replace_deref_path_with_offset, body);
    while (hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, body, NULL));

    /* Dead-code elimination invalidates liveness; recompute each round. */
    do
        compute_liveness(ctx, entry_func);
    while (hlsl_transform_ir(ctx, dce, body, NULL));

    compute_liveness(ctx, entry_func);

    if (TRACE_ON())
        rb_for_each_entry(&ctx->functions, dump_function, ctx);

    /* Register allocation; constant registers and buffers differ per model. */
    calculate_resource_register_counts(ctx);

    allocate_register_reservations(ctx);

    allocate_temp_registers(ctx, entry_func);
    if (profile->major_version < 4)
    {
        allocate_const_registers(ctx, entry_func);
    }
    else
    {
        allocate_buffers(ctx);
        allocate_objects(ctx, HLSL_REGSET_TEXTURES);
        allocate_objects(ctx, HLSL_REGSET_UAVS);
    }
    allocate_semantic_registers(ctx);
    allocate_objects(ctx, HLSL_REGSET_SAMPLERS);

    if (ctx->result)
        return ctx->result;

    switch (target_type)
    {
        case VKD3D_SHADER_TARGET_D3D_BYTECODE:
            return hlsl_sm1_write(ctx, entry_func, out);

        case VKD3D_SHADER_TARGET_DXBC_TPF:
            return hlsl_sm4_write(ctx, entry_func, out);

        default:
            ERR("Unsupported shader target type %#x.\n", target_type);
            return VKD3D_ERROR_INVALID_ARGUMENT;
    }
}
|