wine-staging/patches/vkd3d-latest/0006-Updated-vkd3d-to-0c3250122cb7dcefef171d1288a1b704a30.patch
Alistair Leslie-Hughes f102154244 Updated vkd3d-latest patchset
2025-06-27 08:23:40 +10:00


From 8d41d4b89e716cec200618599e2eb070a781c13f Mon Sep 17 00:00:00 2001
From: Alistair Leslie-Hughes <leslie_alistair@hotmail.com>
Date: Fri, 27 Jun 2025 08:08:51 +1000
Subject: [PATCH] Updated vkd3d to 0c3250122cb7dcefef171d1288a1b704a303d4a1.
---
libs/vkd3d/libs/vkd3d-shader/d3d_asm.c | 184 +-
libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 371 +--
libs/vkd3d/libs/vkd3d-shader/dxil.c | 560 ++--
libs/vkd3d/libs/vkd3d-shader/glsl.c | 160 +-
libs/vkd3d/libs/vkd3d-shader/hlsl.h | 2 +-
libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c | 489 ++--
libs/vkd3d/libs/vkd3d-shader/ir.c | 2294 +++++++++--------
libs/vkd3d/libs/vkd3d-shader/msl.c | 182 +-
libs/vkd3d/libs/vkd3d-shader/spirv.c | 898 ++++---
libs/vkd3d/libs/vkd3d-shader/tpf.c | 795 +++---
.../libs/vkd3d-shader/vkd3d_shader_main.c | 124 +-
.../libs/vkd3d-shader/vkd3d_shader_private.h | 669 ++---
libs/vkd3d/libs/vkd3d/resource.c | 27 +-
libs/vkd3d/libs/vkd3d/vkd3d_private.h | 2 +-
14 files changed, 3453 insertions(+), 3304 deletions(-)
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
index 21feb75d639..ae9278f68b1 100644
--- a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
+++ b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
@@ -715,8 +715,8 @@ static void shader_print_register(struct vkd3d_d3d_asm_compiler *compiler, const
switch (compiler->current->opcode)
{
- case VKD3DSIH_MOV:
- case VKD3DSIH_MOVC:
+ case VSIR_OP_MOV:
+ case VSIR_OP_MOVC:
untyped = true;
break;
@@ -1284,11 +1284,11 @@ static void shader_dump_instruction_flags(struct vkd3d_d3d_asm_compiler *compile
switch (ins->opcode)
{
- case VKD3DSIH_BREAKP:
- case VKD3DSIH_CONTINUEP:
- case VKD3DSIH_DISCARD:
- case VKD3DSIH_IF:
- case VKD3DSIH_RETP:
+ case VSIR_OP_BREAKP:
+ case VSIR_OP_CONTINUEP:
+ case VSIR_OP_DISCARD:
+ case VSIR_OP_IF:
+ case VSIR_OP_RETP:
switch (ins->flags)
{
case VKD3D_SHADER_CONDITIONAL_OP_NZ:
@@ -1303,8 +1303,8 @@ static void shader_dump_instruction_flags(struct vkd3d_d3d_asm_compiler *compile
}
break;
- case VKD3DSIH_IFC:
- case VKD3DSIH_BREAKC:
+ case VSIR_OP_IFC:
+ case VSIR_OP_BREAKC:
switch (ins->flags)
{
case VKD3D_SHADER_REL_OP_GT:
@@ -1331,7 +1331,7 @@ static void shader_dump_instruction_flags(struct vkd3d_d3d_asm_compiler *compile
}
break;
- case VKD3DSIH_RESINFO:
+ case VSIR_OP_RESINFO:
switch (ins->flags)
{
case VKD3DSI_NONE:
@@ -1348,7 +1348,7 @@ static void shader_dump_instruction_flags(struct vkd3d_d3d_asm_compiler *compile
}
break;
- case VKD3DSIH_SAMPLE_INFO:
+ case VSIR_OP_SAMPLE_INFO:
switch (ins->flags)
{
case VKD3DSI_NONE:
@@ -1362,24 +1362,24 @@ static void shader_dump_instruction_flags(struct vkd3d_d3d_asm_compiler *compile
}
break;
- case VKD3DSIH_IMM_ATOMIC_CMP_EXCH:
- case VKD3DSIH_IMM_ATOMIC_IADD:
- case VKD3DSIH_IMM_ATOMIC_AND:
- case VKD3DSIH_IMM_ATOMIC_IMAX:
- case VKD3DSIH_IMM_ATOMIC_IMIN:
- case VKD3DSIH_IMM_ATOMIC_OR:
- case VKD3DSIH_IMM_ATOMIC_UMAX:
- case VKD3DSIH_IMM_ATOMIC_UMIN:
- case VKD3DSIH_IMM_ATOMIC_EXCH:
- case VKD3DSIH_IMM_ATOMIC_XOR:
+ case VSIR_OP_IMM_ATOMIC_CMP_EXCH:
+ case VSIR_OP_IMM_ATOMIC_IADD:
+ case VSIR_OP_IMM_ATOMIC_AND:
+ case VSIR_OP_IMM_ATOMIC_IMAX:
+ case VSIR_OP_IMM_ATOMIC_IMIN:
+ case VSIR_OP_IMM_ATOMIC_OR:
+ case VSIR_OP_IMM_ATOMIC_UMAX:
+ case VSIR_OP_IMM_ATOMIC_UMIN:
+ case VSIR_OP_IMM_ATOMIC_EXCH:
+ case VSIR_OP_IMM_ATOMIC_XOR:
shader_dump_atomic_op_flags(compiler, ins->flags);
break;
- case VKD3DSIH_SYNC:
+ case VSIR_OP_SYNC:
shader_dump_sync_flags(compiler, ins->flags);
break;
- case VKD3DSIH_TEXLD:
+ case VSIR_OP_TEXLD:
if (vkd3d_shader_ver_ge(&compiler->shader_version, 2, 0))
{
if (ins->flags & VKD3DSI_TEXLD_PROJECT)
@@ -1389,20 +1389,20 @@ static void shader_dump_instruction_flags(struct vkd3d_d3d_asm_compiler *compile
}
break;
- case VKD3DSIH_WAVE_OP_ADD:
- case VKD3DSIH_WAVE_OP_IMAX:
- case VKD3DSIH_WAVE_OP_IMIN:
- case VKD3DSIH_WAVE_OP_MAX:
- case VKD3DSIH_WAVE_OP_MIN:
- case VKD3DSIH_WAVE_OP_MUL:
- case VKD3DSIH_WAVE_OP_UMAX:
- case VKD3DSIH_WAVE_OP_UMIN:
+ case VSIR_OP_WAVE_OP_ADD:
+ case VSIR_OP_WAVE_OP_IMAX:
+ case VSIR_OP_WAVE_OP_IMIN:
+ case VSIR_OP_WAVE_OP_MAX:
+ case VSIR_OP_WAVE_OP_MIN:
+ case VSIR_OP_WAVE_OP_MUL:
+ case VSIR_OP_WAVE_OP_UMAX:
+ case VSIR_OP_WAVE_OP_UMIN:
vkd3d_string_buffer_printf(&compiler->buffer, (ins->flags & VKD3DSI_WAVE_PREFIX) ? "_prefix" : "_active");
break;
- case VKD3DSIH_ISHL:
- case VKD3DSIH_ISHR:
- case VKD3DSIH_USHR:
+ case VSIR_OP_ISHL:
+ case VSIR_OP_ISHR:
+ case VSIR_OP_USHR:
if (ins->flags & VKD3DSI_SHIFT_UNMASKED)
vkd3d_string_buffer_printf(buffer, "_unmasked");
/* fall through */
@@ -1473,8 +1473,8 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
switch (ins->opcode)
{
- case VKD3DSIH_DCL:
- case VKD3DSIH_DCL_UAV_TYPED:
+ case VSIR_OP_DCL:
+ case VSIR_OP_DCL_UAV_TYPED:
vkd3d_string_buffer_printf(buffer, "%s", compiler->colours.opcode);
shader_print_dcl_usage(compiler, "_", &ins->declaration.semantic, ins->flags, "");
shader_dump_ins_modifiers(compiler, &ins->declaration.semantic.resource.reg);
@@ -1483,7 +1483,7 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
shader_dump_register_space(compiler, ins->declaration.semantic.resource.range.space);
break;
- case VKD3DSIH_DCL_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_CONSTANT_BUFFER:
shader_print_register(compiler, " ", &ins->declaration.cb.src.reg, true, "");
if (vkd3d_shader_ver_ge(&compiler->shader_version, 6, 0))
shader_print_subscript(compiler, ins->declaration.cb.size, NULL);
@@ -1494,33 +1494,33 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
shader_dump_register_space(compiler, ins->declaration.cb.range.space);
break;
- case VKD3DSIH_DCL_FUNCTION_BODY:
+ case VSIR_OP_DCL_FUNCTION_BODY:
vkd3d_string_buffer_printf(buffer, " fb%u", ins->declaration.index);
break;
- case VKD3DSIH_DCL_FUNCTION_TABLE:
+ case VSIR_OP_DCL_FUNCTION_TABLE:
vkd3d_string_buffer_printf(buffer, " ft%u = {...}", ins->declaration.index);
break;
- case VKD3DSIH_DCL_GLOBAL_FLAGS:
+ case VSIR_OP_DCL_GLOBAL_FLAGS:
vkd3d_string_buffer_printf(buffer, " ");
shader_dump_global_flags(compiler, ins->declaration.global_flags);
break;
- case VKD3DSIH_DCL_HS_MAX_TESSFACTOR:
+ case VSIR_OP_DCL_HS_MAX_TESSFACTOR:
shader_print_float_literal(compiler, " ", ins->declaration.max_tessellation_factor, "");
break;
- case VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER:
shader_dump_icb(compiler, ins->declaration.icb);
break;
- case VKD3DSIH_DCL_INDEX_RANGE:
+ case VSIR_OP_DCL_INDEX_RANGE:
shader_print_dst_param(compiler, " ", &ins->declaration.index_range.dst, true, "");
shader_print_uint_literal(compiler, " ", ins->declaration.index_range.register_count, "");
break;
- case VKD3DSIH_DCL_INDEXABLE_TEMP:
+ case VSIR_OP_DCL_INDEXABLE_TEMP:
vkd3d_string_buffer_printf(buffer, " %sx%u%s", compiler->colours.reg,
ins->declaration.indexable_temp.register_idx, compiler->colours.reset);
shader_print_subscript(compiler, ins->declaration.indexable_temp.register_size, NULL);
@@ -1531,113 +1531,113 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
shader_dump_icb(compiler, ins->declaration.indexable_temp.initialiser);
break;
- case VKD3DSIH_DCL_INPUT_PS:
+ case VSIR_OP_DCL_INPUT_PS:
shader_print_interpolation_mode(compiler, " ", ins->flags, "");
shader_print_dst_param(compiler, " ", &ins->declaration.dst, true, "");
break;
- case VKD3DSIH_DCL_INPUT_PS_SGV:
- case VKD3DSIH_DCL_INPUT_SGV:
- case VKD3DSIH_DCL_INPUT_SIV:
- case VKD3DSIH_DCL_OUTPUT_SGV:
- case VKD3DSIH_DCL_OUTPUT_SIV:
+ case VSIR_OP_DCL_INPUT_PS_SGV:
+ case VSIR_OP_DCL_INPUT_SGV:
+ case VSIR_OP_DCL_INPUT_SIV:
+ case VSIR_OP_DCL_OUTPUT_SGV:
+ case VSIR_OP_DCL_OUTPUT_SIV:
shader_print_dst_param(compiler, " ", &ins->declaration.register_semantic.reg, true, "");
shader_print_input_sysval_semantic(compiler, ", ", ins->declaration.register_semantic.sysval_semantic, "");
break;
- case VKD3DSIH_DCL_INPUT_PS_SIV:
+ case VSIR_OP_DCL_INPUT_PS_SIV:
shader_print_interpolation_mode(compiler, " ", ins->flags, "");
shader_print_dst_param(compiler, " ", &ins->declaration.register_semantic.reg, true, "");
shader_print_input_sysval_semantic(compiler, ", ", ins->declaration.register_semantic.sysval_semantic, "");
break;
- case VKD3DSIH_DCL_INPUT:
- case VKD3DSIH_DCL_OUTPUT:
+ case VSIR_OP_DCL_INPUT:
+ case VSIR_OP_DCL_OUTPUT:
shader_print_dst_param(compiler, " ", &ins->declaration.dst, true, "");
break;
- case VKD3DSIH_DCL_INPUT_PRIMITIVE:
- case VKD3DSIH_DCL_OUTPUT_TOPOLOGY:
+ case VSIR_OP_DCL_INPUT_PRIMITIVE:
+ case VSIR_OP_DCL_OUTPUT_TOPOLOGY:
shader_print_primitive_type(compiler, " ", &ins->declaration.primitive_type, "");
break;
- case VKD3DSIH_DCL_INTERFACE:
+ case VSIR_OP_DCL_INTERFACE:
vkd3d_string_buffer_printf(buffer, " fp%u", ins->declaration.fp.index);
shader_print_subscript(compiler, ins->declaration.fp.array_size, NULL);
shader_print_subscript(compiler, ins->declaration.fp.body_count, NULL);
vkd3d_string_buffer_printf(buffer, " = {...}");
break;
- case VKD3DSIH_DCL_RESOURCE_RAW:
+ case VSIR_OP_DCL_RESOURCE_RAW:
shader_print_dst_param(compiler, " ", &ins->declaration.raw_resource.resource.reg, true, "");
shader_dump_register_space(compiler, ins->declaration.raw_resource.resource.range.space);
break;
- case VKD3DSIH_DCL_RESOURCE_STRUCTURED:
+ case VSIR_OP_DCL_RESOURCE_STRUCTURED:
shader_print_dst_param(compiler, " ", &ins->declaration.structured_resource.resource.reg, true, "");
shader_print_uint_literal(compiler, ", ", ins->declaration.structured_resource.byte_stride, "");
shader_dump_register_space(compiler, ins->declaration.structured_resource.resource.range.space);
break;
- case VKD3DSIH_DCL_SAMPLER:
+ case VSIR_OP_DCL_SAMPLER:
shader_print_register(compiler, " ", &ins->declaration.sampler.src.reg, true,
ins->flags == VKD3DSI_SAMPLER_COMPARISON_MODE ? ", comparisonMode" : "");
shader_dump_register_space(compiler, ins->declaration.sampler.range.space);
break;
- case VKD3DSIH_DCL_TEMPS:
- case VKD3DSIH_DCL_GS_INSTANCES:
- case VKD3DSIH_DCL_HS_FORK_PHASE_INSTANCE_COUNT:
- case VKD3DSIH_DCL_HS_JOIN_PHASE_INSTANCE_COUNT:
- case VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT:
- case VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT:
- case VKD3DSIH_DCL_VERTICES_OUT:
+ case VSIR_OP_DCL_TEMPS:
+ case VSIR_OP_DCL_GS_INSTANCES:
+ case VSIR_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT:
+ case VSIR_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT:
+ case VSIR_OP_DCL_INPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_VERTICES_OUT:
shader_print_uint_literal(compiler, " ", ins->declaration.count, "");
break;
- case VKD3DSIH_DCL_TESSELLATOR_DOMAIN:
+ case VSIR_OP_DCL_TESSELLATOR_DOMAIN:
shader_print_tessellator_domain(compiler, " ", ins->declaration.tessellator_domain, "");
break;
- case VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
+ case VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
shader_print_tessellator_output_primitive(compiler, " ", ins->declaration.tessellator_output_primitive, "");
break;
- case VKD3DSIH_DCL_TESSELLATOR_PARTITIONING:
+ case VSIR_OP_DCL_TESSELLATOR_PARTITIONING:
shader_print_tessellator_partitioning(compiler, " ", ins->declaration.tessellator_partitioning, "");
break;
- case VKD3DSIH_DCL_TGSM_RAW:
+ case VSIR_OP_DCL_TGSM_RAW:
shader_print_dst_param(compiler, " ", &ins->declaration.tgsm_raw.reg, true, "");
shader_print_uint_literal(compiler, ", ", ins->declaration.tgsm_raw.byte_count, "");
break;
- case VKD3DSIH_DCL_TGSM_STRUCTURED:
+ case VSIR_OP_DCL_TGSM_STRUCTURED:
shader_print_dst_param(compiler, " ", &ins->declaration.tgsm_structured.reg, true, "");
shader_print_uint_literal(compiler, ", ", ins->declaration.tgsm_structured.byte_stride, "");
shader_print_uint_literal(compiler, ", ", ins->declaration.tgsm_structured.structure_count, "");
break;
- case VKD3DSIH_DCL_THREAD_GROUP:
+ case VSIR_OP_DCL_THREAD_GROUP:
shader_print_uint_literal(compiler, " ", ins->declaration.thread_group_size.x, "");
shader_print_uint_literal(compiler, ", ", ins->declaration.thread_group_size.y, "");
shader_print_uint_literal(compiler, ", ", ins->declaration.thread_group_size.z, "");
break;
- case VKD3DSIH_DCL_UAV_RAW:
+ case VSIR_OP_DCL_UAV_RAW:
shader_dump_uav_flags(compiler, ins->flags);
shader_print_dst_param(compiler, " ", &ins->declaration.raw_resource.resource.reg, true, "");
shader_dump_register_space(compiler, ins->declaration.raw_resource.resource.range.space);
break;
- case VKD3DSIH_DCL_UAV_STRUCTURED:
+ case VSIR_OP_DCL_UAV_STRUCTURED:
shader_dump_uav_flags(compiler, ins->flags);
shader_print_dst_param(compiler, " ", &ins->declaration.structured_resource.resource.reg, true, "");
shader_print_uint_literal(compiler, ", ", ins->declaration.structured_resource.byte_stride, "");
shader_dump_register_space(compiler, ins->declaration.structured_resource.resource.range.space);
break;
- case VKD3DSIH_DEF:
+ case VSIR_OP_DEF:
vkd3d_string_buffer_printf(buffer, " %sc%u%s", compiler->colours.reg,
ins->dst[0].reg.idx[0].offset, compiler->colours.reset);
shader_print_float_literal(compiler, " = ", ins->src[0].reg.u.immconst_f32[0], "");
@@ -1646,7 +1646,7 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
shader_print_float_literal(compiler, ", ", ins->src[0].reg.u.immconst_f32[3], "");
break;
- case VKD3DSIH_DEFI:
+ case VSIR_OP_DEFI:
vkd3d_string_buffer_printf(buffer, " %si%u%s", compiler->colours.reg,
ins->dst[0].reg.idx[0].offset, compiler->colours.reset);
shader_print_int_literal(compiler, " = ", ins->src[0].reg.u.immconst_u32[0], "");
@@ -1655,7 +1655,7 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
shader_print_int_literal(compiler, ", ", ins->src[0].reg.u.immconst_u32[3], "");
break;
- case VKD3DSIH_DEFB:
+ case VSIR_OP_DEFB:
vkd3d_string_buffer_printf(buffer, " %sb%u%s", compiler->colours.reg,
ins->dst[0].reg.idx[0].offset, compiler->colours.reset);
shader_print_bool_literal(compiler, " = ", ins->src[0].reg.u.immconst_u32[0], "");
@@ -2048,19 +2048,19 @@ enum vkd3d_result d3d_asm_compile(const struct vsir_program *program,
switch (ins->opcode)
{
- case VKD3DSIH_ELSE:
- case VKD3DSIH_ENDIF:
- case VKD3DSIH_ENDLOOP:
- case VKD3DSIH_ENDSWITCH:
+ case VSIR_OP_ELSE:
+ case VSIR_OP_ENDIF:
+ case VSIR_OP_ENDLOOP:
+ case VSIR_OP_ENDSWITCH:
if (indent)
--indent;
break;
- case VKD3DSIH_LABEL:
- case VKD3DSIH_HS_DECLS:
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_LABEL:
+ case VSIR_OP_HS_DECLS:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
indent = 0;
break;
@@ -2077,12 +2077,12 @@ enum vkd3d_result d3d_asm_compile(const struct vsir_program *program,
switch (ins->opcode)
{
- case VKD3DSIH_ELSE:
- case VKD3DSIH_IF:
- case VKD3DSIH_IFC:
- case VKD3DSIH_LOOP:
- case VKD3DSIH_SWITCH:
- case VKD3DSIH_LABEL:
+ case VSIR_OP_ELSE:
+ case VSIR_OP_IF:
+ case VSIR_OP_IFC:
+ case VSIR_OP_LOOP:
+ case VSIR_OP_SWITCH:
+ case VSIR_OP_LABEL:
++indent;
break;
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
index f19a6283197..eeb4deff61f 100644
--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
+++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
@@ -259,159 +259,159 @@ struct vkd3d_shader_sm1_parser
static const struct vkd3d_sm1_opcode_info vs_opcode_table[] =
{
/* Arithmetic */
- {VKD3D_SM1_OP_NOP, 0, 0, VKD3DSIH_NOP},
- {VKD3D_SM1_OP_MOV, 1, 1, VKD3DSIH_MOV},
- {VKD3D_SM1_OP_MOVA, 1, 1, VKD3DSIH_MOVA, {2, 0}},
- {VKD3D_SM1_OP_ADD, 1, 2, VKD3DSIH_ADD},
- {VKD3D_SM1_OP_SUB, 1, 2, VKD3DSIH_SUB},
- {VKD3D_SM1_OP_MAD, 1, 3, VKD3DSIH_MAD},
- {VKD3D_SM1_OP_MUL, 1, 2, VKD3DSIH_MUL},
- {VKD3D_SM1_OP_RCP, 1, 1, VKD3DSIH_RCP},
- {VKD3D_SM1_OP_RSQ, 1, 1, VKD3DSIH_RSQ},
- {VKD3D_SM1_OP_DP3, 1, 2, VKD3DSIH_DP3},
- {VKD3D_SM1_OP_DP4, 1, 2, VKD3DSIH_DP4},
- {VKD3D_SM1_OP_MIN, 1, 2, VKD3DSIH_MIN},
- {VKD3D_SM1_OP_MAX, 1, 2, VKD3DSIH_MAX},
- {VKD3D_SM1_OP_SLT, 1, 2, VKD3DSIH_SLT},
- {VKD3D_SM1_OP_SGE, 1, 2, VKD3DSIH_SGE},
- {VKD3D_SM1_OP_ABS, 1, 1, VKD3DSIH_ABS, {2, 0}},
- {VKD3D_SM1_OP_EXP, 1, 1, VKD3DSIH_EXP},
- {VKD3D_SM1_OP_LOG, 1, 1, VKD3DSIH_LOG},
- {VKD3D_SM1_OP_EXPP, 1, 1, VKD3DSIH_EXPP},
- {VKD3D_SM1_OP_LOGP, 1, 1, VKD3DSIH_LOGP},
- {VKD3D_SM1_OP_LIT, 1, 1, VKD3DSIH_LIT},
- {VKD3D_SM1_OP_DST, 1, 2, VKD3DSIH_DST},
- {VKD3D_SM1_OP_LRP, 1, 3, VKD3DSIH_LRP, {2, 0}},
- {VKD3D_SM1_OP_FRC, 1, 1, VKD3DSIH_FRC},
- {VKD3D_SM1_OP_POW, 1, 2, VKD3DSIH_POW, {2, 0}},
- {VKD3D_SM1_OP_CRS, 1, 2, VKD3DSIH_CRS, {2, 0}},
- {VKD3D_SM1_OP_SGN, 1, 3, VKD3DSIH_SGN, {2, 0}, {2, 1}},
- {VKD3D_SM1_OP_SGN, 1, 1, VKD3DSIH_SGN, {3, 0}},
- {VKD3D_SM1_OP_NRM, 1, 1, VKD3DSIH_NRM, {2, 0}},
- {VKD3D_SM1_OP_SINCOS, 1, 3, VKD3DSIH_SINCOS, {2, 0}, {2, 1}},
- {VKD3D_SM1_OP_SINCOS, 1, 1, VKD3DSIH_SINCOS, {3, 0}},
+ {VKD3D_SM1_OP_NOP, 0, 0, VSIR_OP_NOP},
+ {VKD3D_SM1_OP_MOV, 1, 1, VSIR_OP_MOV},
+ {VKD3D_SM1_OP_MOVA, 1, 1, VSIR_OP_MOVA, {2, 0}},
+ {VKD3D_SM1_OP_ADD, 1, 2, VSIR_OP_ADD},
+ {VKD3D_SM1_OP_SUB, 1, 2, VSIR_OP_SUB},
+ {VKD3D_SM1_OP_MAD, 1, 3, VSIR_OP_MAD},
+ {VKD3D_SM1_OP_MUL, 1, 2, VSIR_OP_MUL},
+ {VKD3D_SM1_OP_RCP, 1, 1, VSIR_OP_RCP},
+ {VKD3D_SM1_OP_RSQ, 1, 1, VSIR_OP_RSQ},
+ {VKD3D_SM1_OP_DP3, 1, 2, VSIR_OP_DP3},
+ {VKD3D_SM1_OP_DP4, 1, 2, VSIR_OP_DP4},
+ {VKD3D_SM1_OP_MIN, 1, 2, VSIR_OP_MIN},
+ {VKD3D_SM1_OP_MAX, 1, 2, VSIR_OP_MAX},
+ {VKD3D_SM1_OP_SLT, 1, 2, VSIR_OP_SLT},
+ {VKD3D_SM1_OP_SGE, 1, 2, VSIR_OP_SGE},
+ {VKD3D_SM1_OP_ABS, 1, 1, VSIR_OP_ABS, {2, 0}},
+ {VKD3D_SM1_OP_EXP, 1, 1, VSIR_OP_EXP},
+ {VKD3D_SM1_OP_LOG, 1, 1, VSIR_OP_LOG},
+ {VKD3D_SM1_OP_EXPP, 1, 1, VSIR_OP_EXPP},
+ {VKD3D_SM1_OP_LOGP, 1, 1, VSIR_OP_LOGP},
+ {VKD3D_SM1_OP_LIT, 1, 1, VSIR_OP_LIT},
+ {VKD3D_SM1_OP_DST, 1, 2, VSIR_OP_DST},
+ {VKD3D_SM1_OP_LRP, 1, 3, VSIR_OP_LRP, {2, 0}},
+ {VKD3D_SM1_OP_FRC, 1, 1, VSIR_OP_FRC},
+ {VKD3D_SM1_OP_POW, 1, 2, VSIR_OP_POW, {2, 0}},
+ {VKD3D_SM1_OP_CRS, 1, 2, VSIR_OP_CRS, {2, 0}},
+ {VKD3D_SM1_OP_SGN, 1, 3, VSIR_OP_SGN, {2, 0}, {2, 1}},
+ {VKD3D_SM1_OP_SGN, 1, 1, VSIR_OP_SGN, {3, 0}},
+ {VKD3D_SM1_OP_NRM, 1, 1, VSIR_OP_NRM, {2, 0}},
+ {VKD3D_SM1_OP_SINCOS, 1, 3, VSIR_OP_SINCOS, {2, 0}, {2, 1}},
+ {VKD3D_SM1_OP_SINCOS, 1, 1, VSIR_OP_SINCOS, {3, 0}},
/* Matrix */
- {VKD3D_SM1_OP_M4x4, 1, 2, VKD3DSIH_M4x4},
- {VKD3D_SM1_OP_M4x3, 1, 2, VKD3DSIH_M4x3},
- {VKD3D_SM1_OP_M3x4, 1, 2, VKD3DSIH_M3x4},
- {VKD3D_SM1_OP_M3x3, 1, 2, VKD3DSIH_M3x3},
- {VKD3D_SM1_OP_M3x2, 1, 2, VKD3DSIH_M3x2},
+ {VKD3D_SM1_OP_M4x4, 1, 2, VSIR_OP_M4x4},
+ {VKD3D_SM1_OP_M4x3, 1, 2, VSIR_OP_M4x3},
+ {VKD3D_SM1_OP_M3x4, 1, 2, VSIR_OP_M3x4},
+ {VKD3D_SM1_OP_M3x3, 1, 2, VSIR_OP_M3x3},
+ {VKD3D_SM1_OP_M3x2, 1, 2, VSIR_OP_M3x2},
/* Declarations */
- {VKD3D_SM1_OP_DCL, 0, 0, VKD3DSIH_DCL},
+ {VKD3D_SM1_OP_DCL, 0, 0, VSIR_OP_DCL},
/* Constant definitions */
- {VKD3D_SM1_OP_DEF, 1, 1, VKD3DSIH_DEF},
- {VKD3D_SM1_OP_DEFB, 1, 1, VKD3DSIH_DEFB, {2, 0}},
- {VKD3D_SM1_OP_DEFI, 1, 1, VKD3DSIH_DEFI, {2, 0}},
+ {VKD3D_SM1_OP_DEF, 1, 1, VSIR_OP_DEF},
+ {VKD3D_SM1_OP_DEFB, 1, 1, VSIR_OP_DEFB, {2, 0}},
+ {VKD3D_SM1_OP_DEFI, 1, 1, VSIR_OP_DEFI, {2, 0}},
/* Control flow */
- {VKD3D_SM1_OP_REP, 0, 1, VKD3DSIH_REP, {2, 0}},
- {VKD3D_SM1_OP_ENDREP, 0, 0, VKD3DSIH_ENDREP, {2, 0}},
- {VKD3D_SM1_OP_IF, 0, 1, VKD3DSIH_IF, {2, 0}},
- {VKD3D_SM1_OP_IFC, 0, 2, VKD3DSIH_IFC, {2, 1}},
- {VKD3D_SM1_OP_ELSE, 0, 0, VKD3DSIH_ELSE, {2, 0}},
- {VKD3D_SM1_OP_ENDIF, 0, 0, VKD3DSIH_ENDIF, {2, 0}},
- {VKD3D_SM1_OP_BREAK, 0, 0, VKD3DSIH_BREAK, {2, 1}},
- {VKD3D_SM1_OP_BREAKC, 0, 2, VKD3DSIH_BREAKC, {2, 1}},
- {VKD3D_SM1_OP_BREAKP, 0, 1, VKD3DSIH_BREAKP, {2, 1}},
- {VKD3D_SM1_OP_CALL, 0, 1, VKD3DSIH_CALL, {2, 0}},
- {VKD3D_SM1_OP_CALLNZ, 0, 2, VKD3DSIH_CALLNZ, {2, 0}},
- {VKD3D_SM1_OP_LOOP, 0, 2, VKD3DSIH_LOOP, {2, 0}},
- {VKD3D_SM1_OP_RET, 0, 0, VKD3DSIH_RET, {2, 0}},
- {VKD3D_SM1_OP_ENDLOOP, 0, 0, VKD3DSIH_ENDLOOP, {2, 0}},
- {VKD3D_SM1_OP_LABEL, 0, 1, VKD3DSIH_LABEL, {2, 0}},
-
- {VKD3D_SM1_OP_SETP, 1, 2, VKD3DSIH_SETP, {2, 1}},
- {VKD3D_SM1_OP_TEXLDL, 1, 2, VKD3DSIH_TEXLDL, {3, 0}},
- {0, 0, 0, VKD3DSIH_INVALID},
+ {VKD3D_SM1_OP_REP, 0, 1, VSIR_OP_REP, {2, 0}},
+ {VKD3D_SM1_OP_ENDREP, 0, 0, VSIR_OP_ENDREP, {2, 0}},
+ {VKD3D_SM1_OP_IF, 0, 1, VSIR_OP_IF, {2, 0}},
+ {VKD3D_SM1_OP_IFC, 0, 2, VSIR_OP_IFC, {2, 1}},
+ {VKD3D_SM1_OP_ELSE, 0, 0, VSIR_OP_ELSE, {2, 0}},
+ {VKD3D_SM1_OP_ENDIF, 0, 0, VSIR_OP_ENDIF, {2, 0}},
+ {VKD3D_SM1_OP_BREAK, 0, 0, VSIR_OP_BREAK, {2, 1}},
+ {VKD3D_SM1_OP_BREAKC, 0, 2, VSIR_OP_BREAKC, {2, 1}},
+ {VKD3D_SM1_OP_BREAKP, 0, 1, VSIR_OP_BREAKP, {2, 1}},
+ {VKD3D_SM1_OP_CALL, 0, 1, VSIR_OP_CALL, {2, 0}},
+ {VKD3D_SM1_OP_CALLNZ, 0, 2, VSIR_OP_CALLNZ, {2, 0}},
+ {VKD3D_SM1_OP_LOOP, 0, 2, VSIR_OP_LOOP, {2, 0}},
+ {VKD3D_SM1_OP_RET, 0, 0, VSIR_OP_RET, {2, 0}},
+ {VKD3D_SM1_OP_ENDLOOP, 0, 0, VSIR_OP_ENDLOOP, {2, 0}},
+ {VKD3D_SM1_OP_LABEL, 0, 1, VSIR_OP_LABEL, {2, 0}},
+
+ {VKD3D_SM1_OP_SETP, 1, 2, VSIR_OP_SETP, {2, 1}},
+ {VKD3D_SM1_OP_TEXLDL, 1, 2, VSIR_OP_TEXLDL, {3, 0}},
+ {0, 0, 0, VSIR_OP_INVALID},
};
static const struct vkd3d_sm1_opcode_info ps_opcode_table[] =
{
/* Arithmetic */
- {VKD3D_SM1_OP_NOP, 0, 0, VKD3DSIH_NOP},
- {VKD3D_SM1_OP_MOV, 1, 1, VKD3DSIH_MOV},
- {VKD3D_SM1_OP_ADD, 1, 2, VKD3DSIH_ADD},
- {VKD3D_SM1_OP_SUB, 1, 2, VKD3DSIH_SUB},
- {VKD3D_SM1_OP_MAD, 1, 3, VKD3DSIH_MAD},
- {VKD3D_SM1_OP_MUL, 1, 2, VKD3DSIH_MUL},
- {VKD3D_SM1_OP_RCP, 1, 1, VKD3DSIH_RCP, {2, 0}},
- {VKD3D_SM1_OP_RSQ, 1, 1, VKD3DSIH_RSQ, {2, 0}},
- {VKD3D_SM1_OP_DP3, 1, 2, VKD3DSIH_DP3},
- {VKD3D_SM1_OP_DP4, 1, 2, VKD3DSIH_DP4, {1, 2}},
- {VKD3D_SM1_OP_MIN, 1, 2, VKD3DSIH_MIN, {2, 0}},
- {VKD3D_SM1_OP_MAX, 1, 2, VKD3DSIH_MAX, {2, 0}},
- {VKD3D_SM1_OP_ABS, 1, 1, VKD3DSIH_ABS, {2, 0}},
- {VKD3D_SM1_OP_EXP, 1, 1, VKD3DSIH_EXP, {2, 0}},
- {VKD3D_SM1_OP_LOG, 1, 1, VKD3DSIH_LOG, {2, 0}},
- {VKD3D_SM1_OP_LRP, 1, 3, VKD3DSIH_LRP},
- {VKD3D_SM1_OP_FRC, 1, 1, VKD3DSIH_FRC, {2, 0}},
- {VKD3D_SM1_OP_CND, 1, 3, VKD3DSIH_CND, {1, 0}, {1, 4}},
- {VKD3D_SM1_OP_CMP, 1, 3, VKD3DSIH_CMP, {1, 2}},
- {VKD3D_SM1_OP_POW, 1, 2, VKD3DSIH_POW, {2, 0}},
- {VKD3D_SM1_OP_CRS, 1, 2, VKD3DSIH_CRS, {2, 0}},
- {VKD3D_SM1_OP_NRM, 1, 1, VKD3DSIH_NRM, {2, 0}},
- {VKD3D_SM1_OP_SINCOS, 1, 3, VKD3DSIH_SINCOS, {2, 0}, {2, 1}},
- {VKD3D_SM1_OP_SINCOS, 1, 1, VKD3DSIH_SINCOS, {3, 0}},
- {VKD3D_SM1_OP_DP2ADD, 1, 3, VKD3DSIH_DP2ADD, {2, 0}},
+ {VKD3D_SM1_OP_NOP, 0, 0, VSIR_OP_NOP},
+ {VKD3D_SM1_OP_MOV, 1, 1, VSIR_OP_MOV},
+ {VKD3D_SM1_OP_ADD, 1, 2, VSIR_OP_ADD},
+ {VKD3D_SM1_OP_SUB, 1, 2, VSIR_OP_SUB},
+ {VKD3D_SM1_OP_MAD, 1, 3, VSIR_OP_MAD},
+ {VKD3D_SM1_OP_MUL, 1, 2, VSIR_OP_MUL},
+ {VKD3D_SM1_OP_RCP, 1, 1, VSIR_OP_RCP, {2, 0}},
+ {VKD3D_SM1_OP_RSQ, 1, 1, VSIR_OP_RSQ, {2, 0}},
+ {VKD3D_SM1_OP_DP3, 1, 2, VSIR_OP_DP3},
+ {VKD3D_SM1_OP_DP4, 1, 2, VSIR_OP_DP4, {1, 2}},
+ {VKD3D_SM1_OP_MIN, 1, 2, VSIR_OP_MIN, {2, 0}},
+ {VKD3D_SM1_OP_MAX, 1, 2, VSIR_OP_MAX, {2, 0}},
+ {VKD3D_SM1_OP_ABS, 1, 1, VSIR_OP_ABS, {2, 0}},
+ {VKD3D_SM1_OP_EXP, 1, 1, VSIR_OP_EXP, {2, 0}},
+ {VKD3D_SM1_OP_LOG, 1, 1, VSIR_OP_LOG, {2, 0}},
+ {VKD3D_SM1_OP_LRP, 1, 3, VSIR_OP_LRP},
+ {VKD3D_SM1_OP_FRC, 1, 1, VSIR_OP_FRC, {2, 0}},
+ {VKD3D_SM1_OP_CND, 1, 3, VSIR_OP_CND, {1, 0}, {1, 4}},
+ {VKD3D_SM1_OP_CMP, 1, 3, VSIR_OP_CMP, {1, 2}},
+ {VKD3D_SM1_OP_POW, 1, 2, VSIR_OP_POW, {2, 0}},
+ {VKD3D_SM1_OP_CRS, 1, 2, VSIR_OP_CRS, {2, 0}},
+ {VKD3D_SM1_OP_NRM, 1, 1, VSIR_OP_NRM, {2, 0}},
+ {VKD3D_SM1_OP_SINCOS, 1, 3, VSIR_OP_SINCOS, {2, 0}, {2, 1}},
+ {VKD3D_SM1_OP_SINCOS, 1, 1, VSIR_OP_SINCOS, {3, 0}},
+ {VKD3D_SM1_OP_DP2ADD, 1, 3, VSIR_OP_DP2ADD, {2, 0}},
/* Matrix */
- {VKD3D_SM1_OP_M4x4, 1, 2, VKD3DSIH_M4x4, {2, 0}},
- {VKD3D_SM1_OP_M4x3, 1, 2, VKD3DSIH_M4x3, {2, 0}},
- {VKD3D_SM1_OP_M3x4, 1, 2, VKD3DSIH_M3x4, {2, 0}},
- {VKD3D_SM1_OP_M3x3, 1, 2, VKD3DSIH_M3x3, {2, 0}},
- {VKD3D_SM1_OP_M3x2, 1, 2, VKD3DSIH_M3x2, {2, 0}},
+ {VKD3D_SM1_OP_M4x4, 1, 2, VSIR_OP_M4x4, {2, 0}},
+ {VKD3D_SM1_OP_M4x3, 1, 2, VSIR_OP_M4x3, {2, 0}},
+ {VKD3D_SM1_OP_M3x4, 1, 2, VSIR_OP_M3x4, {2, 0}},
+ {VKD3D_SM1_OP_M3x3, 1, 2, VSIR_OP_M3x3, {2, 0}},
+ {VKD3D_SM1_OP_M3x2, 1, 2, VSIR_OP_M3x2, {2, 0}},
/* Declarations */
- {VKD3D_SM1_OP_DCL, 0, 0, VKD3DSIH_DCL, {2, 0}},
+ {VKD3D_SM1_OP_DCL, 0, 0, VSIR_OP_DCL, {2, 0}},
/* Constant definitions */
- {VKD3D_SM1_OP_DEF, 1, 1, VKD3DSIH_DEF},
- {VKD3D_SM1_OP_DEFB, 1, 1, VKD3DSIH_DEFB, {2, 0}},
- {VKD3D_SM1_OP_DEFI, 1, 1, VKD3DSIH_DEFI, {2, 1}},
+ {VKD3D_SM1_OP_DEF, 1, 1, VSIR_OP_DEF},
+ {VKD3D_SM1_OP_DEFB, 1, 1, VSIR_OP_DEFB, {2, 0}},
+ {VKD3D_SM1_OP_DEFI, 1, 1, VSIR_OP_DEFI, {2, 1}},
/* Control flow */
- {VKD3D_SM1_OP_REP, 0, 1, VKD3DSIH_REP, {2, 1}},
- {VKD3D_SM1_OP_ENDREP, 0, 0, VKD3DSIH_ENDREP, {2, 1}},
- {VKD3D_SM1_OP_IF, 0, 1, VKD3DSIH_IF, {2, 1}},
- {VKD3D_SM1_OP_IFC, 0, 2, VKD3DSIH_IFC, {2, 1}},
- {VKD3D_SM1_OP_ELSE, 0, 0, VKD3DSIH_ELSE, {2, 1}},
- {VKD3D_SM1_OP_ENDIF, 0, 0, VKD3DSIH_ENDIF, {2, 1}},
- {VKD3D_SM1_OP_BREAK, 0, 0, VKD3DSIH_BREAK, {2, 1}},
- {VKD3D_SM1_OP_BREAKC, 0, 2, VKD3DSIH_BREAKC, {2, 1}},
- {VKD3D_SM1_OP_BREAKP, 0, 1, VKD3DSIH_BREAKP, {2, 1}},
- {VKD3D_SM1_OP_CALL, 0, 1, VKD3DSIH_CALL, {2, 1}},
- {VKD3D_SM1_OP_CALLNZ, 0, 2, VKD3DSIH_CALLNZ, {2, 1}},
- {VKD3D_SM1_OP_LOOP, 0, 2, VKD3DSIH_LOOP, {3, 0}},
- {VKD3D_SM1_OP_RET, 0, 0, VKD3DSIH_RET, {2, 1}},
- {VKD3D_SM1_OP_ENDLOOP, 0, 0, VKD3DSIH_ENDLOOP, {3, 0}},
- {VKD3D_SM1_OP_LABEL, 0, 1, VKD3DSIH_LABEL, {2, 1}},
+ {VKD3D_SM1_OP_REP, 0, 1, VSIR_OP_REP, {2, 1}},
+ {VKD3D_SM1_OP_ENDREP, 0, 0, VSIR_OP_ENDREP, {2, 1}},
+ {VKD3D_SM1_OP_IF, 0, 1, VSIR_OP_IF, {2, 1}},
+ {VKD3D_SM1_OP_IFC, 0, 2, VSIR_OP_IFC, {2, 1}},
+ {VKD3D_SM1_OP_ELSE, 0, 0, VSIR_OP_ELSE, {2, 1}},
+ {VKD3D_SM1_OP_ENDIF, 0, 0, VSIR_OP_ENDIF, {2, 1}},
+ {VKD3D_SM1_OP_BREAK, 0, 0, VSIR_OP_BREAK, {2, 1}},
+ {VKD3D_SM1_OP_BREAKC, 0, 2, VSIR_OP_BREAKC, {2, 1}},
+ {VKD3D_SM1_OP_BREAKP, 0, 1, VSIR_OP_BREAKP, {2, 1}},
+ {VKD3D_SM1_OP_CALL, 0, 1, VSIR_OP_CALL, {2, 1}},
+ {VKD3D_SM1_OP_CALLNZ, 0, 2, VSIR_OP_CALLNZ, {2, 1}},
+ {VKD3D_SM1_OP_LOOP, 0, 2, VSIR_OP_LOOP, {3, 0}},
+ {VKD3D_SM1_OP_RET, 0, 0, VSIR_OP_RET, {2, 1}},
+ {VKD3D_SM1_OP_ENDLOOP, 0, 0, VSIR_OP_ENDLOOP, {3, 0}},
+ {VKD3D_SM1_OP_LABEL, 0, 1, VSIR_OP_LABEL, {2, 1}},
/* Texture */
- {VKD3D_SM1_OP_TEXCOORD, 1, 0, VKD3DSIH_TEXCOORD, {0, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXCOORD, 1, 1, VKD3DSIH_TEXCRD, {1, 4}, {1, 4}},
- {VKD3D_SM1_OP_TEXKILL, 1, 0, VKD3DSIH_TEXKILL, {1, 0}},
- {VKD3D_SM1_OP_TEX, 1, 0, VKD3DSIH_TEX, {0, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEX, 1, 1, VKD3DSIH_TEXLD, {1, 4}, {1, 4}},
- {VKD3D_SM1_OP_TEX, 1, 2, VKD3DSIH_TEXLD, {2, 0}},
- {VKD3D_SM1_OP_TEXBEM, 1, 1, VKD3DSIH_TEXBEM, {0, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXBEML, 1, 1, VKD3DSIH_TEXBEML, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXREG2AR, 1, 1, VKD3DSIH_TEXREG2AR, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXREG2GB, 1, 1, VKD3DSIH_TEXREG2GB, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXREG2RGB, 1, 1, VKD3DSIH_TEXREG2RGB, {1, 2}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x2PAD, 1, 1, VKD3DSIH_TEXM3x2PAD, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x2TEX, 1, 1, VKD3DSIH_TEXM3x2TEX, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x3PAD, 1, 1, VKD3DSIH_TEXM3x3PAD, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x3DIFF, 1, 1, VKD3DSIH_TEXM3x3DIFF, {0, 0}, {0, 0}},
- {VKD3D_SM1_OP_TEXM3x3SPEC, 1, 2, VKD3DSIH_TEXM3x3SPEC, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x3VSPEC, 1, 1, VKD3DSIH_TEXM3x3VSPEC, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x3TEX, 1, 1, VKD3DSIH_TEXM3x3TEX, {1, 0}, {1, 3}},
- {VKD3D_SM1_OP_TEXDP3TEX, 1, 1, VKD3DSIH_TEXDP3TEX, {1, 2}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x2DEPTH, 1, 1, VKD3DSIH_TEXM3x2DEPTH, {1, 3}, {1, 3}},
- {VKD3D_SM1_OP_TEXDP3, 1, 1, VKD3DSIH_TEXDP3, {1, 2}, {1, 3}},
- {VKD3D_SM1_OP_TEXM3x3, 1, 1, VKD3DSIH_TEXM3x3, {1, 2}, {1, 3}},
- {VKD3D_SM1_OP_TEXDEPTH, 1, 0, VKD3DSIH_TEXDEPTH, {1, 4}, {1, 4}},
- {VKD3D_SM1_OP_BEM, 1, 2, VKD3DSIH_BEM, {1, 4}, {1, 4}},
- {VKD3D_SM1_OP_DSX, 1, 1, VKD3DSIH_DSX, {2, 1}},
- {VKD3D_SM1_OP_DSY, 1, 1, VKD3DSIH_DSY, {2, 1}},
- {VKD3D_SM1_OP_TEXLDD, 1, 4, VKD3DSIH_TEXLDD, {2, 1}},
- {VKD3D_SM1_OP_SETP, 1, 2, VKD3DSIH_SETP, {2, 1}},
- {VKD3D_SM1_OP_TEXLDL, 1, 2, VKD3DSIH_TEXLDL, {3, 0}},
- {VKD3D_SM1_OP_PHASE, 0, 0, VKD3DSIH_PHASE, {1, 4}, {1, 4}},
- {0, 0, 0, VKD3DSIH_INVALID},
+ {VKD3D_SM1_OP_TEXCOORD, 1, 0, VSIR_OP_TEXCOORD, {0, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXCOORD, 1, 1, VSIR_OP_TEXCRD, {1, 4}, {1, 4}},
+ {VKD3D_SM1_OP_TEXKILL, 1, 0, VSIR_OP_TEXKILL, {1, 0}},
+ {VKD3D_SM1_OP_TEX, 1, 0, VSIR_OP_TEX, {0, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEX, 1, 1, VSIR_OP_TEXLD, {1, 4}, {1, 4}},
+ {VKD3D_SM1_OP_TEX, 1, 2, VSIR_OP_TEXLD, {2, 0}},
+ {VKD3D_SM1_OP_TEXBEM, 1, 1, VSIR_OP_TEXBEM, {0, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXBEML, 1, 1, VSIR_OP_TEXBEML, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXREG2AR, 1, 1, VSIR_OP_TEXREG2AR, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXREG2GB, 1, 1, VSIR_OP_TEXREG2GB, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXREG2RGB, 1, 1, VSIR_OP_TEXREG2RGB, {1, 2}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x2PAD, 1, 1, VSIR_OP_TEXM3x2PAD, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x2TEX, 1, 1, VSIR_OP_TEXM3x2TEX, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x3PAD, 1, 1, VSIR_OP_TEXM3x3PAD, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x3DIFF, 1, 1, VSIR_OP_TEXM3x3DIFF, {0, 0}, {0, 0}},
+ {VKD3D_SM1_OP_TEXM3x3SPEC, 1, 2, VSIR_OP_TEXM3x3SPEC, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x3VSPEC, 1, 1, VSIR_OP_TEXM3x3VSPEC, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x3TEX, 1, 1, VSIR_OP_TEXM3x3TEX, {1, 0}, {1, 3}},
+ {VKD3D_SM1_OP_TEXDP3TEX, 1, 1, VSIR_OP_TEXDP3TEX, {1, 2}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x2DEPTH, 1, 1, VSIR_OP_TEXM3x2DEPTH, {1, 3}, {1, 3}},
+ {VKD3D_SM1_OP_TEXDP3, 1, 1, VSIR_OP_TEXDP3, {1, 2}, {1, 3}},
+ {VKD3D_SM1_OP_TEXM3x3, 1, 1, VSIR_OP_TEXM3x3, {1, 2}, {1, 3}},
+ {VKD3D_SM1_OP_TEXDEPTH, 1, 0, VSIR_OP_TEXDEPTH, {1, 4}, {1, 4}},
+ {VKD3D_SM1_OP_BEM, 1, 2, VSIR_OP_BEM, {1, 4}, {1, 4}},
+ {VKD3D_SM1_OP_DSX, 1, 1, VSIR_OP_DSX, {2, 1}},
+ {VKD3D_SM1_OP_DSY, 1, 1, VSIR_OP_DSY, {2, 1}},
+ {VKD3D_SM1_OP_TEXLDD, 1, 4, VSIR_OP_TEXLDD, {2, 1}},
+ {VKD3D_SM1_OP_SETP, 1, 2, VSIR_OP_SETP, {2, 1}},
+ {VKD3D_SM1_OP_TEXLDL, 1, 2, VSIR_OP_TEXLDL, {3, 0}},
+ {VKD3D_SM1_OP_PHASE, 0, 0, VSIR_OP_PHASE, {1, 4}, {1, 4}},
+ {0, 0, 0, VSIR_OP_INVALID},
};
static const struct
@@ -475,7 +475,7 @@ static const struct vkd3d_sm1_opcode_info *shader_sm1_get_opcode_info(
for (;;)
{
info = &sm1->opcode_table[i++];
- if (info->vkd3d_opcode == VKD3DSIH_INVALID)
+ if (info->vkd3d_opcode == VSIR_OP_INVALID)
return NULL;
if (opcode == info->sm1_opcode
@@ -994,7 +994,7 @@ static void shader_sm1_skip_opcode(const struct vkd3d_shader_sm1_parser *sm1, co
/* DCL instructions do not have sources or destinations, but they
* read two tokens to a semantic. See
* shader_sm1_read_semantic(). */
- if (opcode_info->vkd3d_opcode == VKD3DSIH_DCL)
+ if (opcode_info->vkd3d_opcode == VSIR_OP_DCL)
{
*ptr += 2;
}
@@ -1002,7 +1002,7 @@ static void shader_sm1_skip_opcode(const struct vkd3d_shader_sm1_parser *sm1, co
* four tokens for that source. See shader_sm1_read_immconst().
* Technically shader model 1 doesn't have integer registers or DEFI; we
* handle it here anyway because it's easy. */
- else if (opcode_info->vkd3d_opcode == VKD3DSIH_DEF || opcode_info->vkd3d_opcode == VKD3DSIH_DEFI)
+ else if (opcode_info->vkd3d_opcode == VSIR_OP_DEF || opcode_info->vkd3d_opcode == VSIR_OP_DEFI)
{
*ptr += 3;
}
@@ -1194,7 +1194,7 @@ static void shader_sm1_read_comment(struct vkd3d_shader_sm1_parser *sm1)
static void shader_sm1_validate_instruction(struct vkd3d_shader_sm1_parser *sm1, struct vkd3d_shader_instruction *ins)
{
- if ((ins->opcode == VKD3DSIH_BREAKP || ins->opcode == VKD3DSIH_IF) && ins->flags)
+ if ((ins->opcode == VSIR_OP_BREAKP || ins->opcode == VSIR_OP_IF) && ins->flags)
{
vkd3d_shader_parser_warning(&sm1->p, VKD3D_SHADER_WARNING_D3DBC_IGNORED_INSTRUCTION_FLAGS,
"Ignoring unexpected instruction flags %#x.", ins->flags);
@@ -1242,7 +1242,7 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str
goto fail;
}
- if (opcode_info->vkd3d_opcode == VKD3DSIH_TEXKILL)
+ if (opcode_info->vkd3d_opcode == VSIR_OP_TEXKILL)
{
vsir_src_count = 1;
vsir_dst_count = 0;
@@ -1288,29 +1288,29 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str
goto fail;
}
- if (ins->opcode == VKD3DSIH_DCL)
+ if (ins->opcode == VSIR_OP_DCL)
{
shader_sm1_read_semantic(sm1, &p, &ins->declaration.semantic);
}
- else if (ins->opcode == VKD3DSIH_DEF)
+ else if (ins->opcode == VSIR_OP_DEF)
{
shader_sm1_read_dst_param(sm1, &p, dst_param);
shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_VEC4, VKD3D_DATA_FLOAT);
shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true);
}
- else if (ins->opcode == VKD3DSIH_DEFB)
+ else if (ins->opcode == VSIR_OP_DEFB)
{
shader_sm1_read_dst_param(sm1, &p, dst_param);
shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_SCALAR, VKD3D_DATA_UINT);
shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true);
}
- else if (ins->opcode == VKD3DSIH_DEFI)
+ else if (ins->opcode == VSIR_OP_DEFI)
{
shader_sm1_read_dst_param(sm1, &p, dst_param);
shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_VEC4, VKD3D_DATA_INT);
shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true);
}
- else if (ins->opcode == VKD3DSIH_TEXKILL)
+ else if (ins->opcode == VSIR_OP_TEXKILL)
{
/* TEXKILL, uniquely, encodes its argument as a destination, when it is
* semantically a source. Since we have multiple passes which operate
@@ -1360,7 +1360,7 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str
return;
fail:
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
*ptr = sm1->end;
}
@@ -1497,7 +1497,7 @@ int d3dbc_parse(const struct vkd3d_shader_compile_info *compile_info, uint64_t c
ins = &instructions->elements[instructions->count];
shader_sm1_read_instruction(&sm1, ins);
- if (ins->opcode == VKD3DSIH_INVALID)
+ if (ins->opcode == VSIR_OP_INVALID)
{
WARN("Encountered unrecognized or invalid instruction.\n");
vsir_program_cleanup(program);
@@ -1662,7 +1662,7 @@ static const struct vkd3d_sm1_opcode_info *shader_sm1_get_opcode_info_from_vsir(
for (;;)
{
info = &d3dbc->opcode_table[i++];
- if (info->vkd3d_opcode == VKD3DSIH_INVALID)
+ if (info->vkd3d_opcode == VSIR_OP_INVALID)
return NULL;
if (vkd3d_opcode == info->vkd3d_opcode
@@ -1773,7 +1773,7 @@ static bool is_inconsequential_instr(const struct vkd3d_shader_instruction *ins)
const struct vkd3d_shader_src_param *src = &ins->src[0];
unsigned int i;
- if (ins->opcode != VKD3DSIH_MOV)
+ if (ins->opcode != VSIR_OP_MOV)
return false;
if (dst->modifiers != VKD3DSPDM_NONE)
return false;
@@ -1990,47 +1990,47 @@ static void d3dbc_write_vsir_instruction(struct d3dbc_compiler *d3dbc, const str
switch (ins->opcode)
{
- case VKD3DSIH_DEF:
+ case VSIR_OP_DEF:
d3dbc_write_vsir_def(d3dbc, ins);
break;
- case VKD3DSIH_DCL:
+ case VSIR_OP_DCL:
d3dbc_write_vsir_dcl(d3dbc, ins);
break;
- case VKD3DSIH_TEXKILL:
+ case VSIR_OP_TEXKILL:
d3dbc_write_texkill(d3dbc, ins);
break;
- case VKD3DSIH_ABS:
- case VKD3DSIH_ADD:
- case VKD3DSIH_CMP:
- case VKD3DSIH_DP2ADD:
- case VKD3DSIH_DP3:
- case VKD3DSIH_DP4:
- case VKD3DSIH_DSX:
- case VKD3DSIH_DSY:
- case VKD3DSIH_ELSE:
- case VKD3DSIH_ENDIF:
- case VKD3DSIH_FRC:
- case VKD3DSIH_IFC:
- case VKD3DSIH_MAD:
- case VKD3DSIH_MAX:
- case VKD3DSIH_MIN:
- case VKD3DSIH_MOV:
- case VKD3DSIH_MOVA:
- case VKD3DSIH_MUL:
- case VKD3DSIH_SINCOS:
- case VKD3DSIH_SLT:
- case VKD3DSIH_TEXLD:
- case VKD3DSIH_TEXLDD:
+ case VSIR_OP_ABS:
+ case VSIR_OP_ADD:
+ case VSIR_OP_CMP:
+ case VSIR_OP_DP2ADD:
+ case VSIR_OP_DP3:
+ case VSIR_OP_DP4:
+ case VSIR_OP_DSX:
+ case VSIR_OP_DSY:
+ case VSIR_OP_ELSE:
+ case VSIR_OP_ENDIF:
+ case VSIR_OP_FRC:
+ case VSIR_OP_IFC:
+ case VSIR_OP_MAD:
+ case VSIR_OP_MAX:
+ case VSIR_OP_MIN:
+ case VSIR_OP_MOV:
+ case VSIR_OP_MOVA:
+ case VSIR_OP_MUL:
+ case VSIR_OP_SINCOS:
+ case VSIR_OP_SLT:
+ case VSIR_OP_TEXLD:
+ case VSIR_OP_TEXLDD:
d3dbc_write_instruction(d3dbc, ins);
break;
- case VKD3DSIH_EXP:
- case VKD3DSIH_LOG:
- case VKD3DSIH_RCP:
- case VKD3DSIH_RSQ:
+ case VSIR_OP_EXP:
+ case VSIR_OP_LOG:
+ case VSIR_OP_RCP:
+ case VSIR_OP_RSQ:
writemask = ins->dst->write_mask;
if (writemask != VKD3DSP_WRITEMASK_0 && writemask != VKD3DSP_WRITEMASK_1
&& writemask != VKD3DSP_WRITEMASK_2 && writemask != VKD3DSP_WRITEMASK_3)
@@ -2076,6 +2076,13 @@ static void d3dbc_write_semantic_dcl(struct d3dbc_compiler *d3dbc,
VKD3D_ASSERT(ret);
reg.reg.type = output ? VKD3DSPR_OUTPUT : VKD3DSPR_INPUT;
reg.reg.idx[0].offset = element->register_index;
+ if (!vkd3d_shader_ver_ge(version, 3, 0))
+ {
+ if (reg.reg.idx[0].offset > SM1_RASTOUT_REGISTER_OFFSET)
+ reg.reg.idx[0].offset -= SM1_RASTOUT_REGISTER_OFFSET;
+ else if (reg.reg.idx[0].offset > SM1_COLOR_REGISTER_OFFSET)
+ reg.reg.idx[0].offset -= SM1_COLOR_REGISTER_OFFSET;
+ }
}
token = VKD3D_SM1_OP_DCL;
diff --git a/libs/vkd3d/libs/vkd3d-shader/dxil.c b/libs/vkd3d/libs/vkd3d-shader/dxil.c
index 194c51a6ffd..db74a7bfbcc 100644
--- a/libs/vkd3d/libs/vkd3d-shader/dxil.c
+++ b/libs/vkd3d/libs/vkd3d-shader/dxil.c
@@ -3679,7 +3679,7 @@ static void sm6_parser_declare_icb(struct sm6_parser *sm6, const struct sm6_type
{
struct vkd3d_shader_instruction *ins;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER);
/* The icb value index will be resolved later so forward references can be handled. */
ins->declaration.icb = (void *)(intptr_t)init;
dst->value_type = VALUE_TYPE_ICB;
@@ -3693,9 +3693,9 @@ static void sm6_parser_declare_indexable_temp(struct sm6_parser *sm6, const stru
enum vkd3d_data_type data_type = vkd3d_data_type_from_sm6_type(elem_type);
if (ins)
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_DCL_INDEXABLE_TEMP);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_DCL_INDEXABLE_TEMP);
else
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_INDEXABLE_TEMP);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_INDEXABLE_TEMP);
ins->declaration.indexable_temp.register_idx = sm6->indexable_temp_count++;
ins->declaration.indexable_temp.register_size = count;
ins->declaration.indexable_temp.alignment = alignment;
@@ -3715,7 +3715,7 @@ static void sm6_parser_declare_tgsm_raw(struct sm6_parser *sm6, const struct sm6
struct vkd3d_shader_instruction *ins;
unsigned int byte_count;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_TGSM_RAW);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_TGSM_RAW);
dst_param_init(&ins->declaration.tgsm_raw.reg);
dst->value_type = VALUE_TYPE_GROUPSHAREDMEM;
dst->u.groupsharedmem.id = sm6->tgsm_count++;
@@ -3742,7 +3742,7 @@ static void sm6_parser_declare_tgsm_structured(struct sm6_parser *sm6, const str
{
struct vkd3d_shader_instruction *ins;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_TGSM_STRUCTURED);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_TGSM_STRUCTURED);
dst_param_init(&ins->declaration.tgsm_structured.reg);
dst->value_type = VALUE_TYPE_GROUPSHAREDMEM;
dst->u.groupsharedmem.id = sm6->tgsm_count++;
@@ -4031,21 +4031,21 @@ static enum vkd3d_result sm6_parser_globals_init(struct sm6_parser *sm6)
for (i = 0; i < sm6->p.program->instructions.count; ++i)
{
ins = &sm6->p.program->instructions.elements[i];
- if (ins->opcode == VKD3DSIH_DCL_INDEXABLE_TEMP && ins->declaration.indexable_temp.initialiser)
+ if (ins->opcode == VSIR_OP_DCL_INDEXABLE_TEMP && ins->declaration.indexable_temp.initialiser)
{
ins->declaration.indexable_temp.initialiser = resolve_forward_initialiser(
(uintptr_t)ins->declaration.indexable_temp.initialiser, sm6);
}
- else if (ins->opcode == VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER)
+ else if (ins->opcode == VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER)
{
ins->declaration.icb = resolve_forward_initialiser((uintptr_t)ins->declaration.icb, sm6);
}
- else if (ins->opcode == VKD3DSIH_DCL_TGSM_RAW)
+ else if (ins->opcode == VSIR_OP_DCL_TGSM_RAW)
{
ins->declaration.tgsm_raw.zero_init = resolve_forward_zero_initialiser(ins->flags, sm6);
ins->flags = 0;
}
- else if (ins->opcode == VKD3DSIH_DCL_TGSM_STRUCTURED)
+ else if (ins->opcode == VSIR_OP_DCL_TGSM_STRUCTURED)
{
ins->declaration.tgsm_structured.zero_init = resolve_forward_zero_initialiser(ins->flags, sm6);
ins->flags = 0;
@@ -4357,26 +4357,26 @@ static enum vkd3d_shader_opcode map_dx_atomicrmw_op(uint64_t code)
switch (code)
{
case RMW_ADD:
- return VKD3DSIH_IMM_ATOMIC_IADD;
+ return VSIR_OP_IMM_ATOMIC_IADD;
case RMW_AND:
- return VKD3DSIH_IMM_ATOMIC_AND;
+ return VSIR_OP_IMM_ATOMIC_AND;
case RMW_MAX:
- return VKD3DSIH_IMM_ATOMIC_IMAX;
+ return VSIR_OP_IMM_ATOMIC_IMAX;
case RMW_MIN:
- return VKD3DSIH_IMM_ATOMIC_IMIN;
+ return VSIR_OP_IMM_ATOMIC_IMIN;
case RMW_OR:
- return VKD3DSIH_IMM_ATOMIC_OR;
+ return VSIR_OP_IMM_ATOMIC_OR;
case RMW_UMAX:
- return VKD3DSIH_IMM_ATOMIC_UMAX;
+ return VSIR_OP_IMM_ATOMIC_UMAX;
case RMW_UMIN:
- return VKD3DSIH_IMM_ATOMIC_UMIN;
+ return VSIR_OP_IMM_ATOMIC_UMIN;
case RMW_XCHG:
- return VKD3DSIH_IMM_ATOMIC_EXCH;
+ return VSIR_OP_IMM_ATOMIC_EXCH;
case RMW_XOR:
- return VKD3DSIH_IMM_ATOMIC_XOR;
+ return VSIR_OP_IMM_ATOMIC_XOR;
default:
/* DXIL currently doesn't use SUB and NAND. */
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
}
@@ -4417,7 +4417,7 @@ static void sm6_parser_emit_atomicrmw(struct sm6_parser *sm6, const struct dxil_
if (!dxil_record_validate_operand_count(record, i + 4, i + 4, sm6))
return;
- if ((op = map_dx_atomicrmw_op(code = record->operands[i++])) == VKD3DSIH_INVALID)
+ if ((op = map_dx_atomicrmw_op(code = record->operands[i++])) == VSIR_OP_INVALID)
{
FIXME("Unhandled atomicrmw op %"PRIu64".\n", code);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
@@ -4485,7 +4485,7 @@ static enum vkd3d_shader_opcode map_binary_op(uint64_t code, const struct sm6_ty
WARN("Argument type %u is not bool, int16/32/64 or floating point.\n", type_a->class);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"An argument to a binary operation is not bool, int16/32/64 or floating point.");
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
if (type_a != type_b)
{
@@ -4500,55 +4500,58 @@ static enum vkd3d_shader_opcode map_binary_op(uint64_t code, const struct sm6_ty
case BINOP_ADD:
case BINOP_SUB:
/* NEG is applied later for subtraction. */
- op = is_int ? VKD3DSIH_IADD : (is_double ? VKD3DSIH_DADD : VKD3DSIH_ADD);
+ op = is_int ? VSIR_OP_IADD : (is_double ? VSIR_OP_DADD : VSIR_OP_ADD);
is_valid = !is_bool;
break;
case BINOP_AND:
- op = VKD3DSIH_AND;
+ op = VSIR_OP_AND;
is_valid = is_int;
break;
case BINOP_ASHR:
- op = VKD3DSIH_ISHR;
+ op = VSIR_OP_ISHR;
is_valid = is_int && !is_bool;
break;
case BINOP_LSHR:
- op = VKD3DSIH_USHR;
+ op = VSIR_OP_USHR;
is_valid = is_int && !is_bool;
break;
case BINOP_MUL:
- op = is_int ? VKD3DSIH_IMUL_LOW : (is_double ? VKD3DSIH_DMUL : VKD3DSIH_MUL);
+ op = is_int ? VSIR_OP_IMUL_LOW : (is_double ? VSIR_OP_DMUL : VSIR_OP_MUL);
is_valid = !is_bool;
break;
case BINOP_OR:
- op = VKD3DSIH_OR;
+ op = VSIR_OP_OR;
is_valid = is_int;
break;
case BINOP_SDIV:
- op = is_int ? VKD3DSIH_IDIV : (is_double ? VKD3DSIH_DDIV : VKD3DSIH_DIV);
+ op = is_int ? VSIR_OP_IDIV : (is_double ? VSIR_OP_DDIV : VSIR_OP_DIV);
is_valid = !is_bool;
break;
case BINOP_SREM:
- op = is_int ? VKD3DSIH_IDIV : VKD3DSIH_FREM;
+ op = is_int ? VSIR_OP_IREM : VSIR_OP_FREM;
is_valid = !is_bool;
break;
case BINOP_SHL:
- op = VKD3DSIH_ISHL;
+ op = VSIR_OP_ISHL;
is_valid = is_int && !is_bool;
break;
case BINOP_UDIV:
+ op = VSIR_OP_UDIV_SIMPLE;
+ is_valid = is_int && !is_bool;
+ break;
case BINOP_UREM:
- op = VKD3DSIH_UDIV;
+ op = VSIR_OP_UREM;
is_valid = is_int && !is_bool;
break;
case BINOP_XOR:
- op = VKD3DSIH_XOR;
+ op = VSIR_OP_XOR;
is_valid = is_int;
break;
default:
FIXME("Unhandled binary op %#"PRIx64".\n", code);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Binary operation %#"PRIx64" is unhandled.", code);
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
if (!is_valid)
@@ -4582,7 +4585,7 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
return;
code = record->operands[i++];
- if ((handler_idx = map_binary_op(code, a->type, b->type, sm6)) == VKD3DSIH_INVALID)
+ if ((handler_idx = map_binary_op(code, a->type, b->type, sm6)) == VSIR_OP_INVALID)
return;
vsir_instruction_init(ins, &sm6->p.location, handler_idx);
@@ -4592,25 +4595,27 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
switch (handler_idx)
{
- case VKD3DSIH_ADD:
- case VKD3DSIH_MUL:
- case VKD3DSIH_DIV:
- case VKD3DSIH_FREM:
+ case VSIR_OP_ADD:
+ case VSIR_OP_MUL:
+ case VSIR_OP_DIV:
+ case VSIR_OP_FREM:
if (!(flags & FP_ALLOW_UNSAFE_ALGEBRA))
ins->flags |= VKD3DSI_PRECISE_X;
flags &= ~FP_ALLOW_UNSAFE_ALGEBRA;
/* SPIR-V FPFastMathMode is only available in the Kernel execution model. */
silence_warning = !(flags & ~(FP_NO_NAN | FP_NO_INF | FP_NO_SIGNED_ZEROS | FP_ALLOW_RECIPROCAL));
break;
- case VKD3DSIH_IADD:
- case VKD3DSIH_IMUL_LOW:
- case VKD3DSIH_ISHL:
+ case VSIR_OP_IADD:
+ case VSIR_OP_IMUL_LOW:
+ case VSIR_OP_ISHL:
silence_warning = !(flags & ~(OB_NO_UNSIGNED_WRAP | OB_NO_SIGNED_WRAP));
break;
- case VKD3DSIH_ISHR:
- case VKD3DSIH_USHR:
- case VKD3DSIH_IDIV:
- case VKD3DSIH_UDIV:
+ case VSIR_OP_ISHR:
+ case VSIR_OP_USHR:
+ case VSIR_OP_IDIV:
+ case VSIR_OP_UDIV_SIMPLE:
+ case VSIR_OP_IREM:
+ case VSIR_OP_UREM:
silence_warning = !(flags & ~PEB_EXACT);
break;
default:
@@ -4637,27 +4642,14 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
dst->type = a->type;
- if (handler_idx == VKD3DSIH_UDIV || handler_idx == VKD3DSIH_IDIV)
+ if (handler_idx == VSIR_OP_ISHL || handler_idx == VSIR_OP_ISHR || handler_idx == VSIR_OP_USHR)
{
- struct vkd3d_shader_dst_param *dst_params = instruction_dst_params_alloc(ins, 2, sm6);
- unsigned int index = code != BINOP_UDIV && code != BINOP_SDIV;
-
- dst_param_init(&dst_params[0]);
- dst_param_init(&dst_params[1]);
- sm6_parser_init_ssa_value(sm6, dst);
- sm6_register_from_value(&dst_params[index].reg, dst, sm6);
- vsir_dst_param_init_null(&dst_params[index ^ 1]);
- }
- else
- {
- if (handler_idx == VKD3DSIH_ISHL || handler_idx == VKD3DSIH_ISHR || handler_idx == VKD3DSIH_USHR)
- {
- /* DXC emits AND instructions where necessary to mask shift counts. Shift binops
- * do not imply masking the shift as the TPF equivalents do. */
- ins->flags |= VKD3DSI_SHIFT_UNMASKED;
- }
- instruction_dst_param_init_ssa_scalar(ins, sm6);
+ /* DXC emits AND instructions where necessary to mask shift counts.
+ * Shift binops do not imply masking the shift as the TPF equivalents
+ * do. */
+ ins->flags |= VKD3DSI_SHIFT_UNMASKED;
}
+ instruction_dst_param_init_ssa_scalar(ins, sm6);
}
static const struct sm6_block *sm6_function_get_block(const struct sm6_function *function, uint64_t index,
@@ -4712,7 +4704,7 @@ static void sm6_parser_emit_br(struct sm6_parser *sm6, const struct dxil_record
code_block->terminator.false_block = sm6_function_get_block(function, record->operands[1], sm6);
}
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
}
static bool sm6_parser_emit_reg_composite_construct(struct sm6_parser *sm6,
@@ -4750,7 +4742,7 @@ static bool sm6_parser_emit_reg_composite_construct(struct sm6_parser *sm6,
for (i = 0; i < component_count; ++i, ++ins)
{
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
return false;
@@ -4808,7 +4800,7 @@ static enum vkd3d_shader_opcode sm6_dx_map_void_op(enum dx_intrinsic_opcode op)
switch (op)
{
case DX_WAVE_IS_FIRST_LANE:
- return VKD3DSIH_WAVE_IS_FIRST_LANE;
+ return VSIR_OP_WAVE_IS_FIRST_LANE;
default:
vkd3d_unreachable();
}
@@ -4827,81 +4819,81 @@ static enum vkd3d_shader_opcode map_dx_unary_op(enum dx_intrinsic_opcode op)
switch (op)
{
case DX_ISNAN:
- return VKD3DSIH_ISNAN;
+ return VSIR_OP_ISNAN;
case DX_ISINF:
- return VKD3DSIH_ISINF;
+ return VSIR_OP_ISINF;
case DX_ISFINITE:
- return VKD3DSIH_ISFINITE;
+ return VSIR_OP_ISFINITE;
case DX_COS:
- return VKD3DSIH_COS;
+ return VSIR_OP_COS;
case DX_SIN:
- return VKD3DSIH_SIN;
+ return VSIR_OP_SIN;
case DX_TAN:
- return VKD3DSIH_TAN;
+ return VSIR_OP_TAN;
case DX_ACOS:
- return VKD3DSIH_ACOS;
+ return VSIR_OP_ACOS;
case DX_ASIN:
- return VKD3DSIH_ASIN;
+ return VSIR_OP_ASIN;
case DX_ATAN:
- return VKD3DSIH_ATAN;
+ return VSIR_OP_ATAN;
case DX_HCOS:
- return VKD3DSIH_HCOS;
+ return VSIR_OP_HCOS;
case DX_HSIN:
- return VKD3DSIH_HSIN;
+ return VSIR_OP_HSIN;
case DX_HTAN:
- return VKD3DSIH_HTAN;
+ return VSIR_OP_HTAN;
case DX_EXP:
- return VKD3DSIH_EXP;
+ return VSIR_OP_EXP;
case DX_FRC:
- return VKD3DSIH_FRC;
+ return VSIR_OP_FRC;
case DX_LOG:
- return VKD3DSIH_LOG;
+ return VSIR_OP_LOG;
case DX_SQRT:
- return VKD3DSIH_SQRT;
+ return VSIR_OP_SQRT;
case DX_RSQRT:
- return VKD3DSIH_RSQ;
+ return VSIR_OP_RSQ;
case DX_ROUND_NE:
- return VKD3DSIH_ROUND_NE;
+ return VSIR_OP_ROUND_NE;
case DX_ROUND_NI:
- return VKD3DSIH_ROUND_NI;
+ return VSIR_OP_ROUND_NI;
case DX_ROUND_PI:
- return VKD3DSIH_ROUND_PI;
+ return VSIR_OP_ROUND_PI;
case DX_ROUND_Z:
- return VKD3DSIH_ROUND_Z;
+ return VSIR_OP_ROUND_Z;
case DX_BFREV:
- return VKD3DSIH_BFREV;
+ return VSIR_OP_BFREV;
case DX_COUNT_BITS:
- return VKD3DSIH_COUNTBITS;
+ return VSIR_OP_COUNTBITS;
case DX_FIRST_BIT_LO:
- return VKD3DSIH_FIRSTBIT_LO;
+ return VSIR_OP_FIRSTBIT_LO;
case DX_FIRST_BIT_HI:
- return VKD3DSIH_FIRSTBIT_HI;
+ return VSIR_OP_FIRSTBIT_HI;
case DX_FIRST_BIT_SHI:
- return VKD3DSIH_FIRSTBIT_SHI;
+ return VSIR_OP_FIRSTBIT_SHI;
case DX_DERIV_COARSEX:
- return VKD3DSIH_DSX_COARSE;
+ return VSIR_OP_DSX_COARSE;
case DX_DERIV_COARSEY:
- return VKD3DSIH_DSY_COARSE;
+ return VSIR_OP_DSY_COARSE;
case DX_DERIV_FINEX:
- return VKD3DSIH_DSX_FINE;
+ return VSIR_OP_DSX_FINE;
case DX_DERIV_FINEY:
- return VKD3DSIH_DSY_FINE;
+ return VSIR_OP_DSY_FINE;
case DX_LEGACY_F32TOF16:
- return VKD3DSIH_F32TOF16;
+ return VSIR_OP_F32TOF16;
case DX_LEGACY_F16TOF32:
- return VKD3DSIH_F16TOF32;
+ return VSIR_OP_F16TOF32;
case DX_WAVE_ACTIVE_ALL_EQUAL:
- return VKD3DSIH_WAVE_ACTIVE_ALL_EQUAL;
+ return VSIR_OP_WAVE_ACTIVE_ALL_EQUAL;
case DX_WAVE_ALL_BIT_COUNT:
- return VKD3DSIH_WAVE_ALL_BIT_COUNT;
+ return VSIR_OP_WAVE_ALL_BIT_COUNT;
case DX_WAVE_ALL_TRUE:
- return VKD3DSIH_WAVE_ALL_TRUE;
+ return VSIR_OP_WAVE_ALL_TRUE;
case DX_WAVE_ANY_TRUE:
- return VKD3DSIH_WAVE_ANY_TRUE;
+ return VSIR_OP_WAVE_ANY_TRUE;
case DX_WAVE_PREFIX_BIT_COUNT:
- return VKD3DSIH_WAVE_PREFIX_BIT_COUNT;
+ return VSIR_OP_WAVE_PREFIX_BIT_COUNT;
case DX_WAVE_READ_LANE_FIRST:
- return VKD3DSIH_WAVE_READ_LANE_FIRST;
+ return VSIR_OP_WAVE_READ_LANE_FIRST;
default:
vkd3d_unreachable();
}
@@ -4926,21 +4918,21 @@ static enum vkd3d_shader_opcode map_dx_binary_op(enum dx_intrinsic_opcode op, co
switch (op)
{
case DX_FMAX:
- return type->u.width == 64 ? VKD3DSIH_DMAX : VKD3DSIH_MAX;
+ return type->u.width == 64 ? VSIR_OP_DMAX : VSIR_OP_MAX;
case DX_FMIN:
- return type->u.width == 64 ? VKD3DSIH_DMIN : VKD3DSIH_MIN;
+ return type->u.width == 64 ? VSIR_OP_DMIN : VSIR_OP_MIN;
case DX_IMAX:
- return VKD3DSIH_IMAX;
+ return VSIR_OP_IMAX;
case DX_IMIN:
- return VKD3DSIH_IMIN;
+ return VSIR_OP_IMIN;
case DX_QUAD_READ_LANE_AT:
- return VKD3DSIH_QUAD_READ_LANE_AT;
+ return VSIR_OP_QUAD_READ_LANE_AT;
case DX_UMAX:
- return VKD3DSIH_UMAX;
+ return VSIR_OP_UMAX;
case DX_UMIN:
- return VKD3DSIH_UMIN;
+ return VSIR_OP_UMIN;
case DX_WAVE_READ_LANE_AT:
- return VKD3DSIH_WAVE_READ_LANE_AT;
+ return VSIR_OP_WAVE_READ_LANE_AT;
default:
vkd3d_unreachable();
}
@@ -4968,29 +4960,29 @@ static enum vkd3d_shader_opcode map_dx_atomic_binop(const struct sm6_value *oper
switch (code)
{
case ATOMIC_BINOP_ADD:
- return VKD3DSIH_IMM_ATOMIC_IADD;
+ return VSIR_OP_IMM_ATOMIC_IADD;
case ATOMIC_BINOP_AND:
- return VKD3DSIH_IMM_ATOMIC_AND;
+ return VSIR_OP_IMM_ATOMIC_AND;
case ATOMIC_BINOP_IMAX:
- return VKD3DSIH_IMM_ATOMIC_IMAX;
+ return VSIR_OP_IMM_ATOMIC_IMAX;
case ATOMIC_BINOP_IMIN:
- return VKD3DSIH_IMM_ATOMIC_IMIN;
+ return VSIR_OP_IMM_ATOMIC_IMIN;
case ATOMIC_BINOP_OR:
- return VKD3DSIH_IMM_ATOMIC_OR;
+ return VSIR_OP_IMM_ATOMIC_OR;
case ATOMIC_BINOP_UMAX:
- return VKD3DSIH_IMM_ATOMIC_UMAX;
+ return VSIR_OP_IMM_ATOMIC_UMAX;
case ATOMIC_BINOP_UMIN:
- return VKD3DSIH_IMM_ATOMIC_UMIN;
+ return VSIR_OP_IMM_ATOMIC_UMIN;
case ATOMIC_BINOP_XCHG:
- return VKD3DSIH_IMM_ATOMIC_EXCH;
+ return VSIR_OP_IMM_ATOMIC_EXCH;
case ATOMIC_BINOP_XOR:
- return VKD3DSIH_IMM_ATOMIC_XOR;
+ return VSIR_OP_IMM_ATOMIC_XOR;
/* DXIL currently doesn't use SUB and NAND. */
default:
FIXME("Unhandled atomic binop %"PRIu64".\n", code);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Operation %"PRIu64" for an atomic binop instruction is unhandled.", code);
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
}
@@ -5013,8 +5005,8 @@ static void sm6_parser_emit_dx_atomic_binop(struct sm6_parser *sm6, enum dx_intr
return;
if (is_cmp_xchg)
- handler_idx = VKD3DSIH_IMM_ATOMIC_CMP_EXCH;
- else if ((handler_idx = map_dx_atomic_binop(operands[1], sm6)) == VKD3DSIH_INVALID)
+ handler_idx = VSIR_OP_IMM_ATOMIC_CMP_EXCH;
+ else if ((handler_idx = map_dx_atomic_binop(operands[1], sm6)) == VSIR_OP_INVALID)
return;
coord_idx = 2 - is_cmp_xchg;
@@ -5066,7 +5058,7 @@ static void sm6_parser_emit_dx_barrier(struct sm6_parser *sm6, enum dx_intrinsic
struct vkd3d_shader_instruction *ins = state->ins;
enum dxil_sync_flags flags;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_SYNC);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_SYNC);
flags = sm6_value_get_constant_uint(operands[0], sm6);
ins->flags = flags & (SYNC_THREAD_GROUP | SYNC_THREAD_GROUP_UAV);
if (flags & SYNC_GLOBAL_UAV)
@@ -5110,7 +5102,7 @@ static void sm6_parser_emit_dx_buffer_update_counter(struct sm6_parser *sm6, enu
}
inc = i;
- vsir_instruction_init(ins, &sm6->p.location, (inc < 0) ? VKD3DSIH_IMM_ATOMIC_CONSUME : VKD3DSIH_IMM_ATOMIC_ALLOC);
+ vsir_instruction_init(ins, &sm6->p.location, (inc < 0) ? VSIR_OP_IMM_ATOMIC_CONSUME : VSIR_OP_IMM_ATOMIC_ALLOC);
if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
return;
src_param_init_vector_from_handle(sm6, &src_params[0], &resource->u.handle);
@@ -5141,7 +5133,7 @@ static void sm6_parser_emit_dx_calculate_lod(struct sm6_parser *sm6, enum dx_int
clamp = sm6_value_get_constant_uint(operands[5], sm6);
ins = state->ins;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_LOD);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_LOD);
if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
return;
src_param_init_vector_from_reg(&src_params[0], &coord);
@@ -5165,7 +5157,7 @@ static void sm6_parser_emit_dx_cbuffer_load(struct sm6_parser *sm6, enum dx_intr
if (!sm6_value_validate_is_handle(buffer, sm6))
return;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
@@ -5207,11 +5199,11 @@ static void sm6_parser_emit_dx_input_register_mov(struct sm6_parser *sm6,
{
struct vkd3d_shader_src_param *src_param;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
- sm6_parser_dcl_register_builtin(sm6, VKD3DSIH_DCL_INPUT, reg_type, data_type, 1);
+ sm6_parser_dcl_register_builtin(sm6, VSIR_OP_DCL_INPUT, reg_type, data_type, 1);
vsir_register_init(&src_param->reg, reg_type, data_type, 0);
src_param_init(src_param);
@@ -5275,7 +5267,7 @@ static void sm6_parser_emit_dx_create_handle(struct sm6_parser *sm6, enum dx_int
dst->u.handle.non_uniform = !!sm6_value_get_constant_uint(operands[3], sm6);
/* NOP is used to flag no instruction emitted. */
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
}
static void sm6_parser_emit_dx_stream(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
@@ -5285,7 +5277,7 @@ static void sm6_parser_emit_dx_stream(struct sm6_parser *sm6, enum dx_intrinsic_
struct vkd3d_shader_src_param *src_param;
unsigned int i;
- vsir_instruction_init(ins, &sm6->p.location, (op == DX_CUT_STREAM) ? VKD3DSIH_CUT_STREAM : VKD3DSIH_EMIT_STREAM);
+ vsir_instruction_init(ins, &sm6->p.location, (op == DX_CUT_STREAM) ? VSIR_OP_CUT_STREAM : VSIR_OP_EMIT_STREAM);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
@@ -5316,7 +5308,7 @@ static void sm6_parser_emit_dx_discard(struct sm6_parser *sm6, enum dx_intrinsic
struct vkd3d_shader_instruction *ins = state->ins;
struct vkd3d_shader_src_param *src_param;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_DISCARD);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_DISCARD);
if ((src_param = instruction_src_params_alloc(ins, 1, sm6)))
src_param_init_from_value(src_param, operands[0], sm6);
@@ -5329,7 +5321,7 @@ static void sm6_parser_emit_dx_domain_location(struct sm6_parser *sm6, enum dx_i
struct vkd3d_shader_src_param *src_param;
unsigned int component_idx;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if ((component_idx = sm6_value_get_constant_uint(operands[0], sm6)) >= 3)
{
@@ -5341,7 +5333,7 @@ static void sm6_parser_emit_dx_domain_location(struct sm6_parser *sm6, enum dx_i
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
- sm6_parser_dcl_register_builtin(sm6, VKD3DSIH_DCL_INPUT, VKD3DSPR_TESSCOORD, VKD3D_DATA_FLOAT, 3);
+ sm6_parser_dcl_register_builtin(sm6, VSIR_OP_DCL_INPUT, VKD3DSPR_TESSCOORD, VKD3D_DATA_FLOAT, 3);
vsir_register_init(&src_param->reg, VKD3DSPR_TESSCOORD, VKD3D_DATA_FLOAT, 0);
src_param->reg.dimension = VSIR_DIMENSION_VEC4;
src_param_init_scalar(src_param, component_idx);
@@ -5361,15 +5353,15 @@ static void sm6_parser_emit_dx_dot(struct sm6_parser *sm6, enum dx_intrinsic_opc
switch (op)
{
case DX_DOT2:
- handler_idx = VKD3DSIH_DP2;
+ handler_idx = VSIR_OP_DP2;
component_count = 2;
break;
case DX_DOT3:
- handler_idx = VKD3DSIH_DP3;
+ handler_idx = VSIR_OP_DP3;
component_count = 3;
break;
case DX_DOT4:
- handler_idx = VKD3DSIH_DP4;
+ handler_idx = VSIR_OP_DP4;
component_count = 4;
break;
default:
@@ -5422,7 +5414,7 @@ static void sm6_parser_emit_dx_eval_attrib(struct sm6_parser *sm6, enum dx_intri
}
vsir_instruction_init(ins, &sm6->p.location, (op == DX_EVAL_CENTROID)
- ? VKD3DSIH_EVAL_CENTROID : VKD3DSIH_EVAL_SAMPLE_INDEX);
+ ? VSIR_OP_EVAL_CENTROID : VSIR_OP_EVAL_SAMPLE_INDEX);
if (!(src_params = instruction_src_params_alloc(ins, 1 + (op == DX_EVAL_SAMPLE_INDEX), sm6)))
return;
@@ -5444,7 +5436,7 @@ static void sm6_parser_emit_dx_fabs(struct sm6_parser *sm6, enum dx_intrinsic_op
struct vkd3d_shader_instruction *ins = state->ins;
struct vkd3d_shader_src_param *src_param;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
src_param_init_from_value(src_param, operands[0], sm6);
@@ -5480,8 +5472,8 @@ static void sm6_parser_emit_dx_compute_builtin(struct sm6_parser *sm6, enum dx_i
vkd3d_unreachable();
}
- sm6_parser_dcl_register_builtin(sm6, VKD3DSIH_DCL_INPUT, reg_type, VKD3D_DATA_UINT, component_count);
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ sm6_parser_dcl_register_builtin(sm6, VSIR_OP_DCL_INPUT, reg_type, VKD3D_DATA_UINT, component_count);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
vsir_register_init(&src_param->reg, reg_type, VKD3D_DATA_UINT, 0);
@@ -5500,12 +5492,12 @@ static enum vkd3d_shader_opcode sm6_dx_map_ma_op(enum dx_intrinsic_opcode op, co
switch (op)
{
case DX_FMA:
- return VKD3DSIH_DFMA;
+ return VSIR_OP_DFMA;
case DX_FMAD:
- return VKD3DSIH_MAD;
+ return VSIR_OP_MAD;
case DX_IMAD:
case DX_UMAD:
- return VKD3DSIH_IMAD;
+ return VSIR_OP_IMAD;
default:
vkd3d_unreachable();
}
@@ -5543,7 +5535,7 @@ static void sm6_parser_emit_dx_get_dimensions(struct sm6_parser *sm6, enum dx_in
is_texture = resource->u.handle.d->resource_type != VKD3D_SHADER_RESOURCE_BUFFER;
resource_kind = resource->u.handle.d->kind;
- instruction_init_with_resource(ins, is_texture ? VKD3DSIH_RESINFO : VKD3DSIH_BUFINFO, resource, sm6);
+ instruction_init_with_resource(ins, is_texture ? VSIR_OP_RESINFO : VSIR_OP_BUFINFO, resource, sm6);
if (!(src_params = instruction_src_params_alloc(ins, 1 + is_texture, sm6)))
return;
@@ -5562,7 +5554,7 @@ static void sm6_parser_emit_dx_get_dimensions(struct sm6_parser *sm6, enum dx_in
/* DXIL does not have an intrinsic for sample info, and resinfo is expected to return
* the sample count in .w for MS textures. The result is always a struct of 4 x uint32. */
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_SAMPLE_INFO);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_SAMPLE_INFO);
ins->flags = VKD3DSI_SAMPLE_INFO_UINT;
if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
@@ -5578,7 +5570,7 @@ static void sm6_parser_emit_dx_get_dimensions(struct sm6_parser *sm6, enum dx_in
/* Move the result to an SSA in case another instruction overwrites r0 before
* the components are extracted for use. */
++ins;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
return;
src_param_init_vector_from_reg(&src_params[0], &dst->reg);
@@ -5606,9 +5598,9 @@ static enum vkd3d_shader_opcode sm6_dx_map_tertiary_op(enum dx_intrinsic_opcode
switch (op)
{
case DX_IBFE:
- return VKD3DSIH_IBFE;
+ return VSIR_OP_IBFE;
case DX_UBFE:
- return VKD3DSIH_UBFE;
+ return VSIR_OP_UBFE;
default:
vkd3d_unreachable();
}
@@ -5654,7 +5646,7 @@ static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, enum dx_intrin
"The index for a control point load is undefined.");
}
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (is_patch_constant)
{
@@ -5709,7 +5701,7 @@ static void sm6_parser_emit_dx_make_double(struct sm6_parser *sm6, enum dx_intri
return;
ins = state->ins;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
return;
src_params[0].reg = reg;
@@ -5735,13 +5727,13 @@ static enum vkd3d_shader_opcode dx_map_quad_op(enum dxil_quad_op_kind op)
switch (op)
{
case QUAD_READ_ACROSS_X:
- return VKD3DSIH_QUAD_READ_ACROSS_X;
+ return VSIR_OP_QUAD_READ_ACROSS_X;
case QUAD_READ_ACROSS_Y:
- return VKD3DSIH_QUAD_READ_ACROSS_Y;
+ return VSIR_OP_QUAD_READ_ACROSS_Y;
case QUAD_READ_ACROSS_D:
- return VKD3DSIH_QUAD_READ_ACROSS_D;
+ return VSIR_OP_QUAD_READ_ACROSS_D;
default:
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
}
@@ -5754,7 +5746,7 @@ static void sm6_parser_emit_dx_quad_op(struct sm6_parser *sm6, enum dx_intrinsic
enum dxil_quad_op_kind quad_op;
quad_op = sm6_value_get_constant_uint(operands[1], sm6);
- if ((opcode = dx_map_quad_op(quad_op)) == VKD3DSIH_INVALID)
+ if ((opcode = dx_map_quad_op(quad_op)) == VSIR_OP_INVALID)
{
FIXME("Unhandled quad op kind %u.\n", quad_op);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_UNHANDLED_INTRINSIC,
@@ -5804,7 +5796,7 @@ static void sm6_parser_emit_dx_raw_buffer_load(struct sm6_parser *sm6, enum dx_i
component_count = vsir_write_mask_component_count(write_mask);
}
- instruction_init_with_resource(ins, raw ? VKD3DSIH_LD_RAW : VKD3DSIH_LD_STRUCTURED, resource, sm6);
+ instruction_init_with_resource(ins, raw ? VSIR_OP_LD_RAW : VSIR_OP_LD_STRUCTURED, resource, sm6);
operand_count = 2 + !raw;
if (!(src_params = instruction_src_params_alloc(ins, operand_count, sm6)))
return;
@@ -5869,7 +5861,7 @@ static void sm6_parser_emit_dx_raw_buffer_store(struct sm6_parser *sm6, enum dx_
return;
ins = state->ins;
- vsir_instruction_init(ins, &sm6->p.location, raw ? VKD3DSIH_STORE_RAW : VKD3DSIH_STORE_STRUCTURED);
+ vsir_instruction_init(ins, &sm6->p.location, raw ? VSIR_OP_STORE_RAW : VSIR_OP_STORE_STRUCTURED);
operand_count = 2 + !raw;
if (!(src_params = instruction_src_params_alloc(ins, operand_count, sm6)))
@@ -5909,7 +5901,7 @@ static void sm6_parser_emit_dx_buffer_load(struct sm6_parser *sm6, enum dx_intri
}
instruction_init_with_resource(ins, (resource->u.handle.d->type == VKD3D_SHADER_DESCRIPTOR_TYPE_UAV)
- ? VKD3DSIH_LD_UAV_TYPED : VKD3DSIH_LD, resource, sm6);
+ ? VSIR_OP_LD_UAV_TYPED : VSIR_OP_LD, resource, sm6);
if (!(src_params = instruction_src_params_alloc(ins, 2, sm6)))
return;
@@ -5974,7 +5966,7 @@ static void sm6_parser_emit_dx_buffer_store(struct sm6_parser *sm6, enum dx_intr
return;
ins = state->ins;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_STORE_UAV_TYPED);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_STORE_UAV_TYPED);
if (!(src_params = instruction_src_params_alloc(ins, 2, sm6)))
return;
@@ -5999,7 +5991,7 @@ static void sm6_parser_emit_dx_get_sample_count(struct sm6_parser *sm6, enum dx_
struct vkd3d_shader_instruction *ins = state->ins;
struct vkd3d_shader_src_param *src_param;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_SAMPLE_INFO);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_SAMPLE_INFO);
ins->flags = VKD3DSI_SAMPLE_INFO_UINT;
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
@@ -6026,7 +6018,7 @@ static void sm6_parser_emit_dx_get_sample_pos(struct sm6_parser *sm6, enum dx_in
return;
}
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_SAMPLE_POS);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_SAMPLE_POS);
if (!(src_params = instruction_src_params_alloc(ins, 2, sm6)))
return;
@@ -6091,7 +6083,7 @@ static void sm6_parser_emit_dx_sample(struct sm6_parser *sm6, enum dx_intrinsic_
switch (op)
{
case DX_SAMPLE:
- instruction_init_with_resource(ins, VKD3DSIH_SAMPLE, resource, sm6);
+ instruction_init_with_resource(ins, VSIR_OP_SAMPLE, resource, sm6);
src_params = instruction_src_params_alloc(ins, 3, sm6);
clamp_idx = 9;
break;
@@ -6099,7 +6091,7 @@ static void sm6_parser_emit_dx_sample(struct sm6_parser *sm6, enum dx_intrinsic_
clamp_idx = 10;
/* fall through */
case DX_SAMPLE_LOD:
- instruction_init_with_resource(ins, (op == DX_SAMPLE_B) ? VKD3DSIH_SAMPLE_B : VKD3DSIH_SAMPLE_LOD,
+ instruction_init_with_resource(ins, (op == DX_SAMPLE_B) ? VSIR_OP_SAMPLE_B : VSIR_OP_SAMPLE_LOD,
resource, sm6);
src_params = instruction_src_params_alloc(ins, 4, sm6);
src_param_init_from_value(&src_params[3], operands[9], sm6);
@@ -6108,14 +6100,14 @@ static void sm6_parser_emit_dx_sample(struct sm6_parser *sm6, enum dx_intrinsic_
clamp_idx = 10;
/* fall through */
case DX_SAMPLE_C_LZ:
- instruction_init_with_resource(ins, (op == DX_SAMPLE_C_LZ) ? VKD3DSIH_SAMPLE_C_LZ : VKD3DSIH_SAMPLE_C,
+ instruction_init_with_resource(ins, (op == DX_SAMPLE_C_LZ) ? VSIR_OP_SAMPLE_C_LZ : VSIR_OP_SAMPLE_C,
resource, sm6);
src_params = instruction_src_params_alloc(ins, 4, sm6);
src_param_init_from_value(&src_params[3], operands[9], sm6);
component_count = 1;
break;
case DX_SAMPLE_GRAD:
- instruction_init_with_resource(ins, VKD3DSIH_SAMPLE_GRAD, resource, sm6);
+ instruction_init_with_resource(ins, VSIR_OP_SAMPLE_GRAD, resource, sm6);
src_params = instruction_src_params_alloc(ins, 5, sm6);
src_param_init_vector_from_reg(&src_params[3], &ddx);
src_param_init_vector_from_reg(&src_params[4], &ddy);
@@ -6151,7 +6143,7 @@ static void sm6_parser_emit_dx_sample_index(struct sm6_parser *sm6, enum dx_intr
struct vkd3d_shader_src_param *src_param;
unsigned int element_idx;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
/* SV_SampleIndex is identified in VSIR by its signature element index,
* but the index is not supplied as a parameter to the DXIL intrinsic. */
@@ -6177,7 +6169,7 @@ static void sm6_parser_emit_dx_saturate(struct sm6_parser *sm6, enum dx_intrinsi
struct vkd3d_shader_instruction *ins = state->ins;
struct vkd3d_shader_src_param *src_param;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
src_param_init_from_value(src_param, operands[0], sm6);
@@ -6192,7 +6184,7 @@ static void sm6_parser_emit_dx_split_double(struct sm6_parser *sm6, enum dx_intr
struct vkd3d_shader_instruction *ins = state->ins;
struct vkd3d_shader_src_param *src_param;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
src_param_init_from_value(src_param, operands[0], sm6);
@@ -6245,7 +6237,7 @@ static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, enum dx_intr
return;
}
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(dst_param = instruction_dst_params_alloc(ins, 1, sm6)))
return;
@@ -6256,7 +6248,7 @@ static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, enum dx_intr
if (e->register_index == UINT_MAX)
{
- sm6_parser_dcl_register_builtin(sm6, VKD3DSIH_DCL_OUTPUT, dst_param->reg.type,
+ sm6_parser_dcl_register_builtin(sm6, VSIR_OP_DCL_OUTPUT, dst_param->reg.type,
dst_param->reg.data_type, vsir_write_mask_component_count(e->mask));
}
@@ -6294,13 +6286,13 @@ static void sm6_parser_emit_dx_texture_gather(struct sm6_parser *sm6, enum dx_in
ins = state->ins;
if (op == DX_TEXTURE_GATHER)
{
- instruction_init_with_resource(ins, extended_offset ? VKD3DSIH_GATHER4_PO : VKD3DSIH_GATHER4, resource, sm6);
+ instruction_init_with_resource(ins, extended_offset ? VSIR_OP_GATHER4_PO : VSIR_OP_GATHER4, resource, sm6);
if (!(src_params = instruction_src_params_alloc(ins, 3 + extended_offset, sm6)))
return;
}
else
{
- instruction_init_with_resource(ins, extended_offset ? VKD3DSIH_GATHER4_PO_C : VKD3DSIH_GATHER4_C, resource, sm6);
+ instruction_init_with_resource(ins, extended_offset ? VSIR_OP_GATHER4_PO_C : VSIR_OP_GATHER4_C, resource, sm6);
if (!(src_params = instruction_src_params_alloc(ins, 4 + extended_offset, sm6)))
return;
src_param_init_from_value(&src_params[3 + extended_offset], operands[9], sm6);
@@ -6354,8 +6346,8 @@ static void sm6_parser_emit_dx_texture_load(struct sm6_parser *sm6, enum dx_intr
}
ins = state->ins;
- instruction_init_with_resource(ins, is_uav ? VKD3DSIH_LD_UAV_TYPED
- : is_multisample ? VKD3DSIH_LD2DMS : VKD3DSIH_LD, resource, sm6);
+ instruction_init_with_resource(ins, is_uav ? VSIR_OP_LD_UAV_TYPED
+ : is_multisample ? VSIR_OP_LD2DMS : VSIR_OP_LD, resource, sm6);
instruction_set_texel_offset(ins, &operands[5], sm6);
for (i = 0; i < VKD3D_VEC4_SIZE; ++i)
@@ -6409,7 +6401,7 @@ static void sm6_parser_emit_dx_texture_store(struct sm6_parser *sm6, enum dx_int
return;
ins = state->ins;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_STORE_UAV_TYPED);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_STORE_UAV_TYPED);
if (!(src_params = instruction_src_params_alloc(ins, 2, sm6)))
return;
@@ -6427,7 +6419,7 @@ static void sm6_parser_emit_dx_wave_active_ballot(struct sm6_parser *sm6, enum d
struct vkd3d_shader_instruction *ins = state->ins;
struct vkd3d_shader_src_param *src_param;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_WAVE_ACTIVE_BALLOT);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_WAVE_ACTIVE_BALLOT);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
src_param_init_from_value(src_param, operands[0], sm6);
@@ -6441,16 +6433,16 @@ static enum vkd3d_shader_opcode sm6_dx_map_wave_bit_op(enum dxil_wave_bit_op_kin
switch (op)
{
case WAVE_BIT_OP_AND:
- return VKD3DSIH_WAVE_ACTIVE_BIT_AND;
+ return VSIR_OP_WAVE_ACTIVE_BIT_AND;
case WAVE_BIT_OP_OR:
- return VKD3DSIH_WAVE_ACTIVE_BIT_OR;
+ return VSIR_OP_WAVE_ACTIVE_BIT_OR;
case WAVE_BIT_OP_XOR:
- return VKD3DSIH_WAVE_ACTIVE_BIT_XOR;
+ return VSIR_OP_WAVE_ACTIVE_BIT_XOR;
default:
FIXME("Unhandled wave bit op %u.\n", op);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_UNHANDLED_INTRINSIC,
"Wave bit operation %u is unhandled.", op);
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
}
@@ -6464,7 +6456,7 @@ static void sm6_parser_emit_dx_wave_active_bit(struct sm6_parser *sm6, enum dx_i
wave_op = sm6_value_get_constant_uint(operands[1], sm6);
- if ((opcode = sm6_dx_map_wave_bit_op(wave_op, sm6)) == VKD3DSIH_INVALID)
+ if ((opcode = sm6_dx_map_wave_bit_op(wave_op, sm6)) == VSIR_OP_INVALID)
return;
vsir_instruction_init(ins, &sm6->p.location, opcode);
@@ -6481,22 +6473,22 @@ static enum vkd3d_shader_opcode sm6_dx_map_wave_op(enum dxil_wave_op_kind op, bo
switch (op)
{
case WAVE_OP_ADD:
- return VKD3DSIH_WAVE_OP_ADD;
+ return VSIR_OP_WAVE_OP_ADD;
case WAVE_OP_MUL:
- return VKD3DSIH_WAVE_OP_MUL;
+ return VSIR_OP_WAVE_OP_MUL;
case WAVE_OP_MIN:
if (is_float)
- return VKD3DSIH_WAVE_OP_MIN;
- return is_signed ? VKD3DSIH_WAVE_OP_IMIN : VKD3DSIH_WAVE_OP_UMIN;
+ return VSIR_OP_WAVE_OP_MIN;
+ return is_signed ? VSIR_OP_WAVE_OP_IMIN : VSIR_OP_WAVE_OP_UMIN;
case WAVE_OP_MAX:
if (is_float)
- return VKD3DSIH_WAVE_OP_MAX;
- return is_signed ? VKD3DSIH_WAVE_OP_IMAX : VKD3DSIH_WAVE_OP_UMAX;
+ return VSIR_OP_WAVE_OP_MAX;
+ return is_signed ? VSIR_OP_WAVE_OP_IMAX : VSIR_OP_WAVE_OP_UMAX;
default:
FIXME("Unhandled wave op %u.\n", op);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_UNHANDLED_INTRINSIC,
"Wave operation %u is unhandled.", op);
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
}
@@ -6513,7 +6505,7 @@ static void sm6_parser_emit_dx_wave_op(struct sm6_parser *sm6, enum dx_intrinsic
is_signed = !sm6_value_get_constant_uint(operands[2], sm6);
opcode = sm6_dx_map_wave_op(wave_op, is_signed, sm6_type_is_floating_point(operands[0]->type), sm6);
- if (opcode == VKD3DSIH_INVALID)
+ if (opcode == VSIR_OP_INVALID)
return;
vsir_instruction_init(ins, &sm6->p.location, opcode);
@@ -6786,7 +6778,7 @@ static bool sm6_parser_validate_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_
static void sm6_parser_emit_unhandled(struct sm6_parser *sm6, struct vkd3d_shader_instruction *ins,
struct sm6_value *dst)
{
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
if (!dst->type)
return;
@@ -6915,7 +6907,7 @@ static void sm6_parser_emit_call(struct sm6_parser *sm6, const struct dxil_recor
static enum vkd3d_shader_opcode sm6_map_cast_op(uint64_t code, const struct sm6_type *from,
const struct sm6_type *to, struct sm6_parser *sm6)
{
- enum vkd3d_shader_opcode op = VKD3DSIH_INVALID;
+ enum vkd3d_shader_opcode op = VSIR_OP_INVALID;
bool from_int, to_int, from_fp, to_fp;
unsigned int from_width, to_width;
bool is_valid = false;
@@ -6931,65 +6923,65 @@ static enum vkd3d_shader_opcode sm6_map_cast_op(uint64_t code, const struct sm6_
FIXME("Unhandled cast of type class %u to type class %u.\n", from->class, to->class);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Cast of type class %u to type class %u is not implemented.", from->class, to->class);
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
if (to->u.width == 8 || from->u.width == 8)
{
FIXME("Unhandled 8-bit value.\n");
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Cast to/from an 8-bit type is not implemented.");
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
switch (code)
{
case CAST_TRUNC:
- op = VKD3DSIH_UTOU;
+ op = VSIR_OP_UTOU;
is_valid = from_int && to_int && to->u.width <= from->u.width;
break;
case CAST_ZEXT:
- op = VKD3DSIH_UTOU;
+ op = VSIR_OP_UTOU;
is_valid = from_int && to_int && to->u.width >= from->u.width;
break;
case CAST_SEXT:
- op = VKD3DSIH_ITOI;
+ op = VSIR_OP_ITOI;
is_valid = from_int && to_int && to->u.width >= from->u.width;
break;
case CAST_FPTOUI:
- op = VKD3DSIH_FTOU;
+ op = VSIR_OP_FTOU;
is_valid = from_fp && to_int && to->u.width > 1;
break;
case CAST_FPTOSI:
- op = VKD3DSIH_FTOI;
+ op = VSIR_OP_FTOI;
is_valid = from_fp && to_int && to->u.width > 1;
break;
case CAST_UITOFP:
- op = VKD3DSIH_UTOF;
+ op = VSIR_OP_UTOF;
is_valid = from_int && to_fp;
break;
case CAST_SITOFP:
- op = VKD3DSIH_ITOF;
+ op = VSIR_OP_ITOF;
is_valid = from_int && to_fp;
break;
case CAST_FPTRUNC:
- op = VKD3DSIH_DTOF;
+ op = VSIR_OP_DTOF;
is_valid = from_fp && to_fp && to->u.width <= from->u.width;
break;
case CAST_FPEXT:
- op = VKD3DSIH_FTOD;
+ op = VSIR_OP_FTOD;
is_valid = from_fp && to_fp && to->u.width >= from->u.width;
break;
case CAST_BITCAST:
- op = VKD3DSIH_MOV;
+ op = VSIR_OP_MOV;
is_valid = to->u.width == from->u.width;
break;
@@ -6997,7 +6989,7 @@ static enum vkd3d_shader_opcode sm6_map_cast_op(uint64_t code, const struct sm6_
FIXME("Unhandled cast op %"PRIu64".\n", code);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Cast operation %"PRIu64" is unhandled.", code);
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
if (!is_valid)
@@ -7006,7 +6998,7 @@ static enum vkd3d_shader_opcode sm6_map_cast_op(uint64_t code, const struct sm6_
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
"Cast operation %"PRIu64" from type class %u, width %u to type class %u, width %u is invalid.",
code, from->class, from->u.width, to->class, to->u.width);
- return VKD3DSIH_INVALID;
+ return VSIR_OP_INVALID;
}
/* 16-bit values are currently treated as 32-bit, because 16-bit is
@@ -7021,7 +7013,7 @@ static enum vkd3d_shader_opcode sm6_map_cast_op(uint64_t code, const struct sm6_
to_width = 32;
if (from->class == to->class && from_width == to_width)
- op = VKD3DSIH_NOP;
+ op = VSIR_OP_NOP;
return op;
}
@@ -7050,16 +7042,16 @@ static void sm6_parser_emit_cast(struct sm6_parser *sm6, const struct dxil_recor
{
*dst = *value;
dst->type = type;
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
return;
}
- if ((handler_idx = sm6_map_cast_op(record->operands[i], value->type, type, sm6)) == VKD3DSIH_INVALID)
+ if ((handler_idx = sm6_map_cast_op(record->operands[i], value->type, type, sm6)) == VSIR_OP_INVALID)
return;
vsir_instruction_init(ins, &sm6->p.location, handler_idx);
- if (handler_idx == VKD3DSIH_NOP)
+ if (handler_idx == VSIR_OP_NOP)
{
*dst = *value;
dst->type = type;
@@ -7075,7 +7067,7 @@ static void sm6_parser_emit_cast(struct sm6_parser *sm6, const struct dxil_recor
/* VSIR bitcasts are represented by source registers with types different
* from the types they were written with, rather than with different types
* for the MOV source and destination. */
- if (handler_idx == VKD3DSIH_MOV)
+ if (handler_idx == VSIR_OP_MOV)
src_param->reg.data_type = ins->dst[0].reg.data_type;
}
@@ -7089,33 +7081,33 @@ static const struct sm6_cmp_info *sm6_map_cmp2_op(uint64_t code)
{
static const struct sm6_cmp_info cmp_op_table[] =
{
- [FCMP_FALSE] = {VKD3DSIH_INVALID},
- [FCMP_OEQ] = {VKD3DSIH_EQO},
- [FCMP_OGT] = {VKD3DSIH_LTO, true},
- [FCMP_OGE] = {VKD3DSIH_GEO},
- [FCMP_OLT] = {VKD3DSIH_LTO},
- [FCMP_OLE] = {VKD3DSIH_GEO, true},
- [FCMP_ONE] = {VKD3DSIH_NEO},
- [FCMP_ORD] = {VKD3DSIH_ORD},
- [FCMP_UNO] = {VKD3DSIH_UNO},
- [FCMP_UEQ] = {VKD3DSIH_EQU},
- [FCMP_UGT] = {VKD3DSIH_LTU, true},
- [FCMP_UGE] = {VKD3DSIH_GEU},
- [FCMP_ULT] = {VKD3DSIH_LTU},
- [FCMP_ULE] = {VKD3DSIH_GEU, true},
- [FCMP_UNE] = {VKD3DSIH_NEU},
- [FCMP_TRUE] = {VKD3DSIH_INVALID},
-
- [ICMP_EQ] = {VKD3DSIH_IEQ},
- [ICMP_NE] = {VKD3DSIH_INE},
- [ICMP_UGT] = {VKD3DSIH_ULT, true},
- [ICMP_UGE] = {VKD3DSIH_UGE},
- [ICMP_ULT] = {VKD3DSIH_ULT},
- [ICMP_ULE] = {VKD3DSIH_UGE, true},
- [ICMP_SGT] = {VKD3DSIH_ILT, true},
- [ICMP_SGE] = {VKD3DSIH_IGE},
- [ICMP_SLT] = {VKD3DSIH_ILT},
- [ICMP_SLE] = {VKD3DSIH_IGE, true},
+ [FCMP_FALSE] = {VSIR_OP_INVALID},
+ [FCMP_OEQ] = {VSIR_OP_EQO},
+ [FCMP_OGT] = {VSIR_OP_LTO, true},
+ [FCMP_OGE] = {VSIR_OP_GEO},
+ [FCMP_OLT] = {VSIR_OP_LTO},
+ [FCMP_OLE] = {VSIR_OP_GEO, true},
+ [FCMP_ONE] = {VSIR_OP_NEO},
+ [FCMP_ORD] = {VSIR_OP_ORD},
+ [FCMP_UNO] = {VSIR_OP_UNO},
+ [FCMP_UEQ] = {VSIR_OP_EQU},
+ [FCMP_UGT] = {VSIR_OP_LTU, true},
+ [FCMP_UGE] = {VSIR_OP_GEU},
+ [FCMP_ULT] = {VSIR_OP_LTU},
+ [FCMP_ULE] = {VSIR_OP_GEU, true},
+ [FCMP_UNE] = {VSIR_OP_NEU},
+ [FCMP_TRUE] = {VSIR_OP_INVALID},
+
+ [ICMP_EQ] = {VSIR_OP_IEQ},
+ [ICMP_NE] = {VSIR_OP_INE},
+ [ICMP_UGT] = {VSIR_OP_ULT, true},
+ [ICMP_UGE] = {VSIR_OP_UGE},
+ [ICMP_ULT] = {VSIR_OP_ULT},
+ [ICMP_ULE] = {VSIR_OP_UGE, true},
+ [ICMP_SGT] = {VSIR_OP_ILT, true},
+ [ICMP_SGE] = {VSIR_OP_IGE},
+ [ICMP_SLT] = {VSIR_OP_ILT},
+ [ICMP_SLE] = {VSIR_OP_IGE, true},
};
return (code < ARRAY_SIZE(cmp_op_table)) ? &cmp_op_table[code] : NULL;
@@ -7161,7 +7153,7 @@ static void sm6_parser_emit_cmp2(struct sm6_parser *sm6, const struct dxil_recor
* do not otherwise occur, so deleting these avoids the need for backend support. */
if (sm6_type_is_bool(type_a) && code == ICMP_NE && sm6_value_is_constant_zero(b))
{
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
*dst = *a;
return;
}
@@ -7182,7 +7174,7 @@ static void sm6_parser_emit_cmp2(struct sm6_parser *sm6, const struct dxil_recor
"Type mismatch in comparison operation arguments.");
}
- if (!(cmp = sm6_map_cmp2_op(code)) || !cmp->handler_idx || cmp->handler_idx == VKD3DSIH_INVALID)
+ if (!(cmp = sm6_map_cmp2_op(code)) || !cmp->handler_idx || cmp->handler_idx == VSIR_OP_INVALID)
{
FIXME("Unhandled operation %"PRIu64".\n", code);
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
@@ -7288,7 +7280,7 @@ static void sm6_parser_emit_cmpxchg(struct sm6_parser *sm6, const struct dxil_re
if (record->operand_count > i && record->operands[i])
FIXME("Ignoring weak cmpxchg.\n");
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_IMM_ATOMIC_CMP_EXCH);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_IMM_ATOMIC_CMP_EXCH);
ins->flags = is_volatile ? VKD3DARF_SEQ_CST | VKD3DARF_VOLATILE : VKD3DARF_SEQ_CST;
if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
@@ -7356,7 +7348,7 @@ static void sm6_parser_emit_extractval(struct sm6_parser *sm6, const struct dxil
}
dst->type = type;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
@@ -7462,7 +7454,7 @@ static void sm6_parser_emit_gep(struct sm6_parser *sm6, const struct dxil_record
index->index = elem_value;
index->is_in_bounds = record->operands[0];
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
}
static void sm6_parser_emit_load(struct sm6_parser *sm6, const struct dxil_record *record,
@@ -7514,7 +7506,7 @@ static void sm6_parser_emit_load(struct sm6_parser *sm6, const struct dxil_recor
if (ptr->structure_stride)
{
VKD3D_ASSERT(reg.type == VKD3DSPR_GROUPSHAREDMEM);
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_LD_STRUCTURED);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_LD_STRUCTURED);
if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
return;
@@ -7532,7 +7524,7 @@ static void sm6_parser_emit_load(struct sm6_parser *sm6, const struct dxil_recor
else
{
operand_count = 1 + (reg.type == VKD3DSPR_GROUPSHAREDMEM);
- vsir_instruction_init(ins, &sm6->p.location, (operand_count > 1) ? VKD3DSIH_LD_RAW : VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, (operand_count > 1) ? VSIR_OP_LD_RAW : VSIR_OP_MOV);
if (!(src_params = instruction_src_params_alloc(ins, operand_count, sm6)))
return;
@@ -7616,7 +7608,7 @@ static void sm6_parser_emit_phi(struct sm6_parser *sm6, const struct dxil_record
incoming[j].block = sm6_function_get_block(function, record->operands[i + 1], sm6);
}
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
qsort(incoming, phi->incoming_count, sizeof(*incoming), phi_incoming_compare);
@@ -7651,7 +7643,7 @@ static void sm6_parser_emit_ret(struct sm6_parser *sm6, const struct dxil_record
code_block->terminator.type = TERMINATOR_RET;
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
}
static void sm6_parser_emit_store(struct sm6_parser *sm6, const struct dxil_record *record,
@@ -7701,7 +7693,7 @@ static void sm6_parser_emit_store(struct sm6_parser *sm6, const struct dxil_reco
if (ptr->structure_stride)
{
VKD3D_ASSERT(reg.type == VKD3DSPR_GROUPSHAREDMEM);
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_STORE_STRUCTURED);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_STORE_STRUCTURED);
if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
return;
@@ -7716,7 +7708,7 @@ static void sm6_parser_emit_store(struct sm6_parser *sm6, const struct dxil_reco
else
{
operand_count = 1 + (reg.type == VKD3DSPR_GROUPSHAREDMEM);
- vsir_instruction_init(ins, &sm6->p.location, (operand_count > 1) ? VKD3DSIH_STORE_RAW : VKD3DSIH_MOV);
+ vsir_instruction_init(ins, &sm6->p.location, (operand_count > 1) ? VSIR_OP_STORE_RAW : VSIR_OP_MOV);
if (!(src_params = instruction_src_params_alloc(ins, operand_count, sm6)))
return;
@@ -7814,7 +7806,7 @@ static void sm6_parser_emit_switch(struct sm6_parser *sm6, const struct dxil_rec
terminator->cases[i / 2u].value = sm6_value_get_constant_uint64(src, sm6);
}
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
}
static void sm6_parser_emit_vselect(struct sm6_parser *sm6, const struct dxil_record *record,
@@ -7843,7 +7835,7 @@ static void sm6_parser_emit_vselect(struct sm6_parser *sm6, const struct dxil_re
if (!sm6_value_validate_is_bool(src[0], sm6))
return;
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_MOVC);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOVC);
if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
return;
@@ -8273,7 +8265,7 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
}
ins = &code_block->instructions[code_block->instruction_count];
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
dst = sm6_parser_get_current_value(sm6);
fwd_type = dst->type;
@@ -8365,7 +8357,7 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
code_block = (block_idx < function->block_count) ? function->blocks[block_idx] : NULL;
}
if (code_block)
- code_block->instruction_count += ins->opcode != VKD3DSIH_NOP;
+ code_block->instruction_count += ins->opcode != VSIR_OP_NOP;
if (dst->type && fwd_type && dst->type != fwd_type)
{
@@ -8397,7 +8389,7 @@ static void sm6_block_emit_terminator(const struct sm6_block *block, struct sm6_
case TERMINATOR_UNCOND_BR:
if (!block->terminator.true_block)
return;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_BRANCH);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_BRANCH);
if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
return;
vsir_src_param_init_label(&src_params[0], block->terminator.true_block->id);
@@ -8406,7 +8398,7 @@ static void sm6_block_emit_terminator(const struct sm6_block *block, struct sm6_
case TERMINATOR_COND_BR:
if (!block->terminator.true_block || !block->terminator.false_block)
return;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_BRANCH);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_BRANCH);
if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
return;
src_param_init(&src_params[0]);
@@ -8416,7 +8408,7 @@ static void sm6_block_emit_terminator(const struct sm6_block *block, struct sm6_
break;
case TERMINATOR_SWITCH:
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_SWITCH_MONOLITHIC);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_SWITCH_MONOLITHIC);
if (!(src_params = instruction_src_params_alloc(ins, block->terminator.case_count * 2u + 1, sm6)))
return;
src_param_init(&src_params[0]);
@@ -8463,7 +8455,7 @@ static void sm6_block_emit_terminator(const struct sm6_block *block, struct sm6_
break;
case TERMINATOR_RET:
- sm6_parser_add_instruction(sm6, VKD3DSIH_RET);
+ sm6_parser_add_instruction(sm6, VSIR_OP_RET);
break;
default:
@@ -8485,7 +8477,7 @@ static void sm6_block_emit_phi(const struct sm6_block *block, struct sm6_parser
src_phi = &block->phi[i];
incoming_count = src_phi->incoming_count;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_PHI);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_PHI);
if (!(src_params = instruction_src_params_alloc(ins, incoming_count * 2u, sm6)))
return;
if (!(dst_param = instruction_dst_params_alloc(ins, 1, sm6)))
@@ -8571,7 +8563,7 @@ static void sm6_parser_emit_label(struct sm6_parser *sm6, unsigned int label_id)
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_LABEL);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_LABEL);
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
return;
@@ -9181,7 +9173,7 @@ static struct vkd3d_shader_resource *sm6_parser_resources_load_common_info(struc
if (!m)
{
- ins->opcode = is_uav ? VKD3DSIH_DCL_UAV_RAW : VKD3DSIH_DCL_RESOURCE_RAW;
+ ins->opcode = is_uav ? VSIR_OP_DCL_UAV_RAW : VSIR_OP_DCL_RESOURCE_RAW;
ins->declaration.raw_resource.resource.reg.write_mask = 0;
return &ins->declaration.raw_resource.resource;
}
@@ -9206,7 +9198,7 @@ static struct vkd3d_shader_resource *sm6_parser_resources_load_common_info(struc
"A typed resource has no data type.");
}
- ins->opcode = is_uav ? VKD3DSIH_DCL_UAV_TYPED : VKD3DSIH_DCL;
+ ins->opcode = is_uav ? VSIR_OP_DCL_UAV_TYPED : VSIR_OP_DCL;
for (i = 0; i < VKD3D_VEC4_SIZE; ++i)
ins->declaration.semantic.resource_data_type[i] = resource_values.data_type;
ins->declaration.semantic.resource_type = resource_type;
@@ -9216,14 +9208,14 @@ static struct vkd3d_shader_resource *sm6_parser_resources_load_common_info(struc
}
else if (kind == RESOURCE_KIND_RAWBUFFER)
{
- ins->opcode = is_uav ? VKD3DSIH_DCL_UAV_RAW : VKD3DSIH_DCL_RESOURCE_RAW;
+ ins->opcode = is_uav ? VSIR_OP_DCL_UAV_RAW : VSIR_OP_DCL_RESOURCE_RAW;
ins->declaration.raw_resource.resource.reg.write_mask = 0;
return &ins->declaration.raw_resource.resource;
}
else if (kind == RESOURCE_KIND_STRUCTUREDBUFFER)
{
- ins->opcode = is_uav ? VKD3DSIH_DCL_UAV_STRUCTURED : VKD3DSIH_DCL_RESOURCE_STRUCTURED;
+ ins->opcode = is_uav ? VSIR_OP_DCL_UAV_STRUCTURED : VSIR_OP_DCL_RESOURCE_STRUCTURED;
ins->declaration.structured_resource.byte_stride = resource_values.byte_stride;
ins->declaration.structured_resource.resource.reg.write_mask = 0;
@@ -9292,7 +9284,7 @@ static enum vkd3d_result sm6_parser_resources_load_srv(struct sm6_parser *sm6,
return VKD3D_ERROR_INVALID_SHADER;
}
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_INVALID);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_INVALID);
if (!(resource = sm6_parser_resources_load_common_info(sm6, node->operands[1], false, kind,
node->operands[8], ins)))
@@ -9304,7 +9296,7 @@ static enum vkd3d_result sm6_parser_resources_load_srv(struct sm6_parser *sm6,
d->kind = kind;
d->reg_type = VKD3DSPR_RESOURCE;
d->reg_data_type = VKD3D_DATA_UNUSED;
- d->resource_data_type = (ins->opcode == VKD3DSIH_DCL)
+ d->resource_data_type = (ins->opcode == VSIR_OP_DCL)
? ins->declaration.semantic.resource_data_type[0] : VKD3D_DATA_UNUSED;
init_resource_declaration(resource, VKD3DSPR_RESOURCE, d->reg_data_type, d->id, &d->range);
@@ -9360,7 +9352,7 @@ static enum vkd3d_result sm6_parser_resources_load_uav(struct sm6_parser *sm6,
}
}
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_INVALID);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_INVALID);
if (values[1])
ins->flags = VKD3DSUF_GLOBALLY_COHERENT;
if (values[2])
@@ -9378,7 +9370,7 @@ static enum vkd3d_result sm6_parser_resources_load_uav(struct sm6_parser *sm6,
d->kind = values[0];
d->reg_type = VKD3DSPR_UAV;
d->reg_data_type = VKD3D_DATA_UNUSED;
- d->resource_data_type = (ins->opcode == VKD3DSIH_DCL_UAV_TYPED)
+ d->resource_data_type = (ins->opcode == VSIR_OP_DCL_UAV_TYPED)
? ins->declaration.semantic.resource_data_type[0] : VKD3D_DATA_UNUSED;
init_resource_declaration(resource, VKD3DSPR_UAV, d->reg_data_type, d->id, &d->range);
@@ -9414,7 +9406,7 @@ static enum vkd3d_result sm6_parser_resources_load_cbv(struct sm6_parser *sm6,
return VKD3D_ERROR_INVALID_SHADER;
}
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_DCL_CONSTANT_BUFFER);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_DCL_CONSTANT_BUFFER);
ins->resource_type = VKD3D_SHADER_RESOURCE_BUFFER;
ins->declaration.cb.size = buffer_size;
ins->declaration.cb.src.swizzle = VKD3D_SHADER_NO_SWIZZLE;
@@ -9455,7 +9447,7 @@ static enum vkd3d_result sm6_parser_resources_load_sampler(struct sm6_parser *sm
"Ignoring %u extra operands for a sampler descriptor.", node->operand_count - 7);
}
- vsir_instruction_init(ins, &sm6->p.location, VKD3DSIH_DCL_SAMPLER);
+ vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_DCL_SAMPLER);
ins->resource_type = VKD3D_SHADER_RESOURCE_NONE;
if (!sm6_metadata_get_uint_value(sm6, node->operands[6], &kind))
@@ -9976,7 +9968,7 @@ static void sm6_parser_emit_global_flags(struct sm6_parser *sm6, const struct sm
rotated_flags = (rotated_flags >> 1) | ((rotated_flags & 1) << 4);
global_flags = (global_flags & ~mask) | rotated_flags;
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_GLOBAL_FLAGS);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_GLOBAL_FLAGS);
ins->declaration.global_flags = global_flags;
sm6->p.program->global_flags = global_flags;
}
@@ -10033,7 +10025,7 @@ static enum vkd3d_result sm6_parser_emit_thread_group(struct sm6_parser *sm6, co
}
}
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_THREAD_GROUP);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_THREAD_GROUP);
ins->declaration.thread_group_size.x = group_sizes[0];
ins->declaration.thread_group_size.y = group_sizes[1];
ins->declaration.thread_group_size.z = group_sizes[2];
@@ -10073,7 +10065,7 @@ static void sm6_parser_emit_dcl_tessellator_domain(struct sm6_parser *sm6,
"Domain shader tessellator domain %u is unhandled.", tessellator_domain);
}
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_TESSELLATOR_DOMAIN);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_TESSELLATOR_DOMAIN);
ins->declaration.tessellator_domain = tessellator_domain;
sm6->p.program->tess_domain = tessellator_domain;
}
@@ -10101,7 +10093,7 @@ static void sm6_parser_emit_dcl_tessellator_partitioning(struct sm6_parser *sm6,
"Hull shader tessellator partitioning %u is unhandled.", tessellator_partitioning);
}
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_TESSELLATOR_PARTITIONING);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_TESSELLATOR_PARTITIONING);
ins->declaration.tessellator_partitioning = tessellator_partitioning;
sm6->p.program->tess_partitioning = tessellator_partitioning;
@@ -10119,7 +10111,7 @@ static void sm6_parser_emit_dcl_tessellator_output_primitive(struct sm6_parser *
"Hull shader tessellator output primitive %u is unhandled.", primitive);
}
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE);
ins->declaration.tessellator_output_primitive = primitive;
sm6->p.program->tess_output_primitive = primitive;
@@ -10146,7 +10138,7 @@ static void sm6_parser_emit_dcl_max_tessellation_factor(struct sm6_parser *sm6,
"Hull shader max tessellation factor %f is invalid.", max_tessellation_factor);
}
- ins = sm6_parser_add_instruction(sm6, VKD3DSIH_DCL_HS_MAX_TESSFACTOR);
+ ins = sm6_parser_add_instruction(sm6, VSIR_OP_DCL_HS_MAX_TESSFACTOR);
ins->declaration.max_tessellation_factor = max_tessellation_factor;
}
@@ -10233,7 +10225,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
break;
}
- sm6_parser_emit_dcl_primitive_topology(sm6, VKD3DSIH_DCL_INPUT_PRIMITIVE, input_primitive, patch_vertex_count);
+ sm6_parser_emit_dcl_primitive_topology(sm6, VSIR_OP_DCL_INPUT_PRIMITIVE, input_primitive, patch_vertex_count);
sm6->p.program->input_primitive = input_primitive;
sm6->p.program->input_control_point_count = input_control_point_count;
@@ -10245,7 +10237,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_PROPERTIES,
"Geometry shader output vertex count %u is invalid.", i);
}
- sm6_parser_emit_dcl_count(sm6, VKD3DSIH_DCL_VERTICES_OUT, i);
+ sm6_parser_emit_dcl_count(sm6, VSIR_OP_DCL_VERTICES_OUT, i);
sm6->p.program->vertices_out_count = i;
if (operands[2] > 1)
@@ -10263,7 +10255,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
"Geometry shader output primitive %u is unhandled.", output_primitive);
output_primitive = VKD3D_PT_TRIANGLELIST;
}
- sm6_parser_emit_dcl_primitive_topology(sm6, VKD3DSIH_DCL_OUTPUT_TOPOLOGY, output_primitive, 0);
+ sm6_parser_emit_dcl_primitive_topology(sm6, VSIR_OP_DCL_OUTPUT_TOPOLOGY, output_primitive, 0);
sm6->p.program->output_topology = output_primitive;
i = operands[4];
@@ -10273,7 +10265,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_PROPERTIES,
"Geometry shader instance count %u is invalid.", i);
}
- sm6_parser_emit_dcl_count(sm6, VKD3DSIH_DCL_GS_INSTANCES, i);
+ sm6_parser_emit_dcl_count(sm6, VSIR_OP_DCL_GS_INSTANCES, i);
}
static enum vkd3d_tessellator_domain sm6_parser_ds_properties_init(struct sm6_parser *sm6,
@@ -10380,7 +10372,7 @@ static enum vkd3d_tessellator_domain sm6_parser_hs_properties_init(struct sm6_pa
sm6_parser_validate_control_point_count(sm6, operands[1], false, "Hull shader input");
program->input_control_point_count = operands[1];
sm6_parser_validate_control_point_count(sm6, operands[2], false, "Hull shader output");
- sm6_parser_emit_dcl_count(sm6, VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT, operands[2]);
+ sm6_parser_emit_dcl_count(sm6, VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT, operands[2]);
program->output_control_point_count = operands[2];
sm6_parser_emit_dcl_tessellator_domain(sm6, operands[3]);
sm6_parser_emit_dcl_tessellator_partitioning(sm6, operands[4]);
@@ -10905,7 +10897,7 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, struct vsir_pro
if (version.type == VKD3D_SHADER_TYPE_HULL)
{
- sm6_parser_add_instruction(sm6, VKD3DSIH_HS_CONTROL_POINT_PHASE);
+ sm6_parser_add_instruction(sm6, VSIR_OP_HS_CONTROL_POINT_PHASE);
if ((ret = sm6_function_emit_blocks(fn, sm6)) < 0)
goto fail;
@@ -10920,7 +10912,7 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, struct vsir_pro
goto fail;
}
- sm6_parser_add_instruction(sm6, VKD3DSIH_HS_FORK_PHASE);
+ sm6_parser_add_instruction(sm6, VSIR_OP_HS_FORK_PHASE);
if ((ret = sm6_function_emit_blocks(fn, sm6)) < 0)
goto fail;
diff --git a/libs/vkd3d/libs/vkd3d-shader/glsl.c b/libs/vkd3d/libs/vkd3d-shader/glsl.c
index 40865d842f1..b3e3d10791d 100644
--- a/libs/vkd3d/libs/vkd3d-shader/glsl.c
+++ b/libs/vkd3d/libs/vkd3d-shader/glsl.c
@@ -851,7 +851,7 @@ static void shader_glsl_ld(struct vkd3d_glsl_generator *gen, const struct vkd3d_
if (resource_type != VKD3D_SHADER_RESOURCE_BUFFER)
{
vkd3d_string_buffer_printf(fetch, ", ");
- if (ins->opcode != VKD3DSIH_LD2DMS)
+ if (ins->opcode != VSIR_OP_LD2DMS)
shader_glsl_print_src(fetch, gen, &ins->src[0], VKD3DSP_WRITEMASK_3, ins->src[0].reg.data_type);
else if (sample_count == 1)
/* If the resource isn't a true multisample resource, this is the
@@ -915,14 +915,14 @@ static void shader_glsl_sample(struct vkd3d_glsl_generator *gen, const struct vk
enum vkd3d_data_type data_type;
struct glsl_dst dst;
- bias = ins->opcode == VKD3DSIH_SAMPLE_B;
- dynamic_offset = ins->opcode == VKD3DSIH_GATHER4_PO;
- gather = ins->opcode == VKD3DSIH_GATHER4 || ins->opcode == VKD3DSIH_GATHER4_PO;
- grad = ins->opcode == VKD3DSIH_SAMPLE_GRAD;
- lod = ins->opcode == VKD3DSIH_SAMPLE_LOD || ins->opcode == VKD3DSIH_SAMPLE_C_LZ;
- lod_zero = ins->opcode == VKD3DSIH_SAMPLE_C_LZ;
+ bias = ins->opcode == VSIR_OP_SAMPLE_B;
+ dynamic_offset = ins->opcode == VSIR_OP_GATHER4_PO;
+ gather = ins->opcode == VSIR_OP_GATHER4 || ins->opcode == VSIR_OP_GATHER4_PO;
+ grad = ins->opcode == VSIR_OP_SAMPLE_GRAD;
+ lod = ins->opcode == VSIR_OP_SAMPLE_LOD || ins->opcode == VSIR_OP_SAMPLE_C_LZ;
+ lod_zero = ins->opcode == VSIR_OP_SAMPLE_C_LZ;
offset = dynamic_offset || vkd3d_shader_instruction_has_texel_offset(ins);
- shadow = ins->opcode == VKD3DSIH_SAMPLE_C || ins->opcode == VKD3DSIH_SAMPLE_C_LZ;
+ shadow = ins->opcode == VSIR_OP_SAMPLE_C || ins->opcode == VSIR_OP_SAMPLE_C_LZ;
resource = &ins->src[1 + dynamic_offset];
sampler = &ins->src[2 + dynamic_offset];
@@ -1459,177 +1459,177 @@ static void vkd3d_glsl_handle_instruction(struct vkd3d_glsl_generator *gen,
switch (ins->opcode)
{
- case VKD3DSIH_ADD:
- case VKD3DSIH_IADD:
+ case VSIR_OP_ADD:
+ case VSIR_OP_IADD:
shader_glsl_binop(gen, ins, "+");
break;
- case VKD3DSIH_AND:
+ case VSIR_OP_AND:
shader_glsl_binop(gen, ins, "&");
break;
- case VKD3DSIH_BREAK:
+ case VSIR_OP_BREAK:
shader_glsl_break(gen);
break;
- case VKD3DSIH_CASE:
+ case VSIR_OP_CASE:
shader_glsl_case(gen, ins);
break;
- case VKD3DSIH_CONTINUE:
+ case VSIR_OP_CONTINUE:
shader_glsl_continue(gen);
break;
- case VKD3DSIH_DCL_INDEXABLE_TEMP:
+ case VSIR_OP_DCL_INDEXABLE_TEMP:
shader_glsl_dcl_indexable_temp(gen, ins);
break;
- case VKD3DSIH_NOP:
+ case VSIR_OP_NOP:
break;
- case VKD3DSIH_DEFAULT:
+ case VSIR_OP_DEFAULT:
shader_glsl_default(gen);
break;
- case VKD3DSIH_DIV:
+ case VSIR_OP_DIV:
shader_glsl_binop(gen, ins, "/");
break;
- case VKD3DSIH_DP2:
+ case VSIR_OP_DP2:
shader_glsl_dot(gen, ins, vkd3d_write_mask_from_component_count(2));
break;
- case VKD3DSIH_DP3:
+ case VSIR_OP_DP3:
shader_glsl_dot(gen, ins, vkd3d_write_mask_from_component_count(3));
break;
- case VKD3DSIH_DP4:
+ case VSIR_OP_DP4:
shader_glsl_dot(gen, ins, VKD3DSP_WRITEMASK_ALL);
break;
- case VKD3DSIH_ELSE:
+ case VSIR_OP_ELSE:
shader_glsl_else(gen, ins);
break;
- case VKD3DSIH_ENDIF:
- case VKD3DSIH_ENDLOOP:
- case VKD3DSIH_ENDSWITCH:
+ case VSIR_OP_ENDIF:
+ case VSIR_OP_ENDLOOP:
+ case VSIR_OP_ENDSWITCH:
shader_glsl_end_block(gen);
break;
- case VKD3DSIH_EQO:
- case VKD3DSIH_IEQ:
+ case VSIR_OP_EQO:
+ case VSIR_OP_IEQ:
shader_glsl_relop(gen, ins, "==", "equal");
break;
- case VKD3DSIH_EXP:
+ case VSIR_OP_EXP:
shader_glsl_intrinsic(gen, ins, "exp2");
break;
- case VKD3DSIH_FRC:
+ case VSIR_OP_FRC:
shader_glsl_intrinsic(gen, ins, "fract");
break;
- case VKD3DSIH_FTOI:
+ case VSIR_OP_FTOI:
shader_glsl_cast(gen, ins, "int", "ivec");
break;
- case VKD3DSIH_FTOU:
+ case VSIR_OP_FTOU:
shader_glsl_cast(gen, ins, "uint", "uvec");
break;
- case VKD3DSIH_GATHER4:
- case VKD3DSIH_GATHER4_PO:
- case VKD3DSIH_SAMPLE:
- case VKD3DSIH_SAMPLE_B:
- case VKD3DSIH_SAMPLE_C:
- case VKD3DSIH_SAMPLE_C_LZ:
- case VKD3DSIH_SAMPLE_GRAD:
- case VKD3DSIH_SAMPLE_LOD:
+ case VSIR_OP_GATHER4:
+ case VSIR_OP_GATHER4_PO:
+ case VSIR_OP_SAMPLE:
+ case VSIR_OP_SAMPLE_B:
+ case VSIR_OP_SAMPLE_C:
+ case VSIR_OP_SAMPLE_C_LZ:
+ case VSIR_OP_SAMPLE_GRAD:
+ case VSIR_OP_SAMPLE_LOD:
shader_glsl_sample(gen, ins);
break;
- case VKD3DSIH_GEO:
- case VKD3DSIH_IGE:
+ case VSIR_OP_GEO:
+ case VSIR_OP_IGE:
shader_glsl_relop(gen, ins, ">=", "greaterThanEqual");
break;
- case VKD3DSIH_IF:
+ case VSIR_OP_IF:
shader_glsl_if(gen, ins);
break;
- case VKD3DSIH_MAD:
+ case VSIR_OP_MAD:
shader_glsl_intrinsic(gen, ins, "fma");
break;
- case VKD3DSIH_ILT:
- case VKD3DSIH_LTO:
- case VKD3DSIH_ULT:
+ case VSIR_OP_ILT:
+ case VSIR_OP_LTO:
+ case VSIR_OP_ULT:
shader_glsl_relop(gen, ins, "<", "lessThan");
break;
- case VKD3DSIH_IMAX:
- case VKD3DSIH_MAX:
- case VKD3DSIH_UMAX:
+ case VSIR_OP_IMAX:
+ case VSIR_OP_MAX:
+ case VSIR_OP_UMAX:
shader_glsl_intrinsic(gen, ins, "max");
break;
- case VKD3DSIH_MIN:
- case VKD3DSIH_UMIN:
+ case VSIR_OP_MIN:
+ case VSIR_OP_UMIN:
shader_glsl_intrinsic(gen, ins, "min");
break;
- case VKD3DSIH_IMUL_LOW:
+ case VSIR_OP_IMUL_LOW:
shader_glsl_binop(gen, ins, "*");
break;
- case VKD3DSIH_INE:
- case VKD3DSIH_NEU:
+ case VSIR_OP_INE:
+ case VSIR_OP_NEU:
shader_glsl_relop(gen, ins, "!=", "notEqual");
break;
- case VKD3DSIH_INEG:
+ case VSIR_OP_INEG:
shader_glsl_unary_op(gen, ins, "-");
break;
- case VKD3DSIH_ISHL:
+ case VSIR_OP_ISHL:
shader_glsl_binop(gen, ins, "<<");
break;
- case VKD3DSIH_ISHR:
- case VKD3DSIH_USHR:
+ case VSIR_OP_ISHR:
+ case VSIR_OP_USHR:
shader_glsl_binop(gen, ins, ">>");
break;
- case VKD3DSIH_ITOF:
- case VKD3DSIH_UTOF:
+ case VSIR_OP_ITOF:
+ case VSIR_OP_UTOF:
shader_glsl_cast(gen, ins, "float", "vec");
break;
- case VKD3DSIH_LD:
- case VKD3DSIH_LD2DMS:
+ case VSIR_OP_LD:
+ case VSIR_OP_LD2DMS:
shader_glsl_ld(gen, ins);
break;
- case VKD3DSIH_LD_UAV_TYPED:
+ case VSIR_OP_LD_UAV_TYPED:
shader_glsl_load_uav_typed(gen, ins);
break;
- case VKD3DSIH_LOG:
+ case VSIR_OP_LOG:
shader_glsl_intrinsic(gen, ins, "log2");
break;
- case VKD3DSIH_LOOP:
+ case VSIR_OP_LOOP:
shader_glsl_loop(gen);
break;
- case VKD3DSIH_MOV:
+ case VSIR_OP_MOV:
shader_glsl_mov(gen, ins);
break;
- case VKD3DSIH_MOVC:
+ case VSIR_OP_MOVC:
shader_glsl_movc(gen, ins);
break;
- case VKD3DSIH_MUL:
+ case VSIR_OP_MUL:
shader_glsl_binop(gen, ins, "*");
break;
- case VKD3DSIH_NOT:
+ case VSIR_OP_NOT:
shader_glsl_unary_op(gen, ins, "~");
break;
- case VKD3DSIH_OR:
+ case VSIR_OP_OR:
shader_glsl_binop(gen, ins, "|");
break;
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
shader_glsl_ret(gen, ins);
break;
- case VKD3DSIH_ROUND_NE:
+ case VSIR_OP_ROUND_NE:
shader_glsl_intrinsic(gen, ins, "roundEven");
break;
- case VKD3DSIH_ROUND_NI:
+ case VSIR_OP_ROUND_NI:
shader_glsl_intrinsic(gen, ins, "floor");
break;
- case VKD3DSIH_ROUND_PI:
+ case VSIR_OP_ROUND_PI:
shader_glsl_intrinsic(gen, ins, "ceil");
break;
- case VKD3DSIH_ROUND_Z:
+ case VSIR_OP_ROUND_Z:
shader_glsl_intrinsic(gen, ins, "trunc");
break;
- case VKD3DSIH_RSQ:
+ case VSIR_OP_RSQ:
shader_glsl_intrinsic(gen, ins, "inversesqrt");
break;
- case VKD3DSIH_SQRT:
+ case VSIR_OP_SQRT:
shader_glsl_intrinsic(gen, ins, "sqrt");
break;
- case VKD3DSIH_STORE_UAV_TYPED:
+ case VSIR_OP_STORE_UAV_TYPED:
shader_glsl_store_uav_typed(gen, ins);
break;
- case VKD3DSIH_SWITCH:
+ case VSIR_OP_SWITCH:
shader_glsl_switch(gen, ins);
break;
- case VKD3DSIH_XOR:
+ case VSIR_OP_XOR:
shader_glsl_binop(gen, ins, "^");
break;
default:
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.h b/libs/vkd3d/libs/vkd3d-shader/hlsl.h
index 369181cada8..0dce2831c3e 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.h
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.h
@@ -125,7 +125,7 @@ enum hlsl_sampler_dim
HLSL_SAMPLER_DIM_STRUCTURED_BUFFER,
HLSL_SAMPLER_DIM_RAW_BUFFER,
HLSL_SAMPLER_DIM_MAX = HLSL_SAMPLER_DIM_RAW_BUFFER,
- /* NOTE: Remember to update object_methods[] in hlsl.y if this enum is modified. */
+ /* NOTE: Remember to update texture_methods[] and uav_methods[] in hlsl.y if this is modified. */
};
enum hlsl_so_object_type
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
index 04bb2d98b26..afd6169514f 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
@@ -8020,8 +8020,19 @@ static void generate_vsir_signature_entry(struct hlsl_ctx *ctx, struct vsir_prog
if ((!output && !var->last_read) || (output && !var->first_write))
return;
- if (!sm1_register_from_semantic_name(&program->shader_version,
+ if (sm1_register_from_semantic_name(&program->shader_version,
var->semantic.name, var->semantic.index, output, &sysval, &type, &register_index))
+ {
+ if (!vkd3d_shader_ver_ge(&program->shader_version, 3, 0))
+ {
+ if (type == VKD3DSPR_RASTOUT)
+ register_index += SM1_RASTOUT_REGISTER_OFFSET;
+ else if (type == VKD3DSPR_ATTROUT
+ || (type == VKD3DSPR_INPUT && program->shader_version.type == VKD3D_SHADER_TYPE_PIXEL))
+ register_index += SM1_COLOR_REGISTER_OFFSET;
+ }
+ }
+ else
{
enum vkd3d_decl_usage usage;
unsigned int usage_idx;
@@ -8181,7 +8192,7 @@ static enum vkd3d_data_type vsir_data_type_from_hlsl_type(struct hlsl_ctx *ctx,
}
}
- vkd3d_unreachable();
+ return VKD3D_DATA_UNUSED;
}
static enum vkd3d_data_type vsir_data_type_from_hlsl_instruction(struct hlsl_ctx *ctx,
@@ -8219,7 +8230,7 @@ static void sm1_generate_vsir_constant_defs(struct hlsl_ctx *ctx, struct vsir_pr
}
ins = &instructions->elements[instructions->count];
- if (!vsir_instruction_init_with_params(program, ins, &constant_reg->loc, VKD3DSIH_DEF, 1, 1))
+ if (!vsir_instruction_init_with_params(program, ins, &constant_reg->loc, VSIR_OP_DEF, 1, 1))
{
ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
return;
@@ -8302,7 +8313,7 @@ static void sm1_generate_vsir_sampler_dcls(struct hlsl_ctx *ctx,
}
ins = &instructions->elements[instructions->count];
- if (!vsir_instruction_init_with_params(program, ins, &var->loc, VKD3DSIH_DCL, 0, 0))
+ if (!vsir_instruction_init_with_params(program, ins, &var->loc, VSIR_OP_DCL, 0, 0))
{
ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
return;
@@ -8485,6 +8496,8 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
const struct hlsl_type *data_type = hlsl_deref_get_type(ctx, deref);
const struct hlsl_ir_var *var = deref->var;
+ reg->data_type = vsir_data_type_from_hlsl_type(ctx, data_type);
+
if (var->is_uniform)
{
enum hlsl_regset regset = hlsl_deref_get_regset(ctx, deref);
@@ -8493,18 +8506,10 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
{
reg->type = VKD3DSPR_RESOURCE;
reg->dimension = VSIR_DIMENSION_VEC4;
- if (vkd3d_shader_ver_ge(version, 5, 1))
- {
- reg->idx[0].offset = var->regs[HLSL_REGSET_TEXTURES].id;
- reg->idx[1].offset = var->regs[HLSL_REGSET_TEXTURES].index; /* FIXME: array index */
- reg->idx_count = 2;
- }
- else
- {
- reg->idx[0].offset = var->regs[HLSL_REGSET_TEXTURES].index;
- reg->idx[0].offset += hlsl_offset_from_deref_safe(ctx, deref);
- reg->idx_count = 1;
- }
+ reg->idx[0].offset = var->regs[HLSL_REGSET_TEXTURES].id;
+ reg->idx[1].offset = var->regs[HLSL_REGSET_TEXTURES].index;
+ reg->idx[1].offset += hlsl_offset_from_deref_safe(ctx, deref);
+ reg->idx_count = 2;
VKD3D_ASSERT(regset == HLSL_REGSET_TEXTURES);
*writemask = VKD3DSP_WRITEMASK_ALL;
}
@@ -8512,18 +8517,10 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
{
reg->type = VKD3DSPR_UAV;
reg->dimension = VSIR_DIMENSION_VEC4;
- if (vkd3d_shader_ver_ge(version, 5, 1))
- {
- reg->idx[0].offset = var->regs[HLSL_REGSET_UAVS].id;
- reg->idx[1].offset = var->regs[HLSL_REGSET_UAVS].index; /* FIXME: array index */
- reg->idx_count = 2;
- }
- else
- {
- reg->idx[0].offset = var->regs[HLSL_REGSET_UAVS].index;
- reg->idx[0].offset += hlsl_offset_from_deref_safe(ctx, deref);
- reg->idx_count = 1;
- }
+ reg->idx[0].offset = var->regs[HLSL_REGSET_UAVS].id;
+ reg->idx[1].offset = var->regs[HLSL_REGSET_UAVS].index;
+ reg->idx[1].offset += hlsl_offset_from_deref_safe(ctx, deref);
+ reg->idx_count = 2;
VKD3D_ASSERT(regset == HLSL_REGSET_UAVS);
*writemask = VKD3DSP_WRITEMASK_ALL;
}
@@ -8531,18 +8528,10 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
{
reg->type = VKD3DSPR_SAMPLER;
reg->dimension = VSIR_DIMENSION_NONE;
- if (vkd3d_shader_ver_ge(version, 5, 1))
- {
- reg->idx[0].offset = var->regs[HLSL_REGSET_SAMPLERS].id;
- reg->idx[1].offset = var->regs[HLSL_REGSET_SAMPLERS].index; /* FIXME: array index */
- reg->idx_count = 2;
- }
- else
- {
- reg->idx[0].offset = var->regs[HLSL_REGSET_SAMPLERS].index;
- reg->idx[0].offset += hlsl_offset_from_deref_safe(ctx, deref);
- reg->idx_count = 1;
- }
+ reg->idx[0].offset = var->regs[HLSL_REGSET_SAMPLERS].id;
+ reg->idx[1].offset = var->regs[HLSL_REGSET_SAMPLERS].index;
+ reg->idx[1].offset += hlsl_offset_from_deref_safe(ctx, deref);
+ reg->idx_count = 2;
VKD3D_ASSERT(regset == HLSL_REGSET_SAMPLERS);
*writemask = VKD3DSP_WRITEMASK_ALL;
}
@@ -8561,19 +8550,10 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
VKD3D_ASSERT(data_type->class <= HLSL_CLASS_VECTOR);
reg->type = VKD3DSPR_CONSTBUFFER;
reg->dimension = VSIR_DIMENSION_VEC4;
- if (vkd3d_shader_ver_ge(version, 5, 1))
- {
- reg->idx[0].offset = var->buffer->reg.id;
- reg->idx[1].offset = var->buffer->reg.index; /* FIXME: array index */
- reg->idx[2].offset = offset / 4;
- reg->idx_count = 3;
- }
- else
- {
- reg->idx[0].offset = var->buffer->reg.index;
- reg->idx[1].offset = offset / 4;
- reg->idx_count = 2;
- }
+ reg->idx[0].offset = var->buffer->reg.id;
+ reg->idx[1].offset = var->buffer->reg.index; /* FIXME: array index */
+ reg->idx[2].offset = offset / 4;
+ reg->idx_count = 3;
if (deref->rel_offset.node)
{
@@ -8715,7 +8695,7 @@ static void sm1_generate_vsir_instr_constant(struct hlsl_ctx *ctx,
VKD3D_ASSERT(instr->reg.allocated);
VKD3D_ASSERT(constant->reg.allocated);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return;
src_param = &ins->src[0];
@@ -8734,7 +8714,7 @@ static void sm4_generate_vsir_rasterizer_sample_count(struct hlsl_ctx *ctx,
struct hlsl_ir_node *instr = &expr->node;
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_SAMPLE_INFO, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_SAMPLE_INFO, 1, 1)))
return;
ins->flags = VKD3DSI_SAMPLE_INFO_UINT;
@@ -8836,7 +8816,7 @@ static void sm1_generate_vsir_instr_expr_sincos(struct hlsl_ctx *ctx, struct vsi
VKD3D_ASSERT(instr->reg.allocated);
src_count = (ctx->profile->major_version < 3) ? 3 : 1;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_SINCOS, 1, src_count)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_SINCOS, 1, src_count)))
return;
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
@@ -8885,13 +8865,13 @@ static bool sm1_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
/* Integrals are internally represented as floats, so no change is necessary.*/
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
case HLSL_TYPE_DOUBLE:
if (ctx->double_as_float_alias)
{
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
}
hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
@@ -8916,7 +8896,7 @@ static bool sm1_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
case HLSL_TYPE_DOUBLE:
@@ -8929,7 +8909,7 @@ static bool sm1_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
break;
@@ -8968,7 +8948,7 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
switch (expr->op)
{
case HLSL_OP1_ABS:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ABS, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ABS, 0, 0, true);
break;
case HLSL_OP1_CAST:
@@ -8984,53 +8964,53 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
case HLSL_OP1_DSX:
if (!hlsl_type_is_floating_point(type))
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSX, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSX, 0, 0, true);
break;
case HLSL_OP1_DSY:
if (!hlsl_type_is_floating_point(type))
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSY, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSY, 0, 0, true);
break;
case HLSL_OP1_EXP2:
if (!hlsl_type_is_floating_point(type))
goto err;
- sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_EXP);
+ sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VSIR_OP_EXP);
break;
case HLSL_OP1_LOG2:
if (!hlsl_type_is_floating_point(type))
goto err;
- sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_LOG);
+ sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VSIR_OP_LOG);
break;
case HLSL_OP1_NEG:
if (type->e.numeric.type == HLSL_TYPE_BOOL)
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, VKD3DSPSM_NEG, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, VKD3DSPSM_NEG, 0, true);
break;
case HLSL_OP1_RCP:
if (!hlsl_type_is_floating_point(type))
goto err;
- sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_RCP);
+ sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VSIR_OP_RCP);
break;
case HLSL_OP1_REINTERPRET:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
break;
case HLSL_OP1_RSQ:
if (!hlsl_type_is_floating_point(type))
goto err;
- sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_RSQ);
+ sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VSIR_OP_RSQ);
break;
case HLSL_OP1_SAT:
if (!hlsl_type_is_floating_point(type))
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, VKD3DSPDM_SATURATE, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, VKD3DSPDM_SATURATE, true);
break;
case HLSL_OP1_SIN_REDUCED:
@@ -9043,7 +9023,7 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
case HLSL_OP2_ADD:
if (type->e.numeric.type == HLSL_TYPE_BOOL)
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ADD, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ADD, 0, 0, true);
break;
case HLSL_OP2_DOT:
@@ -9052,11 +9032,11 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
switch (expr->operands[0].node->data_type->e.numeric.dimx)
{
case 3:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP3, 0, 0, false);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DP3, 0, 0, false);
break;
case 4:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP4, 0, 0, false);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DP4, 0, 0, false);
break;
default:
@@ -9066,55 +9046,55 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
break;
case HLSL_OP2_MAX:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MAX, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MAX, 0, 0, true);
break;
case HLSL_OP2_MIN:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MIN, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MIN, 0, 0, true);
break;
case HLSL_OP2_MUL:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MUL, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MUL, 0, 0, true);
break;
case HLSL_OP1_FRACT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_FRC, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_FRC, 0, 0, true);
break;
case HLSL_OP2_LOGIC_AND:
if (type->e.numeric.type != HLSL_TYPE_BOOL)
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MIN, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MIN, 0, 0, true);
break;
case HLSL_OP2_LOGIC_OR:
if (type->e.numeric.type != HLSL_TYPE_BOOL)
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MAX, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MAX, 0, 0, true);
break;
case HLSL_OP2_SLT:
if (!hlsl_type_is_floating_point(type))
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_SLT, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_SLT, 0, 0, true);
break;
case HLSL_OP3_CMP:
if (!hlsl_type_is_floating_point(type))
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_CMP, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_CMP, 0, 0, true);
break;
case HLSL_OP3_DP2ADD:
if (!hlsl_type_is_floating_point(type))
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP2ADD, 0, 0, false);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DP2ADD, 0, 0, false);
break;
case HLSL_OP3_MAD:
if (!hlsl_type_is_floating_point(type))
goto err;
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MAD, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MAD, 0, 0, true);
break;
default:
@@ -9194,7 +9174,7 @@ static void sm1_generate_vsir_init_dst_param_from_deref(struct hlsl_ctx *ctx,
static void sm1_generate_vsir_instr_mova(struct hlsl_ctx *ctx,
struct vsir_program *program, struct hlsl_ir_node *instr)
{
- enum vkd3d_shader_opcode opcode = hlsl_version_ge(ctx, 2, 0) ? VKD3DSIH_MOVA : VKD3DSIH_MOV;
+ enum vkd3d_shader_opcode opcode = hlsl_version_ge(ctx, 2, 0) ? VSIR_OP_MOVA : VSIR_OP_MOV;
struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_instruction *ins;
@@ -9321,7 +9301,7 @@ static void sm1_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
if (load->src.rel_offset.node)
sm1_generate_vsir_instr_mova(ctx, program, load->src.rel_offset.node);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return;
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
@@ -9348,21 +9328,21 @@ static void sm1_generate_vsir_instr_resource_load(struct hlsl_ctx *ctx,
switch (load->load_type)
{
case HLSL_RESOURCE_SAMPLE:
- opcode = VKD3DSIH_TEXLD;
+ opcode = VSIR_OP_TEXLD;
break;
case HLSL_RESOURCE_SAMPLE_PROJ:
- opcode = VKD3DSIH_TEXLD;
+ opcode = VSIR_OP_TEXLD;
flags |= VKD3DSI_TEXLD_PROJECT;
break;
case HLSL_RESOURCE_SAMPLE_LOD_BIAS:
- opcode = VKD3DSIH_TEXLD;
+ opcode = VSIR_OP_TEXLD;
flags |= VKD3DSI_TEXLD_BIAS;
break;
case HLSL_RESOURCE_SAMPLE_GRAD:
- opcode = VKD3DSIH_TEXLDD;
+ opcode = VSIR_OP_TEXLDD;
src_count += 2;
break;
@@ -9403,7 +9383,7 @@ static void generate_vsir_instr_swizzle(struct hlsl_ctx *ctx,
VKD3D_ASSERT(instr->reg.allocated);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return;
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
@@ -9428,7 +9408,7 @@ static void sm1_generate_vsir_instr_store(struct hlsl_ctx *ctx, struct vsir_prog
struct vkd3d_shader_instruction *ins;
struct vkd3d_shader_src_param *src_param;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return;
sm1_generate_vsir_init_dst_param_from_deref(ctx, &ins->dst[0], &store->lhs, &ins->location, store->writemask);
@@ -9446,7 +9426,7 @@ static void sm1_generate_vsir_instr_jump(struct hlsl_ctx *ctx,
if (jump->type == HLSL_IR_JUMP_DISCARD_NEG)
{
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_TEXKILL, 0, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_TEXKILL, 0, 1)))
return;
vsir_src_from_hlsl_node(&ins->src[0], ctx, condition, VKD3DSP_WRITEMASK_ALL);
@@ -9473,7 +9453,7 @@ static void sm1_generate_vsir_instr_if(struct hlsl_ctx *ctx, struct vsir_program
}
VKD3D_ASSERT(condition->data_type->e.numeric.dimx == 1 && condition->data_type->e.numeric.dimy == 1);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_IFC, 0, 2)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_IFC, 0, 2)))
return;
ins->flags = VKD3D_SHADER_REL_OP_NE;
@@ -9487,12 +9467,12 @@ static void sm1_generate_vsir_instr_if(struct hlsl_ctx *ctx, struct vsir_program
sm1_generate_vsir_block(ctx, &iff->then_block, program);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_ELSE, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_ELSE, 0, 0)))
return;
sm1_generate_vsir_block(ctx, &iff->else_block, program);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_ENDIF, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_ENDIF, 0, 0)))
return;
}
@@ -10071,16 +10051,16 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
{
case VKD3D_SHADER_SV_NONE:
opcode = (version->type == VKD3D_SHADER_TYPE_PIXEL)
- ? VKD3DSIH_DCL_INPUT_PS : VKD3DSIH_DCL_INPUT;
+ ? VSIR_OP_DCL_INPUT_PS : VSIR_OP_DCL_INPUT;
break;
case VKD3D_SHADER_SV_PRIMITIVE_ID:
if (version->type == VKD3D_SHADER_TYPE_PIXEL)
- opcode = VKD3DSIH_DCL_INPUT_PS_SGV;
+ opcode = VSIR_OP_DCL_INPUT_PS_SGV;
else if (version->type == VKD3D_SHADER_TYPE_GEOMETRY)
- opcode = VKD3DSIH_DCL_INPUT;
+ opcode = VSIR_OP_DCL_INPUT;
else
- opcode = VKD3DSIH_DCL_INPUT_SGV;
+ opcode = VSIR_OP_DCL_INPUT_SGV;
break;
case VKD3D_SHADER_SV_INSTANCE_ID:
@@ -10088,16 +10068,16 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
case VKD3D_SHADER_SV_SAMPLE_INDEX:
case VKD3D_SHADER_SV_VERTEX_ID:
opcode = (version->type == VKD3D_SHADER_TYPE_PIXEL)
- ? VKD3DSIH_DCL_INPUT_PS_SGV : VKD3DSIH_DCL_INPUT_SGV;
+ ? VSIR_OP_DCL_INPUT_PS_SGV : VSIR_OP_DCL_INPUT_SGV;
break;
default:
if (version->type == VKD3D_SHADER_TYPE_PIXEL)
- opcode = VKD3DSIH_DCL_INPUT_PS_SIV;
+ opcode = VSIR_OP_DCL_INPUT_PS_SIV;
else if (is_primitive && version->type != VKD3D_SHADER_TYPE_GEOMETRY)
- opcode = VKD3DSIH_DCL_INPUT;
+ opcode = VSIR_OP_DCL_INPUT;
else
- opcode = VKD3DSIH_DCL_INPUT_SIV;
+ opcode = VSIR_OP_DCL_INPUT_SIV;
break;
}
}
@@ -10105,12 +10085,12 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
{
if (semantic == VKD3D_SHADER_SV_NONE || version->type == VKD3D_SHADER_TYPE_PIXEL
|| (version->type == VKD3D_SHADER_TYPE_HULL && !ctx->is_patch_constant_func))
- opcode = VKD3DSIH_DCL_OUTPUT;
+ opcode = VSIR_OP_DCL_OUTPUT;
else if ((semantic == VKD3D_SHADER_SV_PRIMITIVE_ID || semantic == VKD3D_SHADER_SV_IS_FRONT_FACE)
&& version->type == VKD3D_SHADER_TYPE_GEOMETRY)
- opcode = VKD3DSIH_DCL_OUTPUT_SGV;
+ opcode = VSIR_OP_DCL_OUTPUT_SGV;
else
- opcode = VKD3DSIH_DCL_OUTPUT_SIV;
+ opcode = VSIR_OP_DCL_OUTPUT_SIV;
}
if (sm4_register_from_semantic_name(version, var->semantic.name, output, &type, &has_idx))
@@ -10130,13 +10110,13 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
if (!(ins = generate_vsir_add_program_instruction(ctx, program, loc, opcode, 0, 0)))
return;
- if (opcode == VKD3DSIH_DCL_OUTPUT)
+ if (opcode == VSIR_OP_DCL_OUTPUT)
{
VKD3D_ASSERT(semantic == VKD3D_SHADER_SV_NONE || semantic == VKD3D_SHADER_SV_TARGET
|| version->type == VKD3D_SHADER_TYPE_HULL || type != VKD3DSPR_OUTPUT);
dst_param = &ins->declaration.dst;
}
- else if (opcode == VKD3DSIH_DCL_INPUT || opcode == VKD3DSIH_DCL_INPUT_PS)
+ else if (opcode == VSIR_OP_DCL_INPUT || opcode == VSIR_OP_DCL_INPUT_PS)
{
VKD3D_ASSERT(semantic == VKD3D_SHADER_SV_NONE || is_primitive || version->type == VKD3D_SHADER_TYPE_GEOMETRY);
dst_param = &ins->declaration.dst;
@@ -10182,7 +10162,7 @@ static void sm4_generate_vsir_instr_dcl_temps(struct hlsl_ctx *ctx, struct vsir_
{
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, loc, VKD3DSIH_DCL_TEMPS, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, loc, VSIR_OP_DCL_TEMPS, 0, 0)))
return;
ins->declaration.count = temp_count;
@@ -10194,7 +10174,7 @@ static void sm4_generate_vsir_instr_dcl_indexable_temp(struct hlsl_ctx *ctx,
{
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, loc, VKD3DSIH_DCL_INDEXABLE_TEMP, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, loc, VSIR_OP_DCL_INDEXABLE_TEMP, 0, 0)))
return;
ins->declaration.indexable_temp.register_idx = idx;
@@ -10221,11 +10201,12 @@ static void sm4_generate_vsir_cast_from_bool(struct hlsl_ctx *ctx, struct vsir_p
VKD3D_ASSERT(instr->reg.allocated);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_AND, 1, 2)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_AND, 1, 2)))
return;
dst_param = &ins->dst[0];
vsir_dst_from_hlsl_node(dst_param, ctx, instr);
+ ins->dst[0].reg.data_type = VKD3D_DATA_UINT;
vsir_src_from_hlsl_node(&ins->src[0], ctx, operand, dst_param->write_mask);
@@ -10257,16 +10238,16 @@ static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
case HLSL_TYPE_INT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ITOF, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ITOF, 0, 0, true);
return true;
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UTOF, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_UTOF, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
@@ -10284,13 +10265,13 @@ static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_FTOI, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_FTOI, 0, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
@@ -10309,13 +10290,13 @@ static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_FTOU, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_FTOU, 0, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
@@ -10379,7 +10360,7 @@ static void sm4_generate_vsir_rcp_using_div(struct hlsl_ctx *ctx,
VKD3D_ASSERT(type_is_float(expr->node.data_type));
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_DIV, 1, 2)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_DIV, 1, 2)))
return;
dst_param = &ins->dst[0];
@@ -10413,12 +10394,12 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_OP1_ABS:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, VKD3DSPSM_ABS, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, VKD3DSPSM_ABS, 0, true);
return true;
case HLSL_OP1_BIT_NOT:
VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_NOT, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_NOT, 0, 0, true);
return true;
case HLSL_OP1_CAST:
@@ -10426,92 +10407,92 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_OP1_CEIL:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ROUND_PI, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ROUND_PI, 0, 0, true);
return true;
case HLSL_OP1_COS:
VKD3D_ASSERT(type_is_float(dst_type));
- sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_SINCOS, expr, 1);
+ sm4_generate_vsir_expr_with_two_destinations(ctx, program, VSIR_OP_SINCOS, expr, 1);
return true;
case HLSL_OP1_DSX:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSX, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSX, 0, 0, true);
return true;
case HLSL_OP1_DSX_COARSE:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSX_COARSE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSX_COARSE, 0, 0, true);
return true;
case HLSL_OP1_DSX_FINE:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSX_FINE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSX_FINE, 0, 0, true);
return true;
case HLSL_OP1_DSY:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSY, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSY, 0, 0, true);
return true;
case HLSL_OP1_DSY_COARSE:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSY_COARSE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSY_COARSE, 0, 0, true);
return true;
case HLSL_OP1_DSY_FINE:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSY_FINE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DSY_FINE, 0, 0, true);
return true;
case HLSL_OP1_EXP2:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_EXP, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_EXP, 0, 0, true);
return true;
case HLSL_OP1_F16TOF32:
VKD3D_ASSERT(type_is_float(dst_type));
VKD3D_ASSERT(hlsl_version_ge(ctx, 5, 0));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_F16TOF32, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_F16TOF32, 0, 0, true);
return true;
case HLSL_OP1_F32TOF16:
VKD3D_ASSERT(dst_type->e.numeric.type == HLSL_TYPE_UINT);
VKD3D_ASSERT(hlsl_version_ge(ctx, 5, 0));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_F32TOF16, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_F32TOF16, 0, 0, true);
return true;
case HLSL_OP1_FLOOR:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ROUND_NI, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ROUND_NI, 0, 0, true);
return true;
case HLSL_OP1_FRACT:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_FRC, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_FRC, 0, 0, true);
return true;
case HLSL_OP1_LOG2:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_LOG, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_LOG, 0, 0, true);
return true;
case HLSL_OP1_LOGIC_NOT:
VKD3D_ASSERT(dst_type->e.numeric.type == HLSL_TYPE_BOOL);
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_NOT, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_NOT, 0, 0, true);
return true;
case HLSL_OP1_NEG:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, VKD3DSPSM_NEG, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, VKD3DSPSM_NEG, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_INEG, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_INEG, 0, 0, true);
return true;
default:
@@ -10525,7 +10506,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_TYPE_FLOAT:
/* SM5 comes with a RCP opcode */
if (hlsl_version_ge(ctx, 5, 0))
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_RCP, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_RCP, 0, 0, true);
else
sm4_generate_vsir_rcp_using_div(ctx, program, expr);
return true;
@@ -10536,50 +10517,50 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
}
case HLSL_OP1_REINTERPRET:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, 0, true);
return true;
case HLSL_OP1_ROUND:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ROUND_NE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ROUND_NE, 0, 0, true);
return true;
case HLSL_OP1_RSQ:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_RSQ, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_RSQ, 0, 0, true);
return true;
case HLSL_OP1_SAT:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, VKD3DSPDM_SATURATE, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOV, 0, VKD3DSPDM_SATURATE, true);
return true;
case HLSL_OP1_SIN:
VKD3D_ASSERT(type_is_float(dst_type));
- sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_SINCOS, expr, 0);
+ sm4_generate_vsir_expr_with_two_destinations(ctx, program, VSIR_OP_SINCOS, expr, 0);
return true;
case HLSL_OP1_SQRT:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_SQRT, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_SQRT, 0, 0, true);
return true;
case HLSL_OP1_TRUNC:
VKD3D_ASSERT(type_is_float(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ROUND_Z, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ROUND_Z, 0, 0, true);
return true;
case HLSL_OP2_ADD:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ADD, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ADD, 0, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IADD, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_IADD, 0, 0, true);
return true;
default:
@@ -10589,29 +10570,29 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_OP2_BIT_AND:
VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_AND, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_AND, 0, 0, true);
return true;
case HLSL_OP2_BIT_OR:
VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_OR, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_OR, 0, 0, true);
return true;
case HLSL_OP2_BIT_XOR:
VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_XOR, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_XOR, 0, 0, true);
return true;
case HLSL_OP2_DIV:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DIV, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DIV, 0, 0, true);
return true;
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_UDIV, expr, 0);
+ sm4_generate_vsir_expr_with_two_destinations(ctx, program, VSIR_OP_UDIV, expr, 0);
return true;
default:
@@ -10626,15 +10607,15 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (expr->operands[0].node->data_type->e.numeric.dimx)
{
case 4:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP4, 0, 0, false);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DP4, 0, 0, false);
return true;
case 3:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP3, 0, 0, false);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DP3, 0, 0, false);
return true;
case 2:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP2, 0, 0, false);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_DP2, 0, 0, false);
return true;
case 1:
@@ -10653,14 +10634,14 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_EQO, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_EQO, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
case HLSL_TYPE_INT:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IEQ, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_IEQ, 0, 0, true);
return true;
default:
@@ -10675,17 +10656,17 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_GEO, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_GEO, 0, 0, true);
return true;
case HLSL_TYPE_INT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IGE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_IGE, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UGE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_UGE, 0, 0, true);
return true;
default:
@@ -10700,17 +10681,17 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_LTO, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_LTO, 0, 0, true);
return true;
case HLSL_TYPE_INT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ILT, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ILT, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ULT, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ULT, 0, 0, true);
return true;
default:
@@ -10721,31 +10702,31 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_OP2_LOGIC_AND:
VKD3D_ASSERT(dst_type->e.numeric.type == HLSL_TYPE_BOOL);
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_AND, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_AND, 0, 0, true);
return true;
case HLSL_OP2_LOGIC_OR:
VKD3D_ASSERT(dst_type->e.numeric.type == HLSL_TYPE_BOOL);
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_OR, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_OR, 0, 0, true);
return true;
case HLSL_OP2_LSHIFT:
VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
VKD3D_ASSERT(dst_type->e.numeric.type != HLSL_TYPE_BOOL);
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ISHL, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_ISHL, 0, 0, true);
return true;
case HLSL_OP3_MAD:
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MAD, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MAD, 0, 0, true);
return true;
case HLSL_TYPE_INT:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IMAD, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_IMAD, 0, 0, true);
return true;
default:
@@ -10757,16 +10738,16 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MAX, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MAX, 0, 0, true);
return true;
case HLSL_TYPE_INT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IMAX, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_IMAX, 0, 0, true);
return true;
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UMAX, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_UMAX, 0, 0, true);
return true;
default:
@@ -10778,16 +10759,16 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MIN, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MIN, 0, 0, true);
return true;
case HLSL_TYPE_INT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IMIN, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_IMIN, 0, 0, true);
return true;
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UMIN, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_UMIN, 0, 0, true);
return true;
default:
@@ -10800,7 +10781,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
{
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_UDIV, expr, 1);
+ sm4_generate_vsir_expr_with_two_destinations(ctx, program, VSIR_OP_UDIV, expr, 1);
return true;
default:
@@ -10812,7 +10793,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (dst_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MUL, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MUL, 0, 0, true);
return true;
case HLSL_TYPE_INT:
@@ -10820,7 +10801,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_TYPE_UINT:
/* Using IMUL instead of UMUL because we're taking the low
* bits, and the native compiler generates IMUL. */
- sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_IMUL, expr, 1);
+ sm4_generate_vsir_expr_with_two_destinations(ctx, program, VSIR_OP_IMUL, expr, 1);
return true;
default:
@@ -10834,14 +10815,14 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_NEU, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_NEU, 0, 0, true);
return true;
case HLSL_TYPE_BOOL:
case HLSL_TYPE_INT:
case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_INE, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_INE, 0, 0, true);
return true;
default:
@@ -10854,11 +10835,11 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
VKD3D_ASSERT(dst_type->e.numeric.type != HLSL_TYPE_BOOL);
generate_vsir_instr_expr_single_instr_op(ctx, program, expr,
- dst_type->e.numeric.type == HLSL_TYPE_INT ? VKD3DSIH_ISHR : VKD3DSIH_USHR, 0, 0, true);
+ dst_type->e.numeric.type == HLSL_TYPE_INT ? VSIR_OP_ISHR : VSIR_OP_USHR, 0, 0, true);
return true;
case HLSL_OP3_TERNARY:
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOVC, 0, 0, true);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VSIR_OP_MOVC, 0, 0, true);
return true;
default:
@@ -10875,7 +10856,7 @@ static bool sm4_generate_vsir_instr_store(struct hlsl_ctx *ctx,
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return false;
dst_param = &ins->dst[0];
@@ -10915,7 +10896,7 @@ static bool sm4_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
/* Uniform bools can be specified as anything, but internal bools
* always have 0 for false and ~0 for true. Normalise that here. */
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOVC, 1, 3)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOVC, 1, 3)))
return false;
dst_param = &ins->dst[0];
@@ -10934,7 +10915,7 @@ static bool sm4_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
}
else
{
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return false;
dst_param = &ins->dst[0];
@@ -10965,12 +10946,12 @@ static bool sm4_generate_vsir_instr_resource_store(struct hlsl_ctx *ctx,
if (hlsl_version_lt(ctx, 5, 0))
{
- opcode = store->store_type == HLSL_RESOURCE_STREAM_APPEND ? VKD3DSIH_EMIT : VKD3DSIH_CUT;
+ opcode = store->store_type == HLSL_RESOURCE_STREAM_APPEND ? VSIR_OP_EMIT : VSIR_OP_CUT;
ins = generate_vsir_add_program_instruction(ctx, program, &store->node.loc, opcode, 0, 0);
return !!ins;
}
- opcode = store->store_type == HLSL_RESOURCE_STREAM_APPEND ? VKD3DSIH_EMIT_STREAM : VKD3DSIH_CUT_STREAM;
+ opcode = store->store_type == HLSL_RESOURCE_STREAM_APPEND ? VSIR_OP_EMIT_STREAM : VSIR_OP_CUT_STREAM;
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &store->node.loc, opcode, 0, 1)))
return false;
@@ -10995,7 +10976,7 @@ static bool sm4_generate_vsir_instr_resource_store(struct hlsl_ctx *ctx,
if (resource_type->sampler_dim == HLSL_SAMPLER_DIM_RAW_BUFFER)
{
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_STORE_RAW, 1, 2)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_STORE_RAW, 1, 2)))
return false;
writemask = vkd3d_write_mask_from_component_count(value->data_type->e.numeric.dimx);
@@ -11005,7 +10986,7 @@ static bool sm4_generate_vsir_instr_resource_store(struct hlsl_ctx *ctx,
}
else
{
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_STORE_UAV_TYPED, 1, 2)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_STORE_UAV_TYPED, 1, 2)))
return false;
if (!sm4_generate_vsir_init_dst_param_from_deref(ctx, program,
@@ -11080,11 +11061,11 @@ static bool sm4_generate_vsir_instr_ld(struct hlsl_ctx *ctx,
|| resource_type->sampler_dim == HLSL_SAMPLER_DIM_2DMSARRAY);
if (uav)
- opcode = VKD3DSIH_LD_UAV_TYPED;
+ opcode = VSIR_OP_LD_UAV_TYPED;
else if (raw)
- opcode = VKD3DSIH_LD_RAW;
+ opcode = VSIR_OP_LD_RAW;
else
- opcode = multisampled ? VKD3DSIH_LD2DMS : VKD3DSIH_LD;
+ opcode = multisampled ? VSIR_OP_LD2DMS : VSIR_OP_LD;
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode, 1, 2 + multisampled)))
return false;
@@ -11145,32 +11126,32 @@ static bool sm4_generate_vsir_instr_sample(struct hlsl_ctx *ctx,
switch (load->load_type)
{
case HLSL_RESOURCE_SAMPLE:
- opcode = VKD3DSIH_SAMPLE;
+ opcode = VSIR_OP_SAMPLE;
src_count = 3;
break;
case HLSL_RESOURCE_SAMPLE_CMP:
- opcode = VKD3DSIH_SAMPLE_C;
+ opcode = VSIR_OP_SAMPLE_C;
src_count = 4;
break;
case HLSL_RESOURCE_SAMPLE_CMP_LZ:
- opcode = VKD3DSIH_SAMPLE_C_LZ;
+ opcode = VSIR_OP_SAMPLE_C_LZ;
src_count = 4;
break;
case HLSL_RESOURCE_SAMPLE_LOD:
- opcode = VKD3DSIH_SAMPLE_LOD;
+ opcode = VSIR_OP_SAMPLE_LOD;
src_count = 4;
break;
case HLSL_RESOURCE_SAMPLE_LOD_BIAS:
- opcode = VKD3DSIH_SAMPLE_B;
+ opcode = VSIR_OP_SAMPLE_B;
src_count = 4;
break;
case HLSL_RESOURCE_SAMPLE_GRAD:
- opcode = VKD3DSIH_SAMPLE_GRAD;
+ opcode = VSIR_OP_SAMPLE_GRAD;
src_count = 5;
break;
@@ -11201,15 +11182,15 @@ static bool sm4_generate_vsir_instr_sample(struct hlsl_ctx *ctx,
sampler, VKD3DSP_WRITEMASK_ALL, &instr->loc))
return false;
- if (opcode == VKD3DSIH_SAMPLE_LOD || opcode == VKD3DSIH_SAMPLE_B)
+ if (opcode == VSIR_OP_SAMPLE_LOD || opcode == VSIR_OP_SAMPLE_B)
{
vsir_src_from_hlsl_node(&ins->src[3], ctx, load->lod.node, VKD3DSP_WRITEMASK_ALL);
}
- else if (opcode == VKD3DSIH_SAMPLE_C || opcode == VKD3DSIH_SAMPLE_C_LZ)
+ else if (opcode == VSIR_OP_SAMPLE_C || opcode == VSIR_OP_SAMPLE_C_LZ)
{
vsir_src_from_hlsl_node(&ins->src[3], ctx, load->cmp.node, VKD3DSP_WRITEMASK_ALL);
}
- else if (opcode == VKD3DSIH_SAMPLE_GRAD)
+ else if (opcode == VSIR_OP_SAMPLE_GRAD)
{
vsir_src_from_hlsl_node(&ins->src[3], ctx, load->ddx.node, VKD3DSP_WRITEMASK_ALL);
vsir_src_from_hlsl_node(&ins->src[4], ctx, load->ddy.node, VKD3DSP_WRITEMASK_ALL);
@@ -11224,7 +11205,7 @@ static bool sm4_generate_vsir_instr_gather(struct hlsl_ctx *ctx, struct vsir_pro
const struct hlsl_ir_node *texel_offset = load->texel_offset.node;
const struct hlsl_ir_node *coords = load->coords.node;
const struct hlsl_deref *resource = &load->resource;
- enum vkd3d_shader_opcode opcode = VKD3DSIH_GATHER4;
+ enum vkd3d_shader_opcode opcode = VSIR_OP_GATHER4;
const struct hlsl_deref *sampler = &load->sampler;
const struct hlsl_ir_node *instr = &load->node;
unsigned int src_count = 3, current_arg = 0;
@@ -11238,13 +11219,13 @@ static bool sm4_generate_vsir_instr_gather(struct hlsl_ctx *ctx, struct vsir_pro
"Offset must resolve to integer literal in the range -8 to 7 for profiles < 5.");
return false;
}
- opcode = VKD3DSIH_GATHER4_PO;
+ opcode = VSIR_OP_GATHER4_PO;
++src_count;
}
if (compare)
{
- opcode = opcode == VKD3DSIH_GATHER4 ? VKD3DSIH_GATHER4_C : VKD3DSIH_GATHER4_PO_C;
+ opcode = opcode == VSIR_OP_GATHER4 ? VSIR_OP_GATHER4_C : VSIR_OP_GATHER4_PO_C;
++src_count;
}
@@ -11254,7 +11235,7 @@ static bool sm4_generate_vsir_instr_gather(struct hlsl_ctx *ctx, struct vsir_pro
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
vsir_src_from_hlsl_node(&ins->src[current_arg++], ctx, coords, VKD3DSP_WRITEMASK_ALL);
- if (opcode == VKD3DSIH_GATHER4_PO || opcode == VKD3DSIH_GATHER4_PO_C)
+ if (opcode == VSIR_OP_GATHER4_PO || opcode == VSIR_OP_GATHER4_PO_C)
vsir_src_from_hlsl_node(&ins->src[current_arg++], ctx, texel_offset, VKD3DSP_WRITEMASK_ALL);
else
sm4_generate_vsir_encode_texel_offset_as_aoffimmi(ins, texel_offset);
@@ -11286,7 +11267,7 @@ static bool sm4_generate_vsir_instr_sample_info(struct hlsl_ctx *ctx,
VKD3D_ASSERT(type->e.numeric.type == HLSL_TYPE_UINT || type->e.numeric.type == HLSL_TYPE_FLOAT);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_SAMPLE_INFO, 1, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_SAMPLE_INFO, 1, 1)))
return false;
if (type->e.numeric.type == HLSL_TYPE_UINT)
@@ -11318,7 +11299,7 @@ static bool sm4_generate_vsir_instr_resinfo(struct hlsl_ctx *ctx,
VKD3D_ASSERT(type->e.numeric.type == HLSL_TYPE_UINT || type->e.numeric.type == HLSL_TYPE_FLOAT);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_RESINFO, 1, 2)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_RESINFO, 1, 2)))
return false;
if (type->e.numeric.type == HLSL_TYPE_UINT)
@@ -11423,25 +11404,25 @@ static bool sm4_generate_vsir_instr_interlocked(struct hlsl_ctx *ctx,
static const enum vkd3d_shader_opcode opcodes[] =
{
- [HLSL_INTERLOCKED_ADD] = VKD3DSIH_ATOMIC_IADD,
- [HLSL_INTERLOCKED_AND] = VKD3DSIH_ATOMIC_AND,
- [HLSL_INTERLOCKED_CMP_EXCH] = VKD3DSIH_ATOMIC_CMP_STORE,
- [HLSL_INTERLOCKED_MAX] = VKD3DSIH_ATOMIC_UMAX,
- [HLSL_INTERLOCKED_MIN] = VKD3DSIH_ATOMIC_UMIN,
- [HLSL_INTERLOCKED_OR] = VKD3DSIH_ATOMIC_OR,
- [HLSL_INTERLOCKED_XOR] = VKD3DSIH_ATOMIC_XOR,
+ [HLSL_INTERLOCKED_ADD] = VSIR_OP_ATOMIC_IADD,
+ [HLSL_INTERLOCKED_AND] = VSIR_OP_ATOMIC_AND,
+ [HLSL_INTERLOCKED_CMP_EXCH] = VSIR_OP_ATOMIC_CMP_STORE,
+ [HLSL_INTERLOCKED_MAX] = VSIR_OP_ATOMIC_UMAX,
+ [HLSL_INTERLOCKED_MIN] = VSIR_OP_ATOMIC_UMIN,
+ [HLSL_INTERLOCKED_OR] = VSIR_OP_ATOMIC_OR,
+ [HLSL_INTERLOCKED_XOR] = VSIR_OP_ATOMIC_XOR,
};
static const enum vkd3d_shader_opcode imm_opcodes[] =
{
- [HLSL_INTERLOCKED_ADD] = VKD3DSIH_IMM_ATOMIC_IADD,
- [HLSL_INTERLOCKED_AND] = VKD3DSIH_IMM_ATOMIC_AND,
- [HLSL_INTERLOCKED_CMP_EXCH] = VKD3DSIH_IMM_ATOMIC_CMP_EXCH,
- [HLSL_INTERLOCKED_EXCH] = VKD3DSIH_IMM_ATOMIC_EXCH,
- [HLSL_INTERLOCKED_MAX] = VKD3DSIH_IMM_ATOMIC_UMAX,
- [HLSL_INTERLOCKED_MIN] = VKD3DSIH_IMM_ATOMIC_UMIN,
- [HLSL_INTERLOCKED_OR] = VKD3DSIH_IMM_ATOMIC_OR,
- [HLSL_INTERLOCKED_XOR] = VKD3DSIH_IMM_ATOMIC_XOR,
+ [HLSL_INTERLOCKED_ADD] = VSIR_OP_IMM_ATOMIC_IADD,
+ [HLSL_INTERLOCKED_AND] = VSIR_OP_IMM_ATOMIC_AND,
+ [HLSL_INTERLOCKED_CMP_EXCH] = VSIR_OP_IMM_ATOMIC_CMP_EXCH,
+ [HLSL_INTERLOCKED_EXCH] = VSIR_OP_IMM_ATOMIC_EXCH,
+ [HLSL_INTERLOCKED_MAX] = VSIR_OP_IMM_ATOMIC_UMAX,
+ [HLSL_INTERLOCKED_MIN] = VSIR_OP_IMM_ATOMIC_UMIN,
+ [HLSL_INTERLOCKED_OR] = VSIR_OP_IMM_ATOMIC_OR,
+ [HLSL_INTERLOCKED_XOR] = VSIR_OP_IMM_ATOMIC_XOR,
};
struct hlsl_ir_node *cmp_value = interlocked->cmp_value.node, *value = interlocked->value.node;
@@ -11456,14 +11437,14 @@ static bool sm4_generate_vsir_instr_interlocked(struct hlsl_ctx *ctx,
if (value->data_type->e.numeric.type == HLSL_TYPE_INT)
{
- if (opcode == VKD3DSIH_ATOMIC_UMAX)
- opcode = VKD3DSIH_ATOMIC_IMAX;
- else if (opcode == VKD3DSIH_ATOMIC_UMIN)
- opcode = VKD3DSIH_ATOMIC_IMIN;
- else if (opcode == VKD3DSIH_IMM_ATOMIC_UMAX)
- opcode = VKD3DSIH_IMM_ATOMIC_IMAX;
- else if (opcode == VKD3DSIH_IMM_ATOMIC_UMIN)
- opcode = VKD3DSIH_IMM_ATOMIC_IMIN;
+ if (opcode == VSIR_OP_ATOMIC_UMAX)
+ opcode = VSIR_OP_ATOMIC_IMAX;
+ else if (opcode == VSIR_OP_ATOMIC_UMIN)
+ opcode = VSIR_OP_ATOMIC_IMIN;
+ else if (opcode == VSIR_OP_IMM_ATOMIC_UMAX)
+ opcode = VSIR_OP_IMM_ATOMIC_IMAX;
+ else if (opcode == VSIR_OP_IMM_ATOMIC_UMIN)
+ opcode = VSIR_OP_IMM_ATOMIC_IMIN;
}
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode,
@@ -11501,13 +11482,13 @@ static bool sm4_generate_vsir_instr_jump(struct hlsl_ctx *ctx,
switch (jump->type)
{
case HLSL_IR_JUMP_BREAK:
- return generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_BREAK, 0, 0);
+ return generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_BREAK, 0, 0);
case HLSL_IR_JUMP_CONTINUE:
- return generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_CONTINUE, 0, 0);
+ return generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_CONTINUE, 0, 0);
case HLSL_IR_JUMP_DISCARD_NZ:
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_DISCARD, 0, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_DISCARD, 0, 1)))
return false;
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_NZ;
@@ -11529,7 +11510,7 @@ static bool sm4_generate_vsir_instr_sync(struct hlsl_ctx *ctx,
const struct hlsl_ir_node *instr = &sync->node;
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_SYNC, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_SYNC, 0, 0)))
return false;
ins->flags = sync->sync_flags;
@@ -11545,7 +11526,7 @@ static void sm4_generate_vsir_instr_if(struct hlsl_ctx *ctx, struct vsir_program
VKD3D_ASSERT(iff->condition.node->data_type->e.numeric.dimx == 1);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_IF, 0, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_IF, 0, 1)))
return;
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_NZ;
@@ -11555,12 +11536,12 @@ static void sm4_generate_vsir_instr_if(struct hlsl_ctx *ctx, struct vsir_program
if (!list_empty(&iff->else_block.instrs))
{
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_ELSE, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_ELSE, 0, 0)))
return;
sm4_generate_vsir_block(ctx, &iff->else_block, program);
}
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_ENDIF, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_ENDIF, 0, 0)))
return;
}
@@ -11570,12 +11551,12 @@ static void sm4_generate_vsir_instr_loop(struct hlsl_ctx *ctx,
struct hlsl_ir_node *instr = &loop->node;
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_LOOP, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_LOOP, 0, 0)))
return;
sm4_generate_vsir_block(ctx, &loop->body, program);
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_ENDLOOP, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_ENDLOOP, 0, 0)))
return;
}
@@ -11587,7 +11568,7 @@ static void sm4_generate_vsir_instr_switch(struct hlsl_ctx *ctx,
struct vkd3d_shader_instruction *ins;
struct hlsl_ir_switch_case *cas;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_SWITCH, 0, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_SWITCH, 0, 1)))
return;
vsir_src_from_hlsl_node(&ins->src[0], ctx, selector, VKD3DSP_WRITEMASK_ALL);
@@ -11595,14 +11576,14 @@ static void sm4_generate_vsir_instr_switch(struct hlsl_ctx *ctx,
{
if (cas->is_default)
{
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_DEFAULT, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_DEFAULT, 0, 0)))
return;
}
else
{
struct hlsl_constant_value value = {.u[0].u = cas->value};
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_CASE, 0, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_CASE, 0, 1)))
return;
vsir_src_from_hlsl_constant_value(&ins->src[0], ctx, &value, VKD3D_DATA_UINT, 1, VKD3DSP_WRITEMASK_ALL);
}
@@ -11610,7 +11591,7 @@ static void sm4_generate_vsir_instr_switch(struct hlsl_ctx *ctx,
sm4_generate_vsir_block(ctx, &cas->body, program);
}
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_ENDSWITCH, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_ENDSWITCH, 0, 0)))
return;
}
@@ -11750,7 +11731,7 @@ static void sm4_generate_vsir_add_function(struct hlsl_ctx *ctx,
sm4_generate_vsir_block(ctx, &func->body, program);
- generate_vsir_add_program_instruction(ctx, program, &func->loc, VKD3DSIH_RET, 0, 0);
+ generate_vsir_add_program_instruction(ctx, program, &func->loc, VSIR_OP_RET, 0, 0);
}
static int sm4_compare_extern_resources(const void *a, const void *b)
@@ -12030,7 +12011,7 @@ static void sm4_generate_vsir_add_dcl_constant_buffer(struct hlsl_ctx *ctx,
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &cbuffer->loc, VKD3DSIH_DCL_CONSTANT_BUFFER, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &cbuffer->loc, VSIR_OP_DCL_CONSTANT_BUFFER, 0, 0)))
{
ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
return;
@@ -12071,7 +12052,7 @@ static void sm4_generate_vsir_add_dcl_sampler(struct hlsl_ctx *ctx,
if (resource->var && !resource->var->objects_usage[HLSL_REGSET_SAMPLERS][i].used)
continue;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &resource->loc, VKD3DSIH_DCL_SAMPLER, 0, 0)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &resource->loc, VSIR_OP_DCL_SAMPLER, 0, 0)))
{
ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
return;
@@ -12184,13 +12165,13 @@ static void sm4_generate_vsir_add_dcl_texture(struct hlsl_ctx *ctx,
switch (component_type->sampler_dim)
{
case HLSL_SAMPLER_DIM_STRUCTURED_BUFFER:
- opcode = VKD3DSIH_DCL_UAV_STRUCTURED;
+ opcode = VSIR_OP_DCL_UAV_STRUCTURED;
break;
case HLSL_SAMPLER_DIM_RAW_BUFFER:
- opcode = VKD3DSIH_DCL_UAV_RAW;
+ opcode = VSIR_OP_DCL_UAV_RAW;
break;
default:
- opcode = VKD3DSIH_DCL_UAV_TYPED;
+ opcode = VSIR_OP_DCL_UAV_TYPED;
break;
}
}
@@ -12199,10 +12180,10 @@ static void sm4_generate_vsir_add_dcl_texture(struct hlsl_ctx *ctx,
switch (component_type->sampler_dim)
{
case HLSL_SAMPLER_DIM_RAW_BUFFER:
- opcode = VKD3DSIH_DCL_RESOURCE_RAW;
+ opcode = VSIR_OP_DCL_RESOURCE_RAW;
break;
default:
- opcode = VKD3DSIH_DCL;
+ opcode = VSIR_OP_DCL;
break;
}
}
@@ -12272,7 +12253,7 @@ static void sm4_generate_vsir_add_dcl_stream(struct hlsl_ctx *ctx,
{
struct vkd3d_shader_instruction *ins;
- if (!(ins = generate_vsir_add_program_instruction(ctx, program, &var->loc, VKD3DSIH_DCL_STREAM, 0, 1)))
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &var->loc, VSIR_OP_DCL_STREAM, 0, 1)))
{
ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
return;
@@ -12372,12 +12353,12 @@ static void sm4_generate_vsir(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl
if (version.type == VKD3D_SHADER_TYPE_HULL)
generate_vsir_add_program_instruction(ctx, program,
- &ctx->patch_constant_func->loc, VKD3DSIH_HS_CONTROL_POINT_PHASE, 0, 0);
+ &ctx->patch_constant_func->loc, VSIR_OP_HS_CONTROL_POINT_PHASE, 0, 0);
sm4_generate_vsir_add_function(ctx, func, config_flags, program);
if (version.type == VKD3D_SHADER_TYPE_HULL)
{
generate_vsir_add_program_instruction(ctx, program,
- &ctx->patch_constant_func->loc, VKD3DSIH_HS_FORK_PHASE, 0, 0);
+ &ctx->patch_constant_func->loc, VSIR_OP_HS_FORK_PHASE, 0, 0);
sm4_generate_vsir_add_function(ctx, ctx->patch_constant_func, config_flags, program);
}
diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c
index c26077e43d9..9b44925888b 100644
--- a/libs/vkd3d/libs/vkd3d-shader/ir.c
+++ b/libs/vkd3d/libs/vkd3d-shader/ir.c
@@ -33,335 +33,338 @@ const char *vsir_opcode_get_name(enum vkd3d_shader_opcode op, const char *error)
{
static const char * const names[] =
{
- [VKD3DSIH_ABS ] = "abs",
- [VKD3DSIH_ACOS ] = "acos",
- [VKD3DSIH_ADD ] = "add",
- [VKD3DSIH_AND ] = "and",
- [VKD3DSIH_ASIN ] = "asin",
- [VKD3DSIH_ATAN ] = "atan",
- [VKD3DSIH_ATOMIC_AND ] = "atomic_and",
- [VKD3DSIH_ATOMIC_CMP_STORE ] = "atomic_cmp_store",
- [VKD3DSIH_ATOMIC_IADD ] = "atomic_iadd",
- [VKD3DSIH_ATOMIC_IMAX ] = "atomic_imax",
- [VKD3DSIH_ATOMIC_IMIN ] = "atomic_imin",
- [VKD3DSIH_ATOMIC_OR ] = "atomic_or",
- [VKD3DSIH_ATOMIC_UMAX ] = "atomic_umax",
- [VKD3DSIH_ATOMIC_UMIN ] = "atomic_umin",
- [VKD3DSIH_ATOMIC_XOR ] = "atomic_xor",
- [VKD3DSIH_BEM ] = "bem",
- [VKD3DSIH_BFI ] = "bfi",
- [VKD3DSIH_BFREV ] = "bfrev",
- [VKD3DSIH_BRANCH ] = "branch",
- [VKD3DSIH_BREAK ] = "break",
- [VKD3DSIH_BREAKC ] = "break",
- [VKD3DSIH_BREAKP ] = "breakp",
- [VKD3DSIH_BUFINFO ] = "bufinfo",
- [VKD3DSIH_CALL ] = "call",
- [VKD3DSIH_CALLNZ ] = "callnz",
- [VKD3DSIH_CASE ] = "case",
- [VKD3DSIH_CHECK_ACCESS_FULLY_MAPPED ] = "check_access_fully_mapped",
- [VKD3DSIH_CMP ] = "cmp",
- [VKD3DSIH_CND ] = "cnd",
- [VKD3DSIH_COS ] = "cos",
- [VKD3DSIH_CONTINUE ] = "continue",
- [VKD3DSIH_CONTINUEP ] = "continuec",
- [VKD3DSIH_COUNTBITS ] = "countbits",
- [VKD3DSIH_CRS ] = "crs",
- [VKD3DSIH_CUT ] = "cut",
- [VKD3DSIH_CUT_STREAM ] = "cut_stream",
- [VKD3DSIH_DADD ] = "dadd",
- [VKD3DSIH_DCL ] = "dcl",
- [VKD3DSIH_DCL_CONSTANT_BUFFER ] = "dcl_constantBuffer",
- [VKD3DSIH_DCL_FUNCTION_BODY ] = "dcl_function_body",
- [VKD3DSIH_DCL_FUNCTION_TABLE ] = "dcl_function_table",
- [VKD3DSIH_DCL_GLOBAL_FLAGS ] = "dcl_globalFlags",
- [VKD3DSIH_DCL_GS_INSTANCES ] = "dcl_gs_instances",
- [VKD3DSIH_DCL_HS_FORK_PHASE_INSTANCE_COUNT] = "dcl_hs_fork_phase_instance_count",
- [VKD3DSIH_DCL_HS_JOIN_PHASE_INSTANCE_COUNT] = "dcl_hs_join_phase_instance_count",
- [VKD3DSIH_DCL_HS_MAX_TESSFACTOR ] = "dcl_hs_max_tessfactor",
- [VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER ] = "dcl_immediateConstantBuffer",
- [VKD3DSIH_DCL_INDEX_RANGE ] = "dcl_index_range",
- [VKD3DSIH_DCL_INDEXABLE_TEMP ] = "dcl_indexableTemp",
- [VKD3DSIH_DCL_INPUT ] = "dcl_input",
- [VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT ] = "dcl_input_control_point_count",
- [VKD3DSIH_DCL_INPUT_PRIMITIVE ] = "dcl_inputprimitive",
- [VKD3DSIH_DCL_INPUT_PS ] = "dcl_input_ps",
- [VKD3DSIH_DCL_INPUT_PS_SGV ] = "dcl_input_ps_sgv",
- [VKD3DSIH_DCL_INPUT_PS_SIV ] = "dcl_input_ps_siv",
- [VKD3DSIH_DCL_INPUT_SGV ] = "dcl_input_sgv",
- [VKD3DSIH_DCL_INPUT_SIV ] = "dcl_input_siv",
- [VKD3DSIH_DCL_INTERFACE ] = "dcl_interface",
- [VKD3DSIH_DCL_OUTPUT ] = "dcl_output",
- [VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT ] = "dcl_output_control_point_count",
- [VKD3DSIH_DCL_OUTPUT_SGV ] = "dcl_output_sgv",
- [VKD3DSIH_DCL_OUTPUT_SIV ] = "dcl_output_siv",
- [VKD3DSIH_DCL_OUTPUT_TOPOLOGY ] = "dcl_outputtopology",
- [VKD3DSIH_DCL_RESOURCE_RAW ] = "dcl_resource_raw",
- [VKD3DSIH_DCL_RESOURCE_STRUCTURED ] = "dcl_resource_structured",
- [VKD3DSIH_DCL_SAMPLER ] = "dcl_sampler",
- [VKD3DSIH_DCL_STREAM ] = "dcl_stream",
- [VKD3DSIH_DCL_TEMPS ] = "dcl_temps",
- [VKD3DSIH_DCL_TESSELLATOR_DOMAIN ] = "dcl_tessellator_domain",
- [VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE] = "dcl_tessellator_output_primitive",
- [VKD3DSIH_DCL_TESSELLATOR_PARTITIONING ] = "dcl_tessellator_partitioning",
- [VKD3DSIH_DCL_TGSM_RAW ] = "dcl_tgsm_raw",
- [VKD3DSIH_DCL_TGSM_STRUCTURED ] = "dcl_tgsm_structured",
- [VKD3DSIH_DCL_THREAD_GROUP ] = "dcl_thread_group",
- [VKD3DSIH_DCL_UAV_RAW ] = "dcl_uav_raw",
- [VKD3DSIH_DCL_UAV_STRUCTURED ] = "dcl_uav_structured",
- [VKD3DSIH_DCL_UAV_TYPED ] = "dcl_uav_typed",
- [VKD3DSIH_DCL_VERTICES_OUT ] = "dcl_maxout",
- [VKD3DSIH_DDIV ] = "ddiv",
- [VKD3DSIH_DEF ] = "def",
- [VKD3DSIH_DEFAULT ] = "default",
- [VKD3DSIH_DEFB ] = "defb",
- [VKD3DSIH_DEFI ] = "defi",
- [VKD3DSIH_DEQO ] = "deq",
- [VKD3DSIH_DFMA ] = "dfma",
- [VKD3DSIH_DGEO ] = "dge",
- [VKD3DSIH_DISCARD ] = "discard",
- [VKD3DSIH_DIV ] = "div",
- [VKD3DSIH_DLT ] = "dlt",
- [VKD3DSIH_DMAX ] = "dmax",
- [VKD3DSIH_DMIN ] = "dmin",
- [VKD3DSIH_DMOV ] = "dmov",
- [VKD3DSIH_DMOVC ] = "dmovc",
- [VKD3DSIH_DMUL ] = "dmul",
- [VKD3DSIH_DNE ] = "dne",
- [VKD3DSIH_DP2 ] = "dp2",
- [VKD3DSIH_DP2ADD ] = "dp2add",
- [VKD3DSIH_DP3 ] = "dp3",
- [VKD3DSIH_DP4 ] = "dp4",
- [VKD3DSIH_DRCP ] = "drcp",
- [VKD3DSIH_DST ] = "dst",
- [VKD3DSIH_DSX ] = "dsx",
- [VKD3DSIH_DSX_COARSE ] = "deriv_rtx_coarse",
- [VKD3DSIH_DSX_FINE ] = "deriv_rtx_fine",
- [VKD3DSIH_DSY ] = "dsy",
- [VKD3DSIH_DSY_COARSE ] = "deriv_rty_coarse",
- [VKD3DSIH_DSY_FINE ] = "deriv_rty_fine",
- [VKD3DSIH_DTOF ] = "dtof",
- [VKD3DSIH_DTOI ] = "dtoi",
- [VKD3DSIH_DTOU ] = "dtou",
- [VKD3DSIH_ELSE ] = "else",
- [VKD3DSIH_EMIT ] = "emit",
- [VKD3DSIH_EMIT_STREAM ] = "emit_stream",
- [VKD3DSIH_ENDIF ] = "endif",
- [VKD3DSIH_ENDLOOP ] = "endloop",
- [VKD3DSIH_ENDREP ] = "endrep",
- [VKD3DSIH_ENDSWITCH ] = "endswitch",
- [VKD3DSIH_EQO ] = "eq",
- [VKD3DSIH_EQU ] = "eq_unord",
- [VKD3DSIH_EVAL_CENTROID ] = "eval_centroid",
- [VKD3DSIH_EVAL_SAMPLE_INDEX ] = "eval_sample_index",
- [VKD3DSIH_EXP ] = "exp",
- [VKD3DSIH_EXPP ] = "expp",
- [VKD3DSIH_F16TOF32 ] = "f16tof32",
- [VKD3DSIH_F32TOF16 ] = "f32tof16",
- [VKD3DSIH_FCALL ] = "fcall",
- [VKD3DSIH_FIRSTBIT_HI ] = "firstbit_hi",
- [VKD3DSIH_FIRSTBIT_LO ] = "firstbit_lo",
- [VKD3DSIH_FIRSTBIT_SHI ] = "firstbit_shi",
- [VKD3DSIH_FRC ] = "frc",
- [VKD3DSIH_FREM ] = "frem",
- [VKD3DSIH_FTOD ] = "ftod",
- [VKD3DSIH_FTOI ] = "ftoi",
- [VKD3DSIH_FTOU ] = "ftou",
- [VKD3DSIH_GATHER4 ] = "gather4",
- [VKD3DSIH_GATHER4_C ] = "gather4_c",
- [VKD3DSIH_GATHER4_C_S ] = "gather4_c_s",
- [VKD3DSIH_GATHER4_PO ] = "gather4_po",
- [VKD3DSIH_GATHER4_PO_C ] = "gather4_po_c",
- [VKD3DSIH_GATHER4_PO_C_S ] = "gather4_po_c_s",
- [VKD3DSIH_GATHER4_PO_S ] = "gather4_po_s",
- [VKD3DSIH_GATHER4_S ] = "gather4_s",
- [VKD3DSIH_GEO ] = "ge",
- [VKD3DSIH_GEU ] = "ge_unord",
- [VKD3DSIH_HCOS ] = "hcos",
- [VKD3DSIH_HS_CONTROL_POINT_PHASE ] = "hs_control_point_phase",
- [VKD3DSIH_HS_DECLS ] = "hs_decls",
- [VKD3DSIH_HS_FORK_PHASE ] = "hs_fork_phase",
- [VKD3DSIH_HS_JOIN_PHASE ] = "hs_join_phase",
- [VKD3DSIH_HSIN ] = "hsin",
- [VKD3DSIH_HTAN ] = "htan",
- [VKD3DSIH_IADD ] = "iadd",
- [VKD3DSIH_IBFE ] = "ibfe",
- [VKD3DSIH_IDIV ] = "idiv",
- [VKD3DSIH_IEQ ] = "ieq",
- [VKD3DSIH_IF ] = "if",
- [VKD3DSIH_IFC ] = "if",
- [VKD3DSIH_IGE ] = "ige",
- [VKD3DSIH_ILT ] = "ilt",
- [VKD3DSIH_IMAD ] = "imad",
- [VKD3DSIH_IMAX ] = "imax",
- [VKD3DSIH_IMIN ] = "imin",
- [VKD3DSIH_IMM_ATOMIC_ALLOC ] = "imm_atomic_alloc",
- [VKD3DSIH_IMM_ATOMIC_AND ] = "imm_atomic_and",
- [VKD3DSIH_IMM_ATOMIC_CMP_EXCH ] = "imm_atomic_cmp_exch",
- [VKD3DSIH_IMM_ATOMIC_CONSUME ] = "imm_atomic_consume",
- [VKD3DSIH_IMM_ATOMIC_EXCH ] = "imm_atomic_exch",
- [VKD3DSIH_IMM_ATOMIC_IADD ] = "imm_atomic_iadd",
- [VKD3DSIH_IMM_ATOMIC_IMAX ] = "imm_atomic_imax",
- [VKD3DSIH_IMM_ATOMIC_IMIN ] = "imm_atomic_imin",
- [VKD3DSIH_IMM_ATOMIC_OR ] = "imm_atomic_or",
- [VKD3DSIH_IMM_ATOMIC_UMAX ] = "imm_atomic_umax",
- [VKD3DSIH_IMM_ATOMIC_UMIN ] = "imm_atomic_umin",
- [VKD3DSIH_IMM_ATOMIC_XOR ] = "imm_atomic_xor",
- [VKD3DSIH_IMUL ] = "imul",
- [VKD3DSIH_IMUL_LOW ] = "imul_low",
- [VKD3DSIH_INE ] = "ine",
- [VKD3DSIH_INEG ] = "ineg",
- [VKD3DSIH_ISFINITE ] = "isfinite",
- [VKD3DSIH_ISHL ] = "ishl",
- [VKD3DSIH_ISHR ] = "ishr",
- [VKD3DSIH_ISINF ] = "isinf",
- [VKD3DSIH_ISNAN ] = "isnan",
- [VKD3DSIH_ITOD ] = "itod",
- [VKD3DSIH_ITOF ] = "itof",
- [VKD3DSIH_ITOI ] = "itoi",
- [VKD3DSIH_LABEL ] = "label",
- [VKD3DSIH_LD ] = "ld",
- [VKD3DSIH_LD2DMS ] = "ld2dms",
- [VKD3DSIH_LD2DMS_S ] = "ld2dms_s",
- [VKD3DSIH_LD_RAW ] = "ld_raw",
- [VKD3DSIH_LD_RAW_S ] = "ld_raw_s",
- [VKD3DSIH_LD_S ] = "ld_s",
- [VKD3DSIH_LD_STRUCTURED ] = "ld_structured",
- [VKD3DSIH_LD_STRUCTURED_S ] = "ld_structured_s",
- [VKD3DSIH_LD_UAV_TYPED ] = "ld_uav_typed",
- [VKD3DSIH_LD_UAV_TYPED_S ] = "ld_uav_typed_s",
- [VKD3DSIH_LIT ] = "lit",
- [VKD3DSIH_LOD ] = "lod",
- [VKD3DSIH_LOG ] = "log",
- [VKD3DSIH_LOGP ] = "logp",
- [VKD3DSIH_LOOP ] = "loop",
- [VKD3DSIH_LRP ] = "lrp",
- [VKD3DSIH_LTO ] = "lt",
- [VKD3DSIH_LTU ] = "lt_unord",
- [VKD3DSIH_M3x2 ] = "m3x2",
- [VKD3DSIH_M3x3 ] = "m3x3",
- [VKD3DSIH_M3x4 ] = "m3x4",
- [VKD3DSIH_M4x3 ] = "m4x3",
- [VKD3DSIH_M4x4 ] = "m4x4",
- [VKD3DSIH_MAD ] = "mad",
- [VKD3DSIH_MAX ] = "max",
- [VKD3DSIH_MIN ] = "min",
- [VKD3DSIH_MOV ] = "mov",
- [VKD3DSIH_MOVA ] = "mova",
- [VKD3DSIH_MOVC ] = "movc",
- [VKD3DSIH_MSAD ] = "msad",
- [VKD3DSIH_MUL ] = "mul",
- [VKD3DSIH_NEO ] = "ne_ord",
- [VKD3DSIH_NEU ] = "ne",
- [VKD3DSIH_NOP ] = "nop",
- [VKD3DSIH_NOT ] = "not",
- [VKD3DSIH_NRM ] = "nrm",
- [VKD3DSIH_OR ] = "or",
- [VKD3DSIH_ORD ] = "ord",
- [VKD3DSIH_PHASE ] = "phase",
- [VKD3DSIH_PHI ] = "phi",
- [VKD3DSIH_POW ] = "pow",
- [VKD3DSIH_QUAD_READ_ACROSS_D ] = "quad_read_across_d",
- [VKD3DSIH_QUAD_READ_ACROSS_X ] = "quad_read_across_x",
- [VKD3DSIH_QUAD_READ_ACROSS_Y ] = "quad_read_across_y",
- [VKD3DSIH_QUAD_READ_LANE_AT ] = "quad_read_lane_at",
- [VKD3DSIH_RCP ] = "rcp",
- [VKD3DSIH_REP ] = "rep",
- [VKD3DSIH_RESINFO ] = "resinfo",
- [VKD3DSIH_RET ] = "ret",
- [VKD3DSIH_RETP ] = "retp",
- [VKD3DSIH_ROUND_NE ] = "round_ne",
- [VKD3DSIH_ROUND_NI ] = "round_ni",
- [VKD3DSIH_ROUND_PI ] = "round_pi",
- [VKD3DSIH_ROUND_Z ] = "round_z",
- [VKD3DSIH_RSQ ] = "rsq",
- [VKD3DSIH_SAMPLE ] = "sample",
- [VKD3DSIH_SAMPLE_B ] = "sample_b",
- [VKD3DSIH_SAMPLE_B_CL_S ] = "sample_b_cl_s",
- [VKD3DSIH_SAMPLE_C ] = "sample_c",
- [VKD3DSIH_SAMPLE_C_CL_S ] = "sample_c_cl_s",
- [VKD3DSIH_SAMPLE_C_LZ ] = "sample_c_lz",
- [VKD3DSIH_SAMPLE_C_LZ_S ] = "sample_c_lz_s",
- [VKD3DSIH_SAMPLE_CL_S ] = "sample_cl_s",
- [VKD3DSIH_SAMPLE_GRAD ] = "sample_d",
- [VKD3DSIH_SAMPLE_GRAD_CL_S ] = "sample_d_cl_s",
- [VKD3DSIH_SAMPLE_INFO ] = "sample_info",
- [VKD3DSIH_SAMPLE_LOD ] = "sample_l",
- [VKD3DSIH_SAMPLE_LOD_S ] = "sample_l_s",
- [VKD3DSIH_SAMPLE_POS ] = "sample_pos",
- [VKD3DSIH_SETP ] = "setp",
- [VKD3DSIH_SGE ] = "sge",
- [VKD3DSIH_SGN ] = "sgn",
- [VKD3DSIH_SIN ] = "sin",
- [VKD3DSIH_SINCOS ] = "sincos",
- [VKD3DSIH_SLT ] = "slt",
- [VKD3DSIH_SQRT ] = "sqrt",
- [VKD3DSIH_STORE_RAW ] = "store_raw",
- [VKD3DSIH_STORE_STRUCTURED ] = "store_structured",
- [VKD3DSIH_STORE_UAV_TYPED ] = "store_uav_typed",
- [VKD3DSIH_SUB ] = "sub",
- [VKD3DSIH_SWAPC ] = "swapc",
- [VKD3DSIH_SWITCH ] = "switch",
- [VKD3DSIH_SWITCH_MONOLITHIC ] = "switch",
- [VKD3DSIH_SYNC ] = "sync",
- [VKD3DSIH_TAN ] = "tan",
- [VKD3DSIH_TEX ] = "tex",
- [VKD3DSIH_TEXBEM ] = "texbem",
- [VKD3DSIH_TEXBEML ] = "texbeml",
- [VKD3DSIH_TEXCOORD ] = "texcoord",
- [VKD3DSIH_TEXCRD ] = "texcrd",
- [VKD3DSIH_TEXDEPTH ] = "texdepth",
- [VKD3DSIH_TEXDP3 ] = "texdp3",
- [VKD3DSIH_TEXDP3TEX ] = "texdp3tex",
- [VKD3DSIH_TEXKILL ] = "texkill",
- [VKD3DSIH_TEXLD ] = "texld",
- [VKD3DSIH_TEXLDD ] = "texldd",
- [VKD3DSIH_TEXLDL ] = "texldl",
- [VKD3DSIH_TEXM3x2DEPTH ] = "texm3x2depth",
- [VKD3DSIH_TEXM3x2PAD ] = "texm3x2pad",
- [VKD3DSIH_TEXM3x2TEX ] = "texm3x2tex",
- [VKD3DSIH_TEXM3x3 ] = "texm3x3",
- [VKD3DSIH_TEXM3x3DIFF ] = "texm3x3diff",
- [VKD3DSIH_TEXM3x3PAD ] = "texm3x3pad",
- [VKD3DSIH_TEXM3x3SPEC ] = "texm3x3spec",
- [VKD3DSIH_TEXM3x3TEX ] = "texm3x3tex",
- [VKD3DSIH_TEXM3x3VSPEC ] = "texm3x3vspec",
- [VKD3DSIH_TEXREG2AR ] = "texreg2ar",
- [VKD3DSIH_TEXREG2GB ] = "texreg2gb",
- [VKD3DSIH_TEXREG2RGB ] = "texreg2rgb",
- [VKD3DSIH_UBFE ] = "ubfe",
- [VKD3DSIH_UDIV ] = "udiv",
- [VKD3DSIH_UGE ] = "uge",
- [VKD3DSIH_ULT ] = "ult",
- [VKD3DSIH_UMAX ] = "umax",
- [VKD3DSIH_UMIN ] = "umin",
- [VKD3DSIH_UMUL ] = "umul",
- [VKD3DSIH_UNO ] = "uno",
- [VKD3DSIH_USHR ] = "ushr",
- [VKD3DSIH_UTOD ] = "utod",
- [VKD3DSIH_UTOF ] = "utof",
- [VKD3DSIH_UTOU ] = "utou",
- [VKD3DSIH_WAVE_ACTIVE_ALL_EQUAL ] = "wave_active_all_equal",
- [VKD3DSIH_WAVE_ACTIVE_BALLOT ] = "wave_active_ballot",
- [VKD3DSIH_WAVE_ACTIVE_BIT_AND ] = "wave_active_bit_and",
- [VKD3DSIH_WAVE_ACTIVE_BIT_OR ] = "wave_active_bit_or",
- [VKD3DSIH_WAVE_ACTIVE_BIT_XOR ] = "wave_active_bit_xor",
- [VKD3DSIH_WAVE_ALL_BIT_COUNT ] = "wave_all_bit_count",
- [VKD3DSIH_WAVE_ALL_TRUE ] = "wave_all_true",
- [VKD3DSIH_WAVE_ANY_TRUE ] = "wave_any_true",
- [VKD3DSIH_WAVE_IS_FIRST_LANE ] = "wave_is_first_lane",
- [VKD3DSIH_WAVE_OP_ADD ] = "wave_op_add",
- [VKD3DSIH_WAVE_OP_IMAX ] = "wave_op_imax",
- [VKD3DSIH_WAVE_OP_IMIN ] = "wave_op_imin",
- [VKD3DSIH_WAVE_OP_MAX ] = "wave_op_max",
- [VKD3DSIH_WAVE_OP_MIN ] = "wave_op_min",
- [VKD3DSIH_WAVE_OP_MUL ] = "wave_op_mul",
- [VKD3DSIH_WAVE_OP_UMAX ] = "wave_op_umax",
- [VKD3DSIH_WAVE_OP_UMIN ] = "wave_op_umin",
- [VKD3DSIH_WAVE_PREFIX_BIT_COUNT ] = "wave_prefix_bit_count",
- [VKD3DSIH_WAVE_READ_LANE_AT ] = "wave_read_lane_at",
- [VKD3DSIH_WAVE_READ_LANE_FIRST ] = "wave_read_lane_first",
- [VKD3DSIH_XOR ] = "xor",
+ [VSIR_OP_ABS ] = "abs",
+ [VSIR_OP_ACOS ] = "acos",
+ [VSIR_OP_ADD ] = "add",
+ [VSIR_OP_AND ] = "and",
+ [VSIR_OP_ASIN ] = "asin",
+ [VSIR_OP_ATAN ] = "atan",
+ [VSIR_OP_ATOMIC_AND ] = "atomic_and",
+ [VSIR_OP_ATOMIC_CMP_STORE ] = "atomic_cmp_store",
+ [VSIR_OP_ATOMIC_IADD ] = "atomic_iadd",
+ [VSIR_OP_ATOMIC_IMAX ] = "atomic_imax",
+ [VSIR_OP_ATOMIC_IMIN ] = "atomic_imin",
+ [VSIR_OP_ATOMIC_OR ] = "atomic_or",
+ [VSIR_OP_ATOMIC_UMAX ] = "atomic_umax",
+ [VSIR_OP_ATOMIC_UMIN ] = "atomic_umin",
+ [VSIR_OP_ATOMIC_XOR ] = "atomic_xor",
+ [VSIR_OP_BEM ] = "bem",
+ [VSIR_OP_BFI ] = "bfi",
+ [VSIR_OP_BFREV ] = "bfrev",
+ [VSIR_OP_BRANCH ] = "branch",
+ [VSIR_OP_BREAK ] = "break",
+ [VSIR_OP_BREAKC ] = "break",
+ [VSIR_OP_BREAKP ] = "breakp",
+ [VSIR_OP_BUFINFO ] = "bufinfo",
+ [VSIR_OP_CALL ] = "call",
+ [VSIR_OP_CALLNZ ] = "callnz",
+ [VSIR_OP_CASE ] = "case",
+ [VSIR_OP_CHECK_ACCESS_FULLY_MAPPED ] = "check_access_fully_mapped",
+ [VSIR_OP_CMP ] = "cmp",
+ [VSIR_OP_CND ] = "cnd",
+ [VSIR_OP_COS ] = "cos",
+ [VSIR_OP_CONTINUE ] = "continue",
+ [VSIR_OP_CONTINUEP ] = "continuec",
+ [VSIR_OP_COUNTBITS ] = "countbits",
+ [VSIR_OP_CRS ] = "crs",
+ [VSIR_OP_CUT ] = "cut",
+ [VSIR_OP_CUT_STREAM ] = "cut_stream",
+ [VSIR_OP_DADD ] = "dadd",
+ [VSIR_OP_DCL ] = "dcl",
+ [VSIR_OP_DCL_CONSTANT_BUFFER ] = "dcl_constantBuffer",
+ [VSIR_OP_DCL_FUNCTION_BODY ] = "dcl_function_body",
+ [VSIR_OP_DCL_FUNCTION_TABLE ] = "dcl_function_table",
+ [VSIR_OP_DCL_GLOBAL_FLAGS ] = "dcl_globalFlags",
+ [VSIR_OP_DCL_GS_INSTANCES ] = "dcl_gs_instances",
+ [VSIR_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT] = "dcl_hs_fork_phase_instance_count",
+ [VSIR_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT] = "dcl_hs_join_phase_instance_count",
+ [VSIR_OP_DCL_HS_MAX_TESSFACTOR ] = "dcl_hs_max_tessfactor",
+ [VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER ] = "dcl_immediateConstantBuffer",
+ [VSIR_OP_DCL_INDEX_RANGE ] = "dcl_index_range",
+ [VSIR_OP_DCL_INDEXABLE_TEMP ] = "dcl_indexableTemp",
+ [VSIR_OP_DCL_INPUT ] = "dcl_input",
+ [VSIR_OP_DCL_INPUT_CONTROL_POINT_COUNT ] = "dcl_input_control_point_count",
+ [VSIR_OP_DCL_INPUT_PRIMITIVE ] = "dcl_inputprimitive",
+ [VSIR_OP_DCL_INPUT_PS ] = "dcl_input_ps",
+ [VSIR_OP_DCL_INPUT_PS_SGV ] = "dcl_input_ps_sgv",
+ [VSIR_OP_DCL_INPUT_PS_SIV ] = "dcl_input_ps_siv",
+ [VSIR_OP_DCL_INPUT_SGV ] = "dcl_input_sgv",
+ [VSIR_OP_DCL_INPUT_SIV ] = "dcl_input_siv",
+ [VSIR_OP_DCL_INTERFACE ] = "dcl_interface",
+ [VSIR_OP_DCL_OUTPUT ] = "dcl_output",
+ [VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT ] = "dcl_output_control_point_count",
+ [VSIR_OP_DCL_OUTPUT_SGV ] = "dcl_output_sgv",
+ [VSIR_OP_DCL_OUTPUT_SIV ] = "dcl_output_siv",
+ [VSIR_OP_DCL_OUTPUT_TOPOLOGY ] = "dcl_outputtopology",
+ [VSIR_OP_DCL_RESOURCE_RAW ] = "dcl_resource_raw",
+ [VSIR_OP_DCL_RESOURCE_STRUCTURED ] = "dcl_resource_structured",
+ [VSIR_OP_DCL_SAMPLER ] = "dcl_sampler",
+ [VSIR_OP_DCL_STREAM ] = "dcl_stream",
+ [VSIR_OP_DCL_TEMPS ] = "dcl_temps",
+ [VSIR_OP_DCL_TESSELLATOR_DOMAIN ] = "dcl_tessellator_domain",
+ [VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE] = "dcl_tessellator_output_primitive",
+ [VSIR_OP_DCL_TESSELLATOR_PARTITIONING ] = "dcl_tessellator_partitioning",
+ [VSIR_OP_DCL_TGSM_RAW ] = "dcl_tgsm_raw",
+ [VSIR_OP_DCL_TGSM_STRUCTURED ] = "dcl_tgsm_structured",
+ [VSIR_OP_DCL_THREAD_GROUP ] = "dcl_thread_group",
+ [VSIR_OP_DCL_UAV_RAW ] = "dcl_uav_raw",
+ [VSIR_OP_DCL_UAV_STRUCTURED ] = "dcl_uav_structured",
+ [VSIR_OP_DCL_UAV_TYPED ] = "dcl_uav_typed",
+ [VSIR_OP_DCL_VERTICES_OUT ] = "dcl_maxout",
+ [VSIR_OP_DDIV ] = "ddiv",
+ [VSIR_OP_DEF ] = "def",
+ [VSIR_OP_DEFAULT ] = "default",
+ [VSIR_OP_DEFB ] = "defb",
+ [VSIR_OP_DEFI ] = "defi",
+ [VSIR_OP_DEQO ] = "deq",
+ [VSIR_OP_DFMA ] = "dfma",
+ [VSIR_OP_DGEO ] = "dge",
+ [VSIR_OP_DISCARD ] = "discard",
+ [VSIR_OP_DIV ] = "div",
+ [VSIR_OP_DLT ] = "dlt",
+ [VSIR_OP_DMAX ] = "dmax",
+ [VSIR_OP_DMIN ] = "dmin",
+ [VSIR_OP_DMOV ] = "dmov",
+ [VSIR_OP_DMOVC ] = "dmovc",
+ [VSIR_OP_DMUL ] = "dmul",
+ [VSIR_OP_DNE ] = "dne",
+ [VSIR_OP_DP2 ] = "dp2",
+ [VSIR_OP_DP2ADD ] = "dp2add",
+ [VSIR_OP_DP3 ] = "dp3",
+ [VSIR_OP_DP4 ] = "dp4",
+ [VSIR_OP_DRCP ] = "drcp",
+ [VSIR_OP_DST ] = "dst",
+ [VSIR_OP_DSX ] = "dsx",
+ [VSIR_OP_DSX_COARSE ] = "deriv_rtx_coarse",
+ [VSIR_OP_DSX_FINE ] = "deriv_rtx_fine",
+ [VSIR_OP_DSY ] = "dsy",
+ [VSIR_OP_DSY_COARSE ] = "deriv_rty_coarse",
+ [VSIR_OP_DSY_FINE ] = "deriv_rty_fine",
+ [VSIR_OP_DTOF ] = "dtof",
+ [VSIR_OP_DTOI ] = "dtoi",
+ [VSIR_OP_DTOU ] = "dtou",
+ [VSIR_OP_ELSE ] = "else",
+ [VSIR_OP_EMIT ] = "emit",
+ [VSIR_OP_EMIT_STREAM ] = "emit_stream",
+ [VSIR_OP_ENDIF ] = "endif",
+ [VSIR_OP_ENDLOOP ] = "endloop",
+ [VSIR_OP_ENDREP ] = "endrep",
+ [VSIR_OP_ENDSWITCH ] = "endswitch",
+ [VSIR_OP_EQO ] = "eq",
+ [VSIR_OP_EQU ] = "eq_unord",
+ [VSIR_OP_EVAL_CENTROID ] = "eval_centroid",
+ [VSIR_OP_EVAL_SAMPLE_INDEX ] = "eval_sample_index",
+ [VSIR_OP_EXP ] = "exp",
+ [VSIR_OP_EXPP ] = "expp",
+ [VSIR_OP_F16TOF32 ] = "f16tof32",
+ [VSIR_OP_F32TOF16 ] = "f32tof16",
+ [VSIR_OP_FCALL ] = "fcall",
+ [VSIR_OP_FIRSTBIT_HI ] = "firstbit_hi",
+ [VSIR_OP_FIRSTBIT_LO ] = "firstbit_lo",
+ [VSIR_OP_FIRSTBIT_SHI ] = "firstbit_shi",
+ [VSIR_OP_FRC ] = "frc",
+ [VSIR_OP_FREM ] = "frem",
+ [VSIR_OP_FTOD ] = "ftod",
+ [VSIR_OP_FTOI ] = "ftoi",
+ [VSIR_OP_FTOU ] = "ftou",
+ [VSIR_OP_GATHER4 ] = "gather4",
+ [VSIR_OP_GATHER4_C ] = "gather4_c",
+ [VSIR_OP_GATHER4_C_S ] = "gather4_c_s",
+ [VSIR_OP_GATHER4_PO ] = "gather4_po",
+ [VSIR_OP_GATHER4_PO_C ] = "gather4_po_c",
+ [VSIR_OP_GATHER4_PO_C_S ] = "gather4_po_c_s",
+ [VSIR_OP_GATHER4_PO_S ] = "gather4_po_s",
+ [VSIR_OP_GATHER4_S ] = "gather4_s",
+ [VSIR_OP_GEO ] = "ge",
+ [VSIR_OP_GEU ] = "ge_unord",
+ [VSIR_OP_HCOS ] = "hcos",
+ [VSIR_OP_HS_CONTROL_POINT_PHASE ] = "hs_control_point_phase",
+ [VSIR_OP_HS_DECLS ] = "hs_decls",
+ [VSIR_OP_HS_FORK_PHASE ] = "hs_fork_phase",
+ [VSIR_OP_HS_JOIN_PHASE ] = "hs_join_phase",
+ [VSIR_OP_HSIN ] = "hsin",
+ [VSIR_OP_HTAN ] = "htan",
+ [VSIR_OP_IADD ] = "iadd",
+ [VSIR_OP_IBFE ] = "ibfe",
+ [VSIR_OP_IDIV ] = "idiv",
+ [VSIR_OP_IEQ ] = "ieq",
+ [VSIR_OP_IF ] = "if",
+ [VSIR_OP_IFC ] = "if",
+ [VSIR_OP_IGE ] = "ige",
+ [VSIR_OP_ILT ] = "ilt",
+ [VSIR_OP_IMAD ] = "imad",
+ [VSIR_OP_IMAX ] = "imax",
+ [VSIR_OP_IMIN ] = "imin",
+ [VSIR_OP_IMM_ATOMIC_ALLOC ] = "imm_atomic_alloc",
+ [VSIR_OP_IMM_ATOMIC_AND ] = "imm_atomic_and",
+ [VSIR_OP_IMM_ATOMIC_CMP_EXCH ] = "imm_atomic_cmp_exch",
+ [VSIR_OP_IMM_ATOMIC_CONSUME ] = "imm_atomic_consume",
+ [VSIR_OP_IMM_ATOMIC_EXCH ] = "imm_atomic_exch",
+ [VSIR_OP_IMM_ATOMIC_IADD ] = "imm_atomic_iadd",
+ [VSIR_OP_IMM_ATOMIC_IMAX ] = "imm_atomic_imax",
+ [VSIR_OP_IMM_ATOMIC_IMIN ] = "imm_atomic_imin",
+ [VSIR_OP_IMM_ATOMIC_OR ] = "imm_atomic_or",
+ [VSIR_OP_IMM_ATOMIC_UMAX ] = "imm_atomic_umax",
+ [VSIR_OP_IMM_ATOMIC_UMIN ] = "imm_atomic_umin",
+ [VSIR_OP_IMM_ATOMIC_XOR ] = "imm_atomic_xor",
+ [VSIR_OP_IMUL ] = "imul",
+ [VSIR_OP_IMUL_LOW ] = "imul_low",
+ [VSIR_OP_INE ] = "ine",
+ [VSIR_OP_INEG ] = "ineg",
+ [VSIR_OP_IREM ] = "irem",
+ [VSIR_OP_ISFINITE ] = "isfinite",
+ [VSIR_OP_ISHL ] = "ishl",
+ [VSIR_OP_ISHR ] = "ishr",
+ [VSIR_OP_ISINF ] = "isinf",
+ [VSIR_OP_ISNAN ] = "isnan",
+ [VSIR_OP_ITOD ] = "itod",
+ [VSIR_OP_ITOF ] = "itof",
+ [VSIR_OP_ITOI ] = "itoi",
+ [VSIR_OP_LABEL ] = "label",
+ [VSIR_OP_LD ] = "ld",
+ [VSIR_OP_LD2DMS ] = "ld2dms",
+ [VSIR_OP_LD2DMS_S ] = "ld2dms_s",
+ [VSIR_OP_LD_RAW ] = "ld_raw",
+ [VSIR_OP_LD_RAW_S ] = "ld_raw_s",
+ [VSIR_OP_LD_S ] = "ld_s",
+ [VSIR_OP_LD_STRUCTURED ] = "ld_structured",
+ [VSIR_OP_LD_STRUCTURED_S ] = "ld_structured_s",
+ [VSIR_OP_LD_UAV_TYPED ] = "ld_uav_typed",
+ [VSIR_OP_LD_UAV_TYPED_S ] = "ld_uav_typed_s",
+ [VSIR_OP_LIT ] = "lit",
+ [VSIR_OP_LOD ] = "lod",
+ [VSIR_OP_LOG ] = "log",
+ [VSIR_OP_LOGP ] = "logp",
+ [VSIR_OP_LOOP ] = "loop",
+ [VSIR_OP_LRP ] = "lrp",
+ [VSIR_OP_LTO ] = "lt",
+ [VSIR_OP_LTU ] = "lt_unord",
+ [VSIR_OP_M3x2 ] = "m3x2",
+ [VSIR_OP_M3x3 ] = "m3x3",
+ [VSIR_OP_M3x4 ] = "m3x4",
+ [VSIR_OP_M4x3 ] = "m4x3",
+ [VSIR_OP_M4x4 ] = "m4x4",
+ [VSIR_OP_MAD ] = "mad",
+ [VSIR_OP_MAX ] = "max",
+ [VSIR_OP_MIN ] = "min",
+ [VSIR_OP_MOV ] = "mov",
+ [VSIR_OP_MOVA ] = "mova",
+ [VSIR_OP_MOVC ] = "movc",
+ [VSIR_OP_MSAD ] = "msad",
+ [VSIR_OP_MUL ] = "mul",
+ [VSIR_OP_NEO ] = "ne_ord",
+ [VSIR_OP_NEU ] = "ne",
+ [VSIR_OP_NOP ] = "nop",
+ [VSIR_OP_NOT ] = "not",
+ [VSIR_OP_NRM ] = "nrm",
+ [VSIR_OP_OR ] = "or",
+ [VSIR_OP_ORD ] = "ord",
+ [VSIR_OP_PHASE ] = "phase",
+ [VSIR_OP_PHI ] = "phi",
+ [VSIR_OP_POW ] = "pow",
+ [VSIR_OP_QUAD_READ_ACROSS_D ] = "quad_read_across_d",
+ [VSIR_OP_QUAD_READ_ACROSS_X ] = "quad_read_across_x",
+ [VSIR_OP_QUAD_READ_ACROSS_Y ] = "quad_read_across_y",
+ [VSIR_OP_QUAD_READ_LANE_AT ] = "quad_read_lane_at",
+ [VSIR_OP_RCP ] = "rcp",
+ [VSIR_OP_REP ] = "rep",
+ [VSIR_OP_RESINFO ] = "resinfo",
+ [VSIR_OP_RET ] = "ret",
+ [VSIR_OP_RETP ] = "retp",
+ [VSIR_OP_ROUND_NE ] = "round_ne",
+ [VSIR_OP_ROUND_NI ] = "round_ni",
+ [VSIR_OP_ROUND_PI ] = "round_pi",
+ [VSIR_OP_ROUND_Z ] = "round_z",
+ [VSIR_OP_RSQ ] = "rsq",
+ [VSIR_OP_SAMPLE ] = "sample",
+ [VSIR_OP_SAMPLE_B ] = "sample_b",
+ [VSIR_OP_SAMPLE_B_CL_S ] = "sample_b_cl_s",
+ [VSIR_OP_SAMPLE_C ] = "sample_c",
+ [VSIR_OP_SAMPLE_C_CL_S ] = "sample_c_cl_s",
+ [VSIR_OP_SAMPLE_C_LZ ] = "sample_c_lz",
+ [VSIR_OP_SAMPLE_C_LZ_S ] = "sample_c_lz_s",
+ [VSIR_OP_SAMPLE_CL_S ] = "sample_cl_s",
+ [VSIR_OP_SAMPLE_GRAD ] = "sample_d",
+ [VSIR_OP_SAMPLE_GRAD_CL_S ] = "sample_d_cl_s",
+ [VSIR_OP_SAMPLE_INFO ] = "sample_info",
+ [VSIR_OP_SAMPLE_LOD ] = "sample_l",
+ [VSIR_OP_SAMPLE_LOD_S ] = "sample_l_s",
+ [VSIR_OP_SAMPLE_POS ] = "sample_pos",
+ [VSIR_OP_SETP ] = "setp",
+ [VSIR_OP_SGE ] = "sge",
+ [VSIR_OP_SGN ] = "sgn",
+ [VSIR_OP_SIN ] = "sin",
+ [VSIR_OP_SINCOS ] = "sincos",
+ [VSIR_OP_SLT ] = "slt",
+ [VSIR_OP_SQRT ] = "sqrt",
+ [VSIR_OP_STORE_RAW ] = "store_raw",
+ [VSIR_OP_STORE_STRUCTURED ] = "store_structured",
+ [VSIR_OP_STORE_UAV_TYPED ] = "store_uav_typed",
+ [VSIR_OP_SUB ] = "sub",
+ [VSIR_OP_SWAPC ] = "swapc",
+ [VSIR_OP_SWITCH ] = "switch",
+ [VSIR_OP_SWITCH_MONOLITHIC ] = "switch",
+ [VSIR_OP_SYNC ] = "sync",
+ [VSIR_OP_TAN ] = "tan",
+ [VSIR_OP_TEX ] = "tex",
+ [VSIR_OP_TEXBEM ] = "texbem",
+ [VSIR_OP_TEXBEML ] = "texbeml",
+ [VSIR_OP_TEXCOORD ] = "texcoord",
+ [VSIR_OP_TEXCRD ] = "texcrd",
+ [VSIR_OP_TEXDEPTH ] = "texdepth",
+ [VSIR_OP_TEXDP3 ] = "texdp3",
+ [VSIR_OP_TEXDP3TEX ] = "texdp3tex",
+ [VSIR_OP_TEXKILL ] = "texkill",
+ [VSIR_OP_TEXLD ] = "texld",
+ [VSIR_OP_TEXLDD ] = "texldd",
+ [VSIR_OP_TEXLDL ] = "texldl",
+ [VSIR_OP_TEXM3x2DEPTH ] = "texm3x2depth",
+ [VSIR_OP_TEXM3x2PAD ] = "texm3x2pad",
+ [VSIR_OP_TEXM3x2TEX ] = "texm3x2tex",
+ [VSIR_OP_TEXM3x3 ] = "texm3x3",
+ [VSIR_OP_TEXM3x3DIFF ] = "texm3x3diff",
+ [VSIR_OP_TEXM3x3PAD ] = "texm3x3pad",
+ [VSIR_OP_TEXM3x3SPEC ] = "texm3x3spec",
+ [VSIR_OP_TEXM3x3TEX ] = "texm3x3tex",
+ [VSIR_OP_TEXM3x3VSPEC ] = "texm3x3vspec",
+ [VSIR_OP_TEXREG2AR ] = "texreg2ar",
+ [VSIR_OP_TEXREG2GB ] = "texreg2gb",
+ [VSIR_OP_TEXREG2RGB ] = "texreg2rgb",
+ [VSIR_OP_UBFE ] = "ubfe",
+ [VSIR_OP_UDIV ] = "udiv",
+ [VSIR_OP_UDIV_SIMPLE ] = "udiv_simple",
+ [VSIR_OP_UGE ] = "uge",
+ [VSIR_OP_ULT ] = "ult",
+ [VSIR_OP_UMAX ] = "umax",
+ [VSIR_OP_UMIN ] = "umin",
+ [VSIR_OP_UMUL ] = "umul",
+ [VSIR_OP_UNO ] = "uno",
+ [VSIR_OP_UREM ] = "urem",
+ [VSIR_OP_USHR ] = "ushr",
+ [VSIR_OP_UTOD ] = "utod",
+ [VSIR_OP_UTOF ] = "utof",
+ [VSIR_OP_UTOU ] = "utou",
+ [VSIR_OP_WAVE_ACTIVE_ALL_EQUAL ] = "wave_active_all_equal",
+ [VSIR_OP_WAVE_ACTIVE_BALLOT ] = "wave_active_ballot",
+ [VSIR_OP_WAVE_ACTIVE_BIT_AND ] = "wave_active_bit_and",
+ [VSIR_OP_WAVE_ACTIVE_BIT_OR ] = "wave_active_bit_or",
+ [VSIR_OP_WAVE_ACTIVE_BIT_XOR ] = "wave_active_bit_xor",
+ [VSIR_OP_WAVE_ALL_BIT_COUNT ] = "wave_all_bit_count",
+ [VSIR_OP_WAVE_ALL_TRUE ] = "wave_all_true",
+ [VSIR_OP_WAVE_ANY_TRUE ] = "wave_any_true",
+ [VSIR_OP_WAVE_IS_FIRST_LANE ] = "wave_is_first_lane",
+ [VSIR_OP_WAVE_OP_ADD ] = "wave_op_add",
+ [VSIR_OP_WAVE_OP_IMAX ] = "wave_op_imax",
+ [VSIR_OP_WAVE_OP_IMIN ] = "wave_op_imin",
+ [VSIR_OP_WAVE_OP_MAX ] = "wave_op_max",
+ [VSIR_OP_WAVE_OP_MIN ] = "wave_op_min",
+ [VSIR_OP_WAVE_OP_MUL ] = "wave_op_mul",
+ [VSIR_OP_WAVE_OP_UMAX ] = "wave_op_umax",
+ [VSIR_OP_WAVE_OP_UMIN ] = "wave_op_umin",
+ [VSIR_OP_WAVE_PREFIX_BIT_COUNT ] = "wave_prefix_bit_count",
+ [VSIR_OP_WAVE_READ_LANE_AT ] = "wave_read_lane_at",
+ [VSIR_OP_WAVE_READ_LANE_FIRST ] = "wave_read_lane_first",
+ [VSIR_OP_XOR ] = "xor",
};
if ((uint32_t)op < ARRAY_SIZE(names))
@@ -637,11 +640,23 @@ static void vsir_src_param_init_sampler(struct vkd3d_shader_src_param *src, unsi
src->reg.dimension = VSIR_DIMENSION_NONE;
}
-static void src_param_init_ssa_scalar(struct vkd3d_shader_src_param *src, unsigned int idx,
- enum vkd3d_data_type data_type)
+static void src_param_init_ssa(struct vkd3d_shader_src_param *src, unsigned int idx,
+ enum vkd3d_data_type data_type, enum vsir_dimension dimension)
{
vsir_src_param_init(src, VKD3DSPR_SSA, data_type, 1);
src->reg.idx[0].offset = idx;
+
+ if (dimension == VSIR_DIMENSION_VEC4)
+ {
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
+ }
+}
+
+static void src_param_init_ssa_scalar(struct vkd3d_shader_src_param *src, unsigned int idx,
+ enum vkd3d_data_type data_type)
+{
+ src_param_init_ssa(src, idx, data_type, VSIR_DIMENSION_SCALAR);
}
static void src_param_init_ssa_bool(struct vkd3d_shader_src_param *src, unsigned int idx)
@@ -654,18 +669,9 @@ static void src_param_init_ssa_float(struct vkd3d_shader_src_param *src, unsigne
src_param_init_ssa_scalar(src, idx, VKD3D_DATA_FLOAT);
}
-static void src_param_init_ssa_vec4(struct vkd3d_shader_src_param *src, unsigned int idx,
- enum vkd3d_data_type data_type)
-{
- vsir_src_param_init(src, VKD3DSPR_SSA, data_type, 1);
- src->reg.idx[0].offset = idx;
- src->reg.dimension = VSIR_DIMENSION_VEC4;
- src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
-}
-
static void src_param_init_ssa_float4(struct vkd3d_shader_src_param *src, unsigned int idx)
{
- src_param_init_ssa_vec4(src, idx, VKD3D_DATA_FLOAT);
+ src_param_init_ssa(src, idx, VKD3D_DATA_FLOAT, VSIR_DIMENSION_VEC4);
}
static void src_param_init_temp_bool(struct vkd3d_shader_src_param *src, unsigned int idx)
@@ -718,11 +724,23 @@ void vsir_dst_param_init_null(struct vkd3d_shader_dst_param *dst)
dst->write_mask = 0;
}
-static void dst_param_init_ssa_scalar(struct vkd3d_shader_dst_param *dst, unsigned int idx,
- enum vkd3d_data_type data_type)
+static void dst_param_init_ssa(struct vkd3d_shader_dst_param *dst, unsigned int idx,
+ enum vkd3d_data_type data_type, enum vsir_dimension dimension)
{
vsir_dst_param_init(dst, VKD3DSPR_SSA, data_type, 1);
dst->reg.idx[0].offset = idx;
+
+ if (dimension == VSIR_DIMENSION_VEC4)
+ {
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
+ dst->write_mask = VKD3DSP_WRITEMASK_ALL;
+ }
+}
+
+static void dst_param_init_ssa_scalar(struct vkd3d_shader_dst_param *dst, unsigned int idx,
+ enum vkd3d_data_type data_type)
+{
+ dst_param_init_ssa(dst, idx, data_type, VSIR_DIMENSION_SCALAR);
}
static void dst_param_init_ssa_bool(struct vkd3d_shader_dst_param *dst, unsigned int idx)
@@ -735,18 +753,9 @@ static void dst_param_init_ssa_float(struct vkd3d_shader_dst_param *dst, unsigne
dst_param_init_ssa_scalar(dst, idx, VKD3D_DATA_FLOAT);
}
-static void dst_param_init_ssa_vec4(struct vkd3d_shader_dst_param *dst, unsigned int idx,
- enum vkd3d_data_type data_type)
-{
- vsir_dst_param_init(dst, VKD3DSPR_SSA, data_type, 1);
- dst->reg.idx[0].offset = idx;
- dst->reg.dimension = VSIR_DIMENSION_VEC4;
- dst->write_mask = VKD3DSP_WRITEMASK_ALL;
-}
-
static void dst_param_init_ssa_float4(struct vkd3d_shader_dst_param *dst, unsigned int idx)
{
- dst_param_init_ssa_vec4(dst, idx, VKD3D_DATA_FLOAT);
+ dst_param_init_ssa(dst, idx, VKD3D_DATA_FLOAT, VSIR_DIMENSION_VEC4);
}
static void dst_param_init_temp_bool(struct vkd3d_shader_dst_param *dst, unsigned int idx)
@@ -820,7 +829,7 @@ static bool vsir_instruction_init_label(struct vkd3d_shader_instruction *ins,
vsir_src_param_init_label(src_param, label_id);
- vsir_instruction_init(ins, location, VKD3DSIH_LABEL);
+ vsir_instruction_init(ins, location, VSIR_OP_LABEL);
ins->src = src_param;
ins->src_count = 1;
@@ -830,15 +839,15 @@ static bool vsir_instruction_init_label(struct vkd3d_shader_instruction *ins,
static bool vsir_instruction_is_dcl(const struct vkd3d_shader_instruction *instruction)
{
enum vkd3d_shader_opcode opcode = instruction->opcode;
- return (VKD3DSIH_DCL <= opcode && opcode <= VKD3DSIH_DCL_VERTICES_OUT)
- || opcode == VKD3DSIH_HS_DECLS;
+ return (VSIR_OP_DCL <= opcode && opcode <= VSIR_OP_DCL_VERTICES_OUT)
+ || opcode == VSIR_OP_HS_DECLS;
}
static void vkd3d_shader_instruction_make_nop(struct vkd3d_shader_instruction *ins)
{
struct vkd3d_shader_location location = ins->location;
- vsir_instruction_init(ins, &location, VKD3DSIH_NOP);
+ vsir_instruction_init(ins, &location, VSIR_OP_NOP);
}
static bool get_opcode_from_rel_op(enum vkd3d_shader_rel_op rel_op, enum vkd3d_data_type data_type,
@@ -851,7 +860,7 @@ static bool get_opcode_from_rel_op(enum vkd3d_shader_rel_op rel_op, enum vkd3d_d
*requires_swap = (rel_op == VKD3D_SHADER_REL_OP_GT);
if (data_type == VKD3D_DATA_FLOAT)
{
- *opcode = VKD3DSIH_LTO;
+ *opcode = VSIR_OP_LTO;
return true;
}
break;
@@ -861,7 +870,7 @@ static bool get_opcode_from_rel_op(enum vkd3d_shader_rel_op rel_op, enum vkd3d_d
*requires_swap = (rel_op == VKD3D_SHADER_REL_OP_LE);
if (data_type == VKD3D_DATA_FLOAT)
{
- *opcode = VKD3DSIH_GEO;
+ *opcode = VSIR_OP_GEO;
return true;
}
break;
@@ -870,7 +879,7 @@ static bool get_opcode_from_rel_op(enum vkd3d_shader_rel_op rel_op, enum vkd3d_d
*requires_swap = false;
if (data_type == VKD3D_DATA_FLOAT)
{
- *opcode = VKD3DSIH_EQO;
+ *opcode = VSIR_OP_EQO;
return true;
}
break;
@@ -879,7 +888,7 @@ static bool get_opcode_from_rel_op(enum vkd3d_shader_rel_op rel_op, enum vkd3d_d
*requires_swap = false;
if (data_type == VKD3D_DATA_FLOAT)
{
- *opcode = VKD3DSIH_NEO;
+ *opcode = VSIR_OP_NEO;
return true;
}
break;
@@ -898,17 +907,17 @@ static enum vkd3d_result vsir_program_normalize_addr(struct vsir_program *progra
{
ins = &program->instructions.elements[i];
- if (ins->opcode == VKD3DSIH_MOV && ins->dst[0].reg.type == VKD3DSPR_ADDR)
+ if (ins->opcode == VSIR_OP_MOV && ins->dst[0].reg.type == VKD3DSPR_ADDR)
{
if (tmp_idx == ~0u)
tmp_idx = program->temp_count++;
- ins->opcode = VKD3DSIH_FTOU;
+ ins->opcode = VSIR_OP_FTOU;
vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
ins->dst[0].reg.idx[0].offset = tmp_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
}
- else if (ins->opcode == VKD3DSIH_MOVA)
+ else if (ins->opcode == VSIR_OP_MOVA)
{
if (tmp_idx == ~0u)
tmp_idx = program->temp_count++;
@@ -918,12 +927,12 @@ static enum vkd3d_result vsir_program_normalize_addr(struct vsir_program *progra
ins = &program->instructions.elements[i];
ins2 = &program->instructions.elements[i + 1];
- ins->opcode = VKD3DSIH_ROUND_NE;
+ ins->opcode = VSIR_OP_ROUND_NE;
vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = tmp_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
- if (!vsir_instruction_init_with_params(program, ins2, &ins->location, VKD3DSIH_FTOU, 1, 1))
+ if (!vsir_instruction_init_with_params(program, ins2, &ins->location, VSIR_OP_FTOU, 1, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
vsir_register_init(&ins2->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
@@ -1001,7 +1010,7 @@ static enum vkd3d_result vsir_program_lower_ifc(struct vsir_program *program,
/* Create new if instruction using the previous result. */
ins = &instructions->elements[pos + 2];
- if (!vsir_instruction_init_with_params(program, ins, &ifc->location, VKD3DSIH_IF, 0, 1))
+ if (!vsir_instruction_init_with_params(program, ins, &ifc->location, VSIR_OP_IF, 0, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_NZ;
@@ -1035,7 +1044,7 @@ static enum vkd3d_result vsir_program_lower_texkill(struct vsir_program *program
/* tmp = ins->src[0] < 0 */
ins = &instructions->elements[pos + 1];
- if (!vsir_instruction_init_with_params(program, ins, &texkill->location, VKD3DSIH_LTO, 1, 2))
+ if (!vsir_instruction_init_with_params(program, ins, &texkill->location, VSIR_OP_LTO, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
@@ -1059,7 +1068,7 @@ static enum vkd3d_result vsir_program_lower_texkill(struct vsir_program *program
for (j = 1; j < components_read; ++j)
{
ins = &instructions->elements[pos + 1 + j];
- if (!(vsir_instruction_init_with_params(program, ins, &texkill->location, VKD3DSIH_OR, 1, 2)))
+ if (!(vsir_instruction_init_with_params(program, ins, &texkill->location, VSIR_OP_OR, 1, 2)))
return VKD3D_ERROR_OUT_OF_MEMORY;
vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
@@ -1080,7 +1089,7 @@ static enum vkd3d_result vsir_program_lower_texkill(struct vsir_program *program
/* discard_nz tmp.x */
ins = &instructions->elements[pos + 1 + components_read];
- if (!(vsir_instruction_init_with_params(program, ins, &texkill->location, VKD3DSIH_DISCARD, 0, 1)))
+ if (!(vsir_instruction_init_with_params(program, ins, &texkill->location, VSIR_OP_DISCARD, 0, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_NZ;
@@ -1122,10 +1131,10 @@ static enum vkd3d_result vsir_program_lower_precise_mad(struct vsir_program *pro
mul_ins = &instructions->elements[pos];
add_ins = &instructions->elements[pos + 1];
- mul_ins->opcode = VKD3DSIH_MUL;
+ mul_ins->opcode = VSIR_OP_MUL;
mul_ins->src_count = 2;
- if (!(vsir_instruction_init_with_params(program, add_ins, &mul_ins->location, VKD3DSIH_ADD, 1, 2)))
+ if (!(vsir_instruction_init_with_params(program, add_ins, &mul_ins->location, VSIR_OP_ADD, 1, 2)))
return VKD3D_ERROR_OUT_OF_MEMORY;
add_ins->flags = mul_ins->flags & VKD3DSI_PRECISE_XYZW;
@@ -1159,7 +1168,88 @@ static enum vkd3d_result vsir_program_lower_imul(struct vsir_program *program,
imul->dst[0] = imul->dst[1];
imul->dst_count = 1;
- imul->opcode = VKD3DSIH_IMUL_LOW;
+ imul->opcode = VSIR_OP_IMUL_LOW;
+
+ return VKD3D_OK;
+}
+
+static enum vkd3d_result vsir_program_lower_udiv(struct vsir_program *program,
+ struct vkd3d_shader_instruction *udiv, struct vsir_transformation_context *ctx)
+{
+ struct vkd3d_shader_instruction_array *instructions = &program->instructions;
+ size_t pos = udiv - instructions->elements;
+ struct vkd3d_shader_instruction *ins, *mov;
+ unsigned int count = 2;
+
+ if (udiv->dst_count != 2)
+ {
+ vkd3d_shader_error(ctx->message_context, &udiv->location,
+ VKD3D_SHADER_ERROR_VSIR_INVALID_DEST_COUNT,
+ "Internal compiler error: invalid destination count %u for UDIV.",
+ udiv->dst_count);
+ return VKD3D_ERROR;
+ }
+
+ if (udiv->dst[0].reg.type != VKD3DSPR_NULL)
+ ++count;
+ if (udiv->dst[1].reg.type != VKD3DSPR_NULL)
+ ++count;
+
+ if (!shader_instruction_array_insert_at(instructions, pos + 1, count))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ udiv = &instructions->elements[pos];
+
+ ins = &instructions->elements[pos + 1];
+
+ /* Save the sources in a SSA in case a destination collides with a source. */
+ mov = ins++;
+ if (!(vsir_instruction_init_with_params(program, mov, &udiv->location, VSIR_OP_MOV, 1, 1)))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+
+ mov->src[0] = udiv->src[0];
+ dst_param_init_ssa(&mov->dst[0], program->ssa_count, udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
+
+ mov = ins++;
+ if (!(vsir_instruction_init_with_params(program, mov, &udiv->location, VSIR_OP_MOV, 1, 1)))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+
+ mov->src[0] = udiv->src[1];
+ dst_param_init_ssa(&mov->dst[0], program->ssa_count + 1, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+
+ if (udiv->dst[0].reg.type != VKD3DSPR_NULL)
+ {
+ if (!(vsir_instruction_init_with_params(program, ins, &udiv->location, VSIR_OP_UDIV_SIMPLE, 1, 2)))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+
+ ins->flags = udiv->flags;
+
+ src_param_init_ssa(&ins->src[0], program->ssa_count,
+ udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
+ src_param_init_ssa(&ins->src[1], program->ssa_count + 1,
+ udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ ins->dst[0] = udiv->dst[0];
+
+ ++ins;
+ }
+
+ if (udiv->dst[1].reg.type != VKD3DSPR_NULL)
+ {
+ if (!(vsir_instruction_init_with_params(program, ins, &udiv->location, VSIR_OP_UREM, 1, 2)))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+
+ ins->flags = udiv->flags;
+
+ src_param_init_ssa(&ins->src[0], program->ssa_count,
+ udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
+ src_param_init_ssa(&ins->src[1], program->ssa_count + 1,
+ udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ ins->dst[0] = udiv->dst[1];
+
+ ++ins;
+ }
+
+ vkd3d_shader_instruction_make_nop(udiv);
+ program->ssa_count += 2;
return VKD3D_OK;
}
@@ -1182,7 +1272,7 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog
/* Save the source in a SSA in case a destination collides with the source. */
mov = ins++;
- if (!(vsir_instruction_init_with_params(program, mov, &sincos->location, VKD3DSIH_MOV, 1, 1)))
+ if (!(vsir_instruction_init_with_params(program, mov, &sincos->location, VSIR_OP_MOV, 1, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
mov->src[0] = sincos->src[0];
@@ -1195,7 +1285,7 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog
if (sincos->dst->write_mask & VKD3DSP_WRITEMASK_1)
{
- if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VKD3DSIH_SIN, 1, 1)))
+ if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VSIR_OP_SIN, 1, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins->flags = sincos->flags;
@@ -1210,7 +1300,7 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog
if (sincos->dst->write_mask & VKD3DSP_WRITEMASK_0)
{
- if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VKD3DSIH_COS, 1, 1)))
+ if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VSIR_OP_COS, 1, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins->flags = sincos->flags;
@@ -1259,20 +1349,21 @@ static enum vkd3d_result vsir_program_lower_sm4_sincos(struct vsir_program *prog
/* Save the source in a SSA in case a destination collides with the source. */
mov = ins++;
- if (!(vsir_instruction_init_with_params(program, mov, &sincos->location, VKD3DSIH_MOV, 1, 1)))
+ if (!(vsir_instruction_init_with_params(program, mov, &sincos->location, VSIR_OP_MOV, 1, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
mov->src[0] = sincos->src[0];
- dst_param_init_ssa_vec4(&mov->dst[0], program->ssa_count, sincos->src[0].reg.data_type);
+ dst_param_init_ssa(&mov->dst[0], program->ssa_count, sincos->src[0].reg.data_type, sincos->src[0].reg.dimension);
if (sincos->dst[0].reg.type != VKD3DSPR_NULL)
{
- if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VKD3DSIH_SIN, 1, 1)))
+ if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VSIR_OP_SIN, 1, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins->flags = sincos->flags;
- src_param_init_ssa_vec4(&ins->src[0], program->ssa_count, sincos->src[0].reg.data_type);
+ src_param_init_ssa(&ins->src[0], program->ssa_count,
+ sincos->src[0].reg.data_type, sincos->src[0].reg.dimension);
ins->dst[0] = sincos->dst[0];
++ins;
@@ -1280,12 +1371,13 @@ static enum vkd3d_result vsir_program_lower_sm4_sincos(struct vsir_program *prog
if (sincos->dst[1].reg.type != VKD3DSPR_NULL)
{
- if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VKD3DSIH_COS, 1, 1)))
+ if (!(vsir_instruction_init_with_params(program, ins, &sincos->location, VSIR_OP_COS, 1, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins->flags = sincos->flags;
- src_param_init_ssa_vec4(&ins->src[0], program->ssa_count, sincos->src[0].reg.data_type);
+ src_param_init_ssa(&ins->src[0], program->ssa_count,
+ sincos->src[0].reg.data_type, sincos->src[0].reg.dimension);
ins->dst[0] = sincos->dst[1];
++ins;
@@ -1318,7 +1410,7 @@ static enum vkd3d_result vsir_program_lower_texldp(struct vsir_program *program,
div_ins = &instructions->elements[pos + 1];
tex_ins = &instructions->elements[pos + 2];
- if (!vsir_instruction_init_with_params(program, div_ins, location, VKD3DSIH_DIV, 1, 2))
+ if (!vsir_instruction_init_with_params(program, div_ins, location, VSIR_OP_DIV, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
vsir_dst_param_init(&div_ins->dst[0], VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
@@ -1331,7 +1423,7 @@ static enum vkd3d_result vsir_program_lower_texldp(struct vsir_program *program,
div_ins->src[1] = tex->src[0];
div_ins->src[1].swizzle = vkd3d_shader_create_swizzle(w_comp, w_comp, w_comp, w_comp);
- if (!vsir_instruction_init_with_params(program, tex_ins, location, VKD3DSIH_TEXLD, 1, 2))
+ if (!vsir_instruction_init_with_params(program, tex_ins, location, VSIR_OP_TEXLD, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
tex_ins->dst[0] = tex->dst[0];
@@ -1364,7 +1456,7 @@ static enum vkd3d_result vsir_program_lower_texld(struct vsir_program *program,
if (!tex->flags)
{
- tex->opcode = VKD3DSIH_SAMPLE;
+ tex->opcode = VSIR_OP_SAMPLE;
tex->src = srcs;
tex->src_count = 3;
}
@@ -1372,7 +1464,7 @@ static enum vkd3d_result vsir_program_lower_texld(struct vsir_program *program,
{
enum vkd3d_shader_swizzle_component w = vsir_swizzle_get_component(srcs[0].swizzle, 3);
- tex->opcode = VKD3DSIH_SAMPLE_B;
+ tex->opcode = VSIR_OP_SAMPLE_B;
tex->src = srcs;
tex->src_count = 4;
@@ -1407,7 +1499,7 @@ static enum vkd3d_result vsir_program_lower_texldd(struct vsir_program *program,
srcs[3] = texldd->src[2];
srcs[4] = texldd->src[3];
- texldd->opcode = VKD3DSIH_SAMPLE_GRAD;
+ texldd->opcode = VSIR_OP_SAMPLE_GRAD;
texldd->src = srcs;
texldd->src_count = 5;
@@ -1498,59 +1590,64 @@ static enum vkd3d_result vsir_program_lower_instructions(struct vsir_program *pr
switch (ins->opcode)
{
- case VKD3DSIH_IFC:
+ case VSIR_OP_IFC:
if ((ret = vsir_program_lower_ifc(program, ins, &tmp_idx, message_context)) < 0)
return ret;
break;
- case VKD3DSIH_TEXKILL:
+ case VSIR_OP_TEXKILL:
if ((ret = vsir_program_lower_texkill(program, ins, &tmp_idx)) < 0)
return ret;
break;
- case VKD3DSIH_MAD:
+ case VSIR_OP_MAD:
if ((ret = vsir_program_lower_precise_mad(program, ins, &tmp_idx)) < 0)
return ret;
break;
- case VKD3DSIH_DCL:
- case VKD3DSIH_DCL_CONSTANT_BUFFER:
- case VKD3DSIH_DCL_GLOBAL_FLAGS:
- case VKD3DSIH_DCL_SAMPLER:
- case VKD3DSIH_DCL_TEMPS:
- case VKD3DSIH_DCL_TESSELLATOR_DOMAIN:
- case VKD3DSIH_DCL_THREAD_GROUP:
- case VKD3DSIH_DCL_UAV_TYPED:
+ case VSIR_OP_DCL:
+ case VSIR_OP_DCL_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_GLOBAL_FLAGS:
+ case VSIR_OP_DCL_SAMPLER:
+ case VSIR_OP_DCL_TEMPS:
+ case VSIR_OP_DCL_TESSELLATOR_DOMAIN:
+ case VSIR_OP_DCL_THREAD_GROUP:
+ case VSIR_OP_DCL_UAV_TYPED:
vkd3d_shader_instruction_make_nop(ins);
break;
- case VKD3DSIH_DCL_INPUT:
+ case VSIR_OP_DCL_INPUT:
vsir_program_lower_dcl_input(program, ins, ctx);
vkd3d_shader_instruction_make_nop(ins);
break;
- case VKD3DSIH_DCL_OUTPUT:
+ case VSIR_OP_DCL_OUTPUT:
vsir_program_lower_dcl_output(program, ins, ctx);
vkd3d_shader_instruction_make_nop(ins);
break;
- case VKD3DSIH_DCL_INPUT_SGV:
- case VKD3DSIH_DCL_INPUT_SIV:
- case VKD3DSIH_DCL_INPUT_PS:
- case VKD3DSIH_DCL_INPUT_PS_SGV:
- case VKD3DSIH_DCL_INPUT_PS_SIV:
- case VKD3DSIH_DCL_OUTPUT_SGV:
- case VKD3DSIH_DCL_OUTPUT_SIV:
+ case VSIR_OP_DCL_INPUT_SGV:
+ case VSIR_OP_DCL_INPUT_SIV:
+ case VSIR_OP_DCL_INPUT_PS:
+ case VSIR_OP_DCL_INPUT_PS_SGV:
+ case VSIR_OP_DCL_INPUT_PS_SIV:
+ case VSIR_OP_DCL_OUTPUT_SGV:
+ case VSIR_OP_DCL_OUTPUT_SIV:
vkd3d_shader_instruction_make_nop(ins);
break;
- case VKD3DSIH_IMUL:
- case VKD3DSIH_UMUL:
+ case VSIR_OP_IMUL:
+ case VSIR_OP_UMUL:
if ((ret = vsir_program_lower_imul(program, ins, ctx)) < 0)
return ret;
break;
- case VKD3DSIH_SINCOS:
+ case VSIR_OP_UDIV:
+ if ((ret = vsir_program_lower_udiv(program, ins, ctx)) < 0)
+ return ret;
+ break;
+
+ case VSIR_OP_SINCOS:
if (ins->dst_count == 1)
{
if ((ret = vsir_program_lower_sm1_sincos(program, ins)) < 0)
@@ -1563,7 +1660,7 @@ static enum vkd3d_result vsir_program_lower_instructions(struct vsir_program *pr
}
break;
- case VKD3DSIH_TEXLD:
+ case VSIR_OP_TEXLD:
if (ins->flags == VKD3DSI_TEXLD_PROJECT)
{
if ((ret = vsir_program_lower_texldp(program, ins, &tmp_idx)) < 0)
@@ -1576,29 +1673,29 @@ static enum vkd3d_result vsir_program_lower_instructions(struct vsir_program *pr
}
break;
- case VKD3DSIH_TEXLDD:
+ case VSIR_OP_TEXLDD:
if ((ret = vsir_program_lower_texldd(program, ins)) < 0)
return ret;
break;
- case VKD3DSIH_TEXBEM:
- case VKD3DSIH_TEXBEML:
- case VKD3DSIH_TEXCOORD:
- case VKD3DSIH_TEXCRD:
- case VKD3DSIH_TEXDEPTH:
- case VKD3DSIH_TEXDP3:
- case VKD3DSIH_TEXDP3TEX:
- case VKD3DSIH_TEXLDL:
- case VKD3DSIH_TEXM3x2PAD:
- case VKD3DSIH_TEXM3x2TEX:
- case VKD3DSIH_TEXM3x3DIFF:
- case VKD3DSIH_TEXM3x3PAD:
- case VKD3DSIH_TEXM3x3SPEC:
- case VKD3DSIH_TEXM3x3TEX:
- case VKD3DSIH_TEXM3x3VSPEC:
- case VKD3DSIH_TEXREG2AR:
- case VKD3DSIH_TEXREG2GB:
- case VKD3DSIH_TEXREG2RGB:
+ case VSIR_OP_TEXBEM:
+ case VSIR_OP_TEXBEML:
+ case VSIR_OP_TEXCOORD:
+ case VSIR_OP_TEXCRD:
+ case VSIR_OP_TEXDEPTH:
+ case VSIR_OP_TEXDP3:
+ case VSIR_OP_TEXDP3TEX:
+ case VSIR_OP_TEXLDL:
+ case VSIR_OP_TEXM3x2PAD:
+ case VSIR_OP_TEXM3x2TEX:
+ case VSIR_OP_TEXM3x3DIFF:
+ case VSIR_OP_TEXM3x3PAD:
+ case VSIR_OP_TEXM3x3SPEC:
+ case VSIR_OP_TEXM3x3TEX:
+ case VSIR_OP_TEXM3x3VSPEC:
+ case VSIR_OP_TEXREG2AR:
+ case VSIR_OP_TEXREG2GB:
+ case VSIR_OP_TEXREG2RGB:
vkd3d_shader_error(ctx->message_context, &ins->location, VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED,
"Aborting due to unimplemented feature: Combined sampler instruction \"%s\" (%#x).",
vsir_opcode_get_name(ins->opcode, "<unknown>"), ins->opcode);
@@ -1657,12 +1754,12 @@ static enum vkd3d_result vsir_program_ensure_ret(struct vsir_program *program,
{
static const struct vkd3d_shader_location no_loc;
if (program->instructions.count
- && program->instructions.elements[program->instructions.count - 1].opcode == VKD3DSIH_RET)
+ && program->instructions.elements[program->instructions.count - 1].opcode == VSIR_OP_RET)
return VKD3D_OK;
if (!shader_instruction_array_insert_at(&program->instructions, program->instructions.count, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_instruction_init(&program->instructions.elements[program->instructions.count - 1], &no_loc, VKD3DSIH_RET);
+ vsir_instruction_init(&program->instructions.elements[program->instructions.count - 1], &no_loc, VSIR_OP_RET);
return VKD3D_OK;
}
@@ -1733,7 +1830,7 @@ static enum vkd3d_result vsir_program_ensure_diffuse(struct vsir_program *progra
{
ins = &program->instructions.elements[i];
- if (!vsir_instruction_is_dcl(ins) && ins->opcode != VKD3DSIH_LABEL && ins->opcode != VKD3DSIH_NOP)
+ if (!vsir_instruction_is_dcl(ins) && ins->opcode != VSIR_OP_LABEL && ins->opcode != VSIR_OP_NOP)
break;
}
@@ -1741,7 +1838,7 @@ static enum vkd3d_result vsir_program_ensure_diffuse(struct vsir_program *progra
return VKD3D_ERROR_OUT_OF_MEMORY;
ins = &program->instructions.elements[i];
- vsir_instruction_init_with_params(program, ins, &no_loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &no_loc, VSIR_OP_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_ATTROUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = 0;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
@@ -1953,7 +2050,7 @@ static enum vkd3d_result vsir_program_remap_output_signature(struct vsir_program
struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];
struct vkd3d_shader_location loc;
- if (ins->opcode != VKD3DSIH_RET)
+ if (ins->opcode != VSIR_OP_RET)
continue;
loc = ins->location;
@@ -1965,7 +2062,7 @@ static enum vkd3d_result vsir_program_remap_output_signature(struct vsir_program
{
e = &signature->elements[j];
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
dst_param_init_output(&ins->dst[0], VKD3D_DATA_FLOAT, e->register_index, e->mask);
vsir_src_param_init(&ins->src[0], VKD3DSPR_IMMCONST, VKD3D_DATA_FLOAT, 0);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
@@ -2006,7 +2103,7 @@ struct hull_flattener
static bool flattener_is_in_fork_or_join_phase(const struct hull_flattener *flattener)
{
- return flattener->phase == VKD3DSIH_HS_FORK_PHASE || flattener->phase == VKD3DSIH_HS_JOIN_PHASE;
+ return flattener->phase == VSIR_OP_HS_FORK_PHASE || flattener->phase == VSIR_OP_HS_JOIN_PHASE;
}
struct shader_phase_location
@@ -2030,7 +2127,7 @@ static void flattener_eliminate_phase_related_dcls(struct hull_flattener *normal
struct shader_phase_location *loc;
bool b;
- if (ins->opcode == VKD3DSIH_HS_FORK_PHASE || ins->opcode == VKD3DSIH_HS_JOIN_PHASE)
+ if (ins->opcode == VSIR_OP_HS_FORK_PHASE || ins->opcode == VSIR_OP_HS_JOIN_PHASE)
{
b = flattener_is_in_fork_or_join_phase(normaliser);
/* Reset the phase info. */
@@ -2042,21 +2139,21 @@ static void flattener_eliminate_phase_related_dcls(struct hull_flattener *normal
vkd3d_shader_instruction_make_nop(ins);
return;
}
- else if (ins->opcode == VKD3DSIH_DCL_HS_FORK_PHASE_INSTANCE_COUNT
- || ins->opcode == VKD3DSIH_DCL_HS_JOIN_PHASE_INSTANCE_COUNT)
+ else if (ins->opcode == VSIR_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT
+ || ins->opcode == VSIR_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT)
{
normaliser->instance_count = ins->declaration.count + !ins->declaration.count;
vkd3d_shader_instruction_make_nop(ins);
return;
}
- if (normaliser->phase == VKD3DSIH_INVALID || vsir_instruction_is_dcl(ins))
+ if (normaliser->phase == VSIR_OP_INVALID || vsir_instruction_is_dcl(ins))
return;
if (normaliser->phase_body_idx == ~0u)
normaliser->phase_body_idx = index;
- if (ins->opcode == VKD3DSIH_RET)
+ if (ins->opcode == VSIR_OP_RET)
{
normaliser->last_ret_location = ins->location;
vkd3d_shader_instruction_make_nop(ins);
@@ -2133,7 +2230,7 @@ static enum vkd3d_result vsir_program_flatten_hull_shader_phases(struct vsir_pro
instructions = &flattener.instructions;
- flattener.phase = VKD3DSIH_INVALID;
+ flattener.phase = VSIR_OP_INVALID;
for (i = 0, locations.count = 0; i < instructions->count; ++i)
flattener_eliminate_phase_related_dcls(&flattener, i, &locations);
bitmap_clear(program->io_dcls, VKD3DSPR_FORKINSTID);
@@ -2142,11 +2239,12 @@ static enum vkd3d_result vsir_program_flatten_hull_shader_phases(struct vsir_pro
if ((result = flattener_flatten_phases(&flattener, &locations)) < 0)
return result;
- if (flattener.phase != VKD3DSIH_INVALID)
+ if (flattener.phase != VSIR_OP_INVALID)
{
if (!shader_instruction_array_reserve(&flattener.instructions, flattener.instructions.count + 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_instruction_init(&instructions->elements[instructions->count++], &flattener.last_ret_location, VKD3DSIH_RET);
+ vsir_instruction_init(&instructions->elements[instructions->count++],
+ &flattener.last_ret_location, VSIR_OP_RET);
}
program->instructions = flattener.instructions;
@@ -2162,7 +2260,7 @@ struct control_point_normaliser
static bool control_point_normaliser_is_in_control_point_phase(const struct control_point_normaliser *normaliser)
{
- return normaliser->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE;
+ return normaliser->phase == VSIR_OP_HS_CONTROL_POINT_PHASE;
}
struct vkd3d_shader_src_param *vsir_program_create_outpointid_param(struct vsir_program *program)
@@ -2220,7 +2318,7 @@ static enum vkd3d_result control_point_normaliser_emit_hs_input(struct control_p
normaliser->instructions.count += count;
ins = &normaliser->instructions.elements[dst];
- vsir_instruction_init(ins, location, VKD3DSIH_HS_CONTROL_POINT_PHASE);
+ vsir_instruction_init(ins, location, VSIR_OP_HS_CONTROL_POINT_PHASE);
++ins;
@@ -2230,7 +2328,7 @@ static enum vkd3d_result control_point_normaliser_emit_hs_input(struct control_p
if (!e->used_mask)
continue;
- vsir_instruction_init(ins, location, VKD3DSIH_MOV);
+ vsir_instruction_init(ins, location, VSIR_OP_MOV);
ins->dst = shader_dst_param_allocator_get(&normaliser->instructions.dst_params, 1);
ins->dst_count = 1;
ins->src = shader_src_param_allocator_get(&normaliser->instructions.src_params, 1);
@@ -2257,7 +2355,7 @@ static enum vkd3d_result control_point_normaliser_emit_hs_input(struct control_p
++ins;
}
- vsir_instruction_init(ins, location, VKD3DSIH_RET);
+ vsir_instruction_init(ins, location, VSIR_OP_RET);
return VKD3D_OK;
}
@@ -2288,7 +2386,7 @@ static enum vkd3d_result instruction_array_normalise_hull_shader_control_point_i
}
normaliser.instructions = program->instructions;
instructions = &normaliser.instructions;
- normaliser.phase = VKD3DSIH_INVALID;
+ normaliser.phase = VSIR_OP_INVALID;
for (i = 0; i < normaliser.instructions.count; ++i)
{
@@ -2296,9 +2394,9 @@ static enum vkd3d_result instruction_array_normalise_hull_shader_control_point_i
switch (ins->opcode)
{
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
normaliser.phase = ins->opcode;
break;
default:
@@ -2310,7 +2408,7 @@ static enum vkd3d_result instruction_array_normalise_hull_shader_control_point_i
}
}
- normaliser.phase = VKD3DSIH_INVALID;
+ normaliser.phase = VSIR_OP_INVALID;
input_control_point_count = 1;
for (i = 0; i < instructions->count; ++i)
@@ -2319,15 +2417,15 @@ static enum vkd3d_result instruction_array_normalise_hull_shader_control_point_i
switch (ins->opcode)
{
- case VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_INPUT_CONTROL_POINT_COUNT:
input_control_point_count = ins->declaration.count;
break;
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
program->instructions = normaliser.instructions;
program->normalisation_level = VSIR_NORMALISED_HULL_CONTROL_POINT_IO;
return VKD3D_OK;
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
/* ins may be relocated if the instruction array expands. */
location = ins->location;
ret = control_point_normaliser_emit_hs_input(&normaliser, &program->input_signature,
@@ -2359,6 +2457,7 @@ struct io_normaliser_register_data
struct io_normaliser
{
struct vkd3d_shader_message_context *message_context;
+ enum vkd3d_result result;
struct vkd3d_shader_instruction_array instructions;
enum vkd3d_shader_type shader_type;
uint8_t major;
@@ -2385,7 +2484,7 @@ struct io_normaliser
static bool io_normaliser_is_in_fork_or_join_phase(const struct io_normaliser *normaliser)
{
- return normaliser->phase == VKD3DSIH_HS_FORK_PHASE || normaliser->phase == VKD3DSIH_HS_JOIN_PHASE;
+ return normaliser->phase == VSIR_OP_HS_FORK_PHASE || normaliser->phase == VSIR_OP_HS_JOIN_PHASE;
}
static bool shader_signature_find_element_for_reg(const struct shader_signature *signature,
@@ -2862,7 +2961,7 @@ static bool shader_dst_param_io_normalise(struct vkd3d_shader_dst_param *dst_par
}
static void shader_src_param_io_normalise(struct vkd3d_shader_src_param *src_param,
- struct io_normaliser *normaliser)
+ struct io_normaliser *normaliser, struct vkd3d_shader_instruction *ins)
{
unsigned int i, id_idx, reg_idx, write_mask, element_idx, component_idx;
struct vkd3d_shader_register *reg = &src_param->reg;
@@ -2925,7 +3024,12 @@ static void shader_src_param_io_normalise(struct vkd3d_shader_src_param *src_par
id_idx = reg->idx_count - 1;
write_mask = VKD3DSP_WRITEMASK_0 << vsir_swizzle_get_component(src_param->swizzle, 0);
if (!shader_signature_find_element_for_reg(signature, reg_idx, write_mask, &element_idx))
- vkd3d_unreachable();
+ {
+ vkd3d_shader_error(normaliser->message_context, &ins->location, VKD3D_SHADER_ERROR_VSIR_INVALID_SIGNATURE,
+ "Unable to resolve I/O register to a signature element.");
+ normaliser->result = VKD3D_ERROR_INVALID_SHADER;
+ return;
+ }
e = &signature->elements[element_idx];
if ((e->register_count > 1 || vsir_sysval_semantic_is_tess_factor(e->sysval_semantic)))
@@ -2948,9 +3052,9 @@ static void shader_instruction_normalise_io_params(struct vkd3d_shader_instructi
switch (ins->opcode)
{
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
normaliser->phase = ins->opcode;
memset(normaliser->input_dcl_params, 0, sizeof(normaliser->input_dcl_params));
memset(normaliser->output_dcl_params, 0, sizeof(normaliser->output_dcl_params));
@@ -2962,7 +3066,7 @@ static void shader_instruction_normalise_io_params(struct vkd3d_shader_instructi
for (i = 0; i < ins->dst_count; ++i)
shader_dst_param_io_normalise(&ins->dst[i], normaliser);
for (i = 0; i < ins->src_count; ++i)
- shader_src_param_io_normalise(&ins->src[i], normaliser);
+ shader_src_param_io_normalise(&ins->src[i], normaliser, ins);
break;
}
}
@@ -2970,14 +3074,14 @@ static void shader_instruction_normalise_io_params(struct vkd3d_shader_instructi
static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program *program,
struct vsir_transformation_context *ctx)
{
- struct io_normaliser normaliser = {ctx->message_context, program->instructions};
+ struct io_normaliser normaliser = {ctx->message_context, VKD3D_OK, program->instructions};
struct vkd3d_shader_instruction *ins;
enum vkd3d_result ret;
unsigned int i;
VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_HULL_CONTROL_POINT_IO);
- normaliser.phase = VKD3DSIH_INVALID;
+ normaliser.phase = VSIR_OP_INVALID;
normaliser.shader_type = program->shader_version.type;
normaliser.major = program->shader_version.major;
normaliser.input_signature = &program->input_signature;
@@ -2990,17 +3094,17 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program
switch (ins->opcode)
{
- case VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT:
normaliser.output_control_point_count = ins->declaration.count;
break;
- case VKD3DSIH_DCL_INDEX_RANGE:
+ case VSIR_OP_DCL_INDEX_RANGE:
if ((ret = io_normaliser_add_index_range(&normaliser, ins)) < 0)
return ret;
vkd3d_shader_instruction_make_nop(ins);
break;
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
normaliser.phase = ins->opcode;
break;
default:
@@ -3018,14 +3122,14 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program
return ret;
}
- normaliser.phase = VKD3DSIH_INVALID;
+ normaliser.phase = VSIR_OP_INVALID;
for (i = 0; i < normaliser.instructions.count; ++i)
shader_instruction_normalise_io_params(&normaliser.instructions.elements[i], &normaliser);
program->instructions = normaliser.instructions;
program->use_vocp = normaliser.use_vocp;
program->normalisation_level = VSIR_NORMALISED_SM6;
- return VKD3D_OK;
+ return normaliser.result;
}
struct flat_constant_def
@@ -3118,7 +3222,7 @@ static enum vkd3d_result vsir_program_normalise_flat_constants(struct vsir_progr
{
struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];
- if (ins->opcode == VKD3DSIH_DEF || ins->opcode == VKD3DSIH_DEFI || ins->opcode == VKD3DSIH_DEFB)
+ if (ins->opcode == VSIR_OP_DEF || ins->opcode == VSIR_OP_DEFI || ins->opcode == VSIR_OP_DEFB)
{
struct flat_constant_def *def;
@@ -3160,9 +3264,9 @@ static enum vkd3d_result vsir_program_remove_dead_code(struct vsir_program *prog
switch (ins->opcode)
{
- case VKD3DSIH_IF:
- case VKD3DSIH_LOOP:
- case VKD3DSIH_SWITCH:
+ case VSIR_OP_IF:
+ case VSIR_OP_LOOP:
+ case VSIR_OP_SWITCH:
if (dead)
{
vkd3d_shader_instruction_make_nop(ins);
@@ -3170,15 +3274,15 @@ static enum vkd3d_result vsir_program_remove_dead_code(struct vsir_program *prog
}
break;
- case VKD3DSIH_ENDIF:
- case VKD3DSIH_ENDLOOP:
- case VKD3DSIH_ENDSWITCH:
- case VKD3DSIH_ELSE:
+ case VSIR_OP_ENDIF:
+ case VSIR_OP_ENDLOOP:
+ case VSIR_OP_ENDSWITCH:
+ case VSIR_OP_ELSE:
if (dead)
{
if (depth > 0)
{
- if (ins->opcode != VKD3DSIH_ELSE)
+ if (ins->opcode != VSIR_OP_ELSE)
--depth;
vkd3d_shader_instruction_make_nop(ins);
}
@@ -3193,9 +3297,9 @@ static enum vkd3d_result vsir_program_remove_dead_code(struct vsir_program *prog
* segment began. So it starts at zero and it signals the
* termination of the dead code segment when it would
* become negative. */
- case VKD3DSIH_BREAK:
- case VKD3DSIH_RET:
- case VKD3DSIH_CONTINUE:
+ case VSIR_OP_BREAK:
+ case VSIR_OP_RET:
+ case VSIR_OP_CONTINUE:
if (dead)
{
vkd3d_shader_instruction_make_nop(ins);
@@ -3210,8 +3314,8 @@ static enum vkd3d_result vsir_program_remove_dead_code(struct vsir_program *prog
/* If `case' or `default' appears at zero depth, it means
* that they are a possible target for the corresponding
* switch, so the code is live again. */
- case VKD3DSIH_CASE:
- case VKD3DSIH_DEFAULT:
+ case VSIR_OP_CASE:
+ case VSIR_OP_DEFAULT:
if (dead)
{
if (depth == 0)
@@ -3225,9 +3329,9 @@ static enum vkd3d_result vsir_program_remove_dead_code(struct vsir_program *prog
* outside of any block. When a phase returns, control is
* moved to the following phase, so they make code live
* again. */
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
dead = false;
break;
@@ -3341,7 +3445,7 @@ static bool cf_flattener_copy_instruction(struct cf_flattener *flattener,
{
struct vkd3d_shader_instruction *dst_ins;
- if (instruction->opcode == VKD3DSIH_NOP)
+ if (instruction->opcode == VSIR_OP_NOP)
return true;
if (!(dst_ins = cf_flattener_require_space(flattener, 1)))
@@ -3395,7 +3499,7 @@ static struct vkd3d_shader_src_param *cf_flattener_emit_branch(struct cf_flatten
if (!(ins = cf_flattener_require_space(flattener, 1)))
return NULL;
- vsir_instruction_init(ins, &flattener->location, VKD3DSIH_BRANCH);
+ vsir_instruction_init(ins, &flattener->location, VSIR_OP_BRANCH);
if (condition)
{
@@ -3555,9 +3659,9 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
* phase instruction, and in all other shader types begins with the first label instruction.
* Declaring an indexable temp with function scope is not considered a declaration,
* because it needs to live inside a function. */
- if (!after_declarations_section && instruction->opcode != VKD3DSIH_NOP)
+ if (!after_declarations_section && instruction->opcode != VSIR_OP_NOP)
{
- bool is_function_indexable = instruction->opcode == VKD3DSIH_DCL_INDEXABLE_TEMP
+ bool is_function_indexable = instruction->opcode == VSIR_OP_DCL_INDEXABLE_TEMP
&& instruction->declaration.indexable_temp.has_function_scope;
if (!vsir_instruction_is_dcl(instruction) || is_function_indexable)
@@ -3572,22 +3676,22 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
switch (instruction->opcode)
{
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
if (!cf_flattener_copy_instruction(flattener, instruction))
return VKD3D_ERROR_OUT_OF_MEMORY;
- if (instruction->opcode != VKD3DSIH_HS_CONTROL_POINT_PHASE || !instruction->flags)
+ if (instruction->opcode != VSIR_OP_HS_CONTROL_POINT_PHASE || !instruction->flags)
after_declarations_section = false;
break;
- case VKD3DSIH_LABEL:
+ case VSIR_OP_LABEL:
vkd3d_shader_error(message_context, &instruction->location,
VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED,
"Aborting due to not yet implemented feature: Label instruction.");
return VKD3D_ERROR_NOT_IMPLEMENTED;
- case VKD3DSIH_IF:
+ case VSIR_OP_IF:
if (!(cf_info = cf_flattener_push_control_flow_level(flattener)))
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -3611,7 +3715,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
++flattener->branch_id;
break;
- case VKD3DSIH_ELSE:
+ case VSIR_OP_ELSE:
if (cf_info->inside_block)
cf_flattener_emit_unconditional_branch(flattener, cf_info->u.if_.merge_block_id);
@@ -3625,7 +3729,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
cf_info->inside_block = true;
break;
- case VKD3DSIH_ENDIF:
+ case VSIR_OP_ENDIF:
if (cf_info->inside_block)
cf_flattener_emit_unconditional_branch(flattener, cf_info->u.if_.merge_block_id);
@@ -3634,7 +3738,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
cf_flattener_pop_control_flow_level(flattener);
break;
- case VKD3DSIH_LOOP:
+ case VSIR_OP_LOOP:
if (!(cf_info = cf_flattener_push_control_flow_level(flattener)))
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -3663,7 +3767,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
++flattener->loop_id;
break;
- case VKD3DSIH_ENDLOOP:
+ case VSIR_OP_ENDLOOP:
if (cf_info->inside_block)
cf_flattener_emit_unconditional_branch(flattener, cf_info->u.loop.continue_block_id);
@@ -3674,7 +3778,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
cf_flattener_pop_control_flow_level(flattener);
break;
- case VKD3DSIH_SWITCH:
+ case VSIR_OP_SWITCH:
if (!(cf_info = cf_flattener_push_control_flow_level(flattener)))
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -3685,7 +3789,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
if (!(dst_ins = cf_flattener_require_space(flattener, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_instruction_init(dst_ins, &instruction->location, VKD3DSIH_SWITCH_MONOLITHIC);
+ vsir_instruction_init(dst_ins, &instruction->location, VSIR_OP_SWITCH_MONOLITHIC);
++flattener->instruction_count;
cf_info->u.switch_.id = flattener->switch_id;
@@ -3706,7 +3810,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
break;
- case VKD3DSIH_ENDSWITCH:
+ case VSIR_OP_ENDSWITCH:
{
struct vkd3d_shader_src_param *src_params;
unsigned int j;
@@ -3741,7 +3845,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
break;
}
- case VKD3DSIH_CASE:
+ case VSIR_OP_CASE:
{
unsigned int label_id, value;
@@ -3773,7 +3877,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
break;
}
- case VKD3DSIH_DEFAULT:
+ case VSIR_OP_DEFAULT:
cf_info->u.switch_.default_block_id = cf_flattener_alloc_block_id(flattener);
if (cf_info->inside_block) /* fall-through */
cf_flattener_emit_unconditional_branch(flattener, cf_info->u.switch_.default_block_id);
@@ -3785,7 +3889,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
cf_info->inside_block = true;
break;
- case VKD3DSIH_BREAK:
+ case VSIR_OP_BREAK:
{
struct cf_flattener_info *breakable_cf_info;
@@ -3808,7 +3912,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
break;
}
- case VKD3DSIH_BREAKP:
+ case VSIR_OP_BREAKP:
{
struct cf_flattener_info *loop_cf_info;
@@ -3823,7 +3927,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
break;
}
- case VKD3DSIH_CONTINUE:
+ case VSIR_OP_CONTINUE:
{
struct cf_flattener_info *loop_cf_info;
@@ -3839,7 +3943,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
break;
}
- case VKD3DSIH_CONTINUEP:
+ case VSIR_OP_CONTINUEP:
{
struct cf_flattener_info *loop_cf_info;
@@ -3854,7 +3958,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
break;
}
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
if (!cf_flattener_copy_instruction(flattener, instruction))
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -3971,14 +4075,14 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
switch (ins->opcode)
{
- case VKD3DSIH_LABEL:
+ case VSIR_OP_LABEL:
current_label = label_from_src_param(&ins->src[0]);
if (!reserve_instructions(&instructions, &ins_capacity, ins_count + 1))
goto fail;
instructions[ins_count++] = *ins;
continue;
- case VKD3DSIH_SWITCH_MONOLITHIC:
+ case VSIR_OP_SWITCH_MONOLITHIC:
break;
default:
@@ -3999,7 +4103,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
goto fail;
if (!vsir_instruction_init_with_params(program, &instructions[ins_count],
- &ins->location, VKD3DSIH_BRANCH, 0, 1))
+ &ins->location, VSIR_OP_BRANCH, 0, 1))
goto fail;
vsir_src_param_init_label(&instructions[ins_count].src[0], default_label);
++ins_count;
@@ -4015,7 +4119,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
unsigned int fallthrough_label, case_label = label_from_src_param(&ins->src[3 + 2 * j + 1]);
if (!vsir_instruction_init_with_params(program,
- &instructions[ins_count], &ins->location, VKD3DSIH_IEQ, 1, 2))
+ &instructions[ins_count], &ins->location, VSIR_OP_IEQ, 1, 2))
goto fail;
dst_param_init_ssa_bool(&instructions[ins_count].dst[0], ssa_count);
instructions[ins_count].src[0] = ins->src[0];
@@ -4031,7 +4135,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
fallthrough_label = block_count + 1;
if (!vsir_instruction_init_with_params(program, &instructions[ins_count],
- &ins->location, VKD3DSIH_BRANCH, 0, 3))
+ &ins->location, VSIR_OP_BRANCH, 0, 3))
goto fail;
src_param_init_ssa_bool(&instructions[ins_count].src[0], ssa_count);
vsir_src_param_init_label(&instructions[ins_count].src[1], case_label);
@@ -4053,7 +4157,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
else
{
if (!vsir_instruction_init_with_params(program,
- &instructions[ins_count], &ins->location, VKD3DSIH_LABEL, 0, 1))
+ &instructions[ins_count], &ins->location, VSIR_OP_LABEL, 0, 1))
goto fail;
vsir_src_param_init_label(&instructions[ins_count].src[0], ++block_count);
++ins_count;
@@ -4168,7 +4272,7 @@ static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps(struct vsir_
/* Only phi src/dst SSA values need be converted here. Structurisation may
* introduce new cases of undominated SSA use, which will be handled later. */
- if (ins->opcode != VKD3DSIH_PHI)
+ if (ins->opcode != VSIR_OP_PHI)
continue;
++phi_count;
@@ -4219,12 +4323,12 @@ static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps(struct vsir_
switch (ins->opcode)
{
- case VKD3DSIH_LABEL:
+ case VSIR_OP_LABEL:
current_label = label_from_src_param(&ins->src[0]);
break;
- case VKD3DSIH_BRANCH:
- case VKD3DSIH_SWITCH_MONOLITHIC:
+ case VSIR_OP_BRANCH:
+ case VSIR_OP_SWITCH_MONOLITHIC:
info = &block_info[current_label - 1];
for (j = 0; j < info->incoming_count; ++j)
@@ -4232,7 +4336,7 @@ static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps(struct vsir_
struct phi_incoming_to_temp *incoming = &info->incomings[j];
mov_ins = &instructions[ins_count++];
- if (!vsir_instruction_init_with_params(program, mov_ins, &ins->location, VKD3DSIH_MOV, 1, 0))
+ if (!vsir_instruction_init_with_params(program, mov_ins, &ins->location, VSIR_OP_MOV, 1, 0))
goto fail;
*mov_ins->dst = *incoming->dst;
mov_ins->src = incoming->src;
@@ -4240,7 +4344,7 @@ static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps(struct vsir_
}
break;
- case VKD3DSIH_PHI:
+ case VSIR_OP_PHI:
continue;
default:
@@ -4277,7 +4381,7 @@ struct vsir_block_list
static void vsir_block_list_init(struct vsir_block_list *list)
{
- memset(list, 0, sizeof(*list));
+ *list = (struct vsir_block_list){0};
}
static void vsir_block_list_cleanup(struct vsir_block_list *list)
@@ -4649,11 +4753,11 @@ static void vsir_cfg_dump_dot(struct vsir_cfg *cfg)
switch (block->end->opcode)
{
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
shape = "trapezium";
break;
- case VKD3DSIH_BRANCH:
+ case VSIR_OP_BRANCH:
shape = vsir_register_is_label(&block->end->src[0].reg) ? "ellipse" : "box";
break;
@@ -4791,11 +4895,11 @@ static enum vkd3d_result vsir_cfg_init(struct vsir_cfg *cfg, struct vsir_program
switch (instruction->opcode)
{
- case VKD3DSIH_PHI:
- case VKD3DSIH_SWITCH_MONOLITHIC:
+ case VSIR_OP_PHI:
+ case VSIR_OP_SWITCH_MONOLITHIC:
vkd3d_unreachable();
- case VKD3DSIH_LABEL:
+ case VSIR_OP_LABEL:
{
unsigned int label = label_from_src_param(&instruction->src[0]);
@@ -4812,16 +4916,16 @@ static enum vkd3d_result vsir_cfg_init(struct vsir_cfg *cfg, struct vsir_program
break;
}
- case VKD3DSIH_BRANCH:
- case VKD3DSIH_RET:
+ case VSIR_OP_BRANCH:
+ case VSIR_OP_RET:
VKD3D_ASSERT(current_block);
current_block->end = instruction;
current_block = NULL;
break;
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
VKD3D_ASSERT(!current_block);
finish = true;
break;
@@ -4846,10 +4950,10 @@ static enum vkd3d_result vsir_cfg_init(struct vsir_cfg *cfg, struct vsir_program
switch (block->end->opcode)
{
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
break;
- case VKD3DSIH_BRANCH:
+ case VSIR_OP_BRANCH:
if (vsir_register_is_label(&block->end->src[0].reg))
{
if ((ret = vsir_cfg_add_edge(cfg, block, &block->end->src[0])) < 0)
@@ -5528,7 +5632,7 @@ static enum vkd3d_result vsir_cfg_build_structured_program(struct vsir_cfg *cfg)
/* Generate between zero and two jump instructions. */
switch (block->end->opcode)
{
- case VKD3DSIH_BRANCH:
+ case VSIR_OP_BRANCH:
{
struct vsir_cfg_edge_action action_true, action_false;
bool invert_condition = false;
@@ -5614,7 +5718,7 @@ static enum vkd3d_result vsir_cfg_build_structured_program(struct vsir_cfg *cfg)
break;
}
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
if (!(structure = vsir_cfg_structure_list_append(stack[stack_depth - 1], STRUCTURE_TYPE_JUMP)))
goto fail;
structure->u.jump.type = JUMP_RET;
@@ -6108,7 +6212,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
if (!reserve_instructions(&target->instructions, &target->ins_capacity, target->ins_count + 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VKD3DSIH_LOOP);
+ vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VSIR_OP_LOOP);
if ((ret = vsir_cfg_structure_list_emit(cfg, &loop->body, loop->idx)) < 0)
return ret;
@@ -6116,7 +6220,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
if (!reserve_instructions(&target->instructions, &target->ins_capacity, target->ins_count + 5))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VKD3DSIH_ENDLOOP);
+ vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VSIR_OP_ENDLOOP);
/* Add a trampoline to implement multilevel jumping depending on the stored
* jump_target value. */
@@ -6131,7 +6235,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
const unsigned int inner_break_target = loop->idx << 1;
if (!vsir_instruction_init_with_params(cfg->program, &target->instructions[target->ins_count],
- &no_loc, VKD3DSIH_IEQ, 1, 2))
+ &no_loc, VSIR_OP_IEQ, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
dst_param_init_temp_bool(&target->instructions[target->ins_count].dst[0], target->temp_count);
@@ -6141,7 +6245,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
++target->ins_count;
if (!vsir_instruction_init_with_params(cfg->program, &target->instructions[target->ins_count],
- &no_loc, VKD3DSIH_CONTINUEP, 0, 1))
+ &no_loc, VSIR_OP_CONTINUEP, 0, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
src_param_init_temp_bool(&target->instructions[target->ins_count].src[0], target->temp_count);
@@ -6150,7 +6254,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
++target->temp_count;
if (!vsir_instruction_init_with_params(cfg->program, &target->instructions[target->ins_count],
- &no_loc, VKD3DSIH_IEQ, 1, 2))
+ &no_loc, VSIR_OP_IEQ, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
dst_param_init_temp_bool(&target->instructions[target->ins_count].dst[0], target->temp_count);
@@ -6160,7 +6264,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
++target->ins_count;
if (!vsir_instruction_init_with_params(cfg->program, &target->instructions[target->ins_count],
- &no_loc, VKD3DSIH_BREAKP, 0, 1))
+ &no_loc, VSIR_OP_BREAKP, 0, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
target->instructions[target->ins_count].flags |= VKD3D_SHADER_CONDITIONAL_OP_Z;
@@ -6184,7 +6288,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_selection(struct vsir_cfg
return VKD3D_ERROR_OUT_OF_MEMORY;
if (!vsir_instruction_init_with_params(cfg->program, &target->instructions[target->ins_count],
- &no_loc, VKD3DSIH_IF, 0, 1))
+ &no_loc, VSIR_OP_IF, 0, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
target->instructions[target->ins_count].src[0] = *selection->condition;
@@ -6202,7 +6306,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_selection(struct vsir_cfg
if (!reserve_instructions(&target->instructions, &target->ins_capacity, target->ins_count + 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VKD3DSIH_ELSE);
+ vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VSIR_OP_ELSE);
if ((ret = vsir_cfg_structure_list_emit(cfg, &selection->else_body, loop_idx)) < 0)
return ret;
@@ -6211,7 +6315,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_selection(struct vsir_cfg
if (!reserve_instructions(&target->instructions, &target->ins_capacity, target->ins_count + 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VKD3DSIH_ENDIF);
+ vsir_instruction_init(&target->instructions[target->ins_count++], &no_loc, VSIR_OP_ENDIF);
return VKD3D_OK;
}
@@ -6235,19 +6339,19 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_jump(struct vsir_cfg *cfg,
* in the lowest bit of jump_target. */
if (jump->target == loop_idx)
{
- opcode = jump->condition ? VKD3DSIH_CONTINUEP : VKD3DSIH_CONTINUE;
+ opcode = jump->condition ? VSIR_OP_CONTINUEP : VSIR_OP_CONTINUE;
break;
}
jump_target |= 1;
/* fall through */
case JUMP_BREAK:
- opcode = jump->condition ? VKD3DSIH_BREAKP : VKD3DSIH_BREAK;
+ opcode = jump->condition ? VSIR_OP_BREAKP : VSIR_OP_BREAK;
break;
case JUMP_RET:
VKD3D_ASSERT(!jump->condition);
- opcode = VKD3DSIH_RET;
+ opcode = VSIR_OP_RET;
break;
default:
@@ -6260,7 +6364,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_jump(struct vsir_cfg *cfg,
if (jump->needs_launcher)
{
if (!vsir_instruction_init_with_params(cfg->program, &target->instructions[target->ins_count],
- &no_loc, VKD3DSIH_MOV, 1, 1))
+ &no_loc, VSIR_OP_MOV, 1, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
dst_param_init_temp_uint(&target->instructions[target->ins_count].dst[0], target->jump_target_temp_idx);
@@ -6388,7 +6492,7 @@ static enum vkd3d_result vsir_program_structurize(struct vsir_program *program,
switch (ins->opcode)
{
- case VKD3DSIH_LABEL:
+ case VSIR_OP_LABEL:
VKD3D_ASSERT(program->shader_version.type != VKD3D_SHADER_TYPE_HULL);
TRACE("Structurizing a non-hull shader.\n");
if ((ret = vsir_program_structurize_function(program, message_context,
@@ -6397,9 +6501,9 @@ static enum vkd3d_result vsir_program_structurize(struct vsir_program *program,
VKD3D_ASSERT(i == program->instructions.count);
break;
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
VKD3D_ASSERT(program->shader_version.type == VKD3D_SHADER_TYPE_HULL);
TRACE("Structurizing phase %u of a hull shader.\n", ins->opcode);
target.instructions[target.ins_count++] = *ins;
@@ -6565,7 +6669,7 @@ static enum vkd3d_result vsir_program_materialize_undominated_ssas_to_temps(stru
switch (ins->opcode)
{
- case VKD3DSIH_LABEL:
+ case VSIR_OP_LABEL:
VKD3D_ASSERT(program->shader_version.type != VKD3D_SHADER_TYPE_HULL);
TRACE("Materializing undominated SSAs in a non-hull shader.\n");
if ((ret = vsir_program_materialize_undominated_ssas_to_temps_in_function(
@@ -6574,9 +6678,9 @@ static enum vkd3d_result vsir_program_materialize_undominated_ssas_to_temps(stru
VKD3D_ASSERT(i == program->instructions.count);
break;
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
VKD3D_ASSERT(program->shader_version.type == VKD3D_SHADER_TYPE_HULL);
TRACE("Materializing undominated SSAs in phase %u of a hull shader.\n", ins->opcode);
++i;
@@ -6667,12 +6771,12 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
}
opcodes[] =
{
- [VKD3D_SHADER_COMPARISON_FUNC_EQUAL] = {VKD3DSIH_EQO, VKD3DSIH_IEQ},
- [VKD3D_SHADER_COMPARISON_FUNC_NOT_EQUAL] = {VKD3DSIH_NEO, VKD3DSIH_INE},
- [VKD3D_SHADER_COMPARISON_FUNC_GREATER_EQUAL] = {VKD3DSIH_GEO, VKD3DSIH_UGE},
- [VKD3D_SHADER_COMPARISON_FUNC_LESS] = {VKD3DSIH_LTO, VKD3DSIH_ULT},
- [VKD3D_SHADER_COMPARISON_FUNC_LESS_EQUAL] = {VKD3DSIH_GEO, VKD3DSIH_UGE, true},
- [VKD3D_SHADER_COMPARISON_FUNC_GREATER] = {VKD3DSIH_LTO, VKD3DSIH_ULT, true},
+ [VKD3D_SHADER_COMPARISON_FUNC_EQUAL] = {VSIR_OP_EQO, VSIR_OP_IEQ},
+ [VKD3D_SHADER_COMPARISON_FUNC_NOT_EQUAL] = {VSIR_OP_NEO, VSIR_OP_INE},
+ [VKD3D_SHADER_COMPARISON_FUNC_GREATER_EQUAL] = {VSIR_OP_GEO, VSIR_OP_UGE},
+ [VKD3D_SHADER_COMPARISON_FUNC_LESS] = {VSIR_OP_LTO, VSIR_OP_ULT},
+ [VKD3D_SHADER_COMPARISON_FUNC_LESS_EQUAL] = {VSIR_OP_GEO, VSIR_OP_UGE, true},
+ [VKD3D_SHADER_COMPARISON_FUNC_GREATER] = {VSIR_OP_LTO, VSIR_OP_ULT, true},
};
if (compare_func == VKD3D_SHADER_COMPARISON_FUNC_NEVER)
@@ -6682,7 +6786,7 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
ret = NULL;
ins = &program->instructions.elements[pos];
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_DISCARD, 0, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_DISCARD, 0, 1);
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_Z;
src_param_init_const_uint(&ins->src[0], 0);
@@ -6726,14 +6830,14 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
ins->src[opcodes[compare_func].swap ? 1 : 0].swizzle = VKD3D_SHADER_SWIZZLE(W, W, W, W);
++ins;
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_DISCARD, 0, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_DISCARD, 0, 1);
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_Z;
src_param_init_ssa_bool(&ins->src[0], program->ssa_count);
++program->ssa_count;
++ins;
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_OUTPUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = colour_signature_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
@@ -6799,7 +6903,7 @@ static enum vkd3d_result vsir_program_insert_alpha_test(struct vsir_program *pro
if (vsir_instruction_is_dcl(ins))
continue;
- if (ins->opcode == VKD3DSIH_RET)
+ if (ins->opcode == VSIR_OP_RET)
{
if ((ret = insert_alpha_test_before_ret(program, ins, compare_func,
ref, colour_signature_idx, colour_temp, &new_pos, message_context)) < 0)
@@ -6849,7 +6953,7 @@ static enum vkd3d_result insert_clip_planes_before_ret(struct vsir_program *prog
if (!(mask & (1u << i)))
continue;
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_DP4, 1, 2);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_DP4, 1, 2);
src_param_init_temp_float4(&ins->src[0], position_temp);
src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_CLIP_PLANE_0 + i, VKD3D_DATA_FLOAT);
ins->src[1].swizzle = VKD3D_SHADER_NO_SWIZZLE;
@@ -6867,7 +6971,7 @@ static enum vkd3d_result insert_clip_planes_before_ret(struct vsir_program *prog
++ins;
}
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_OUTPUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = position_signature_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
@@ -6974,7 +7078,7 @@ static enum vkd3d_result vsir_program_insert_clip_planes(struct vsir_program *pr
if (vsir_instruction_is_dcl(ins))
continue;
- if (ins->opcode == VKD3DSIH_RET)
+ if (ins->opcode == VSIR_OP_RET)
{
if ((ret = insert_clip_planes_before_ret(program, ins, mask, position_signature_idx,
position_temp, low_signature_idx, high_signature_idx, &new_pos)) < 0)
@@ -7020,7 +7124,7 @@ static enum vkd3d_result insert_point_size_before_ret(struct vsir_program *progr
ret = NULL;
ins = &program->instructions.elements[pos];
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_RASTOUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = VSIR_RASTOUT_POINT_SIZE;
src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE, VKD3D_DATA_FLOAT);
@@ -7066,7 +7170,7 @@ static enum vkd3d_result vsir_program_insert_point_size(struct vsir_program *pro
{
struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];
- if (ins->opcode == VKD3DSIH_RET)
+ if (ins->opcode == VSIR_OP_RET)
{
size_t new_pos;
int ret;
@@ -7155,7 +7259,7 @@ static enum vkd3d_result vsir_program_insert_point_size_clamp(struct vsir_progra
if (min_parameter)
{
- vsir_instruction_init_with_params(program, ins, loc, VKD3DSIH_MAX, 1, 2);
+ vsir_instruction_init_with_params(program, ins, loc, VSIR_OP_MAX, 1, 2);
src_param_init_ssa_float(&ins->src[0], ssa_value);
src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE_MIN, VKD3D_DATA_FLOAT);
if (max_parameter)
@@ -7174,7 +7278,7 @@ static enum vkd3d_result vsir_program_insert_point_size_clamp(struct vsir_progra
if (max_parameter)
{
- vsir_instruction_init_with_params(program, ins, loc, VKD3DSIH_MIN, 1, 2);
+ vsir_instruction_init_with_params(program, ins, loc, VSIR_OP_MIN, 1, 2);
src_param_init_ssa_float(&ins->src[0], ssa_value);
src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE_MAX, VKD3D_DATA_FLOAT);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_RASTOUT, VKD3D_DATA_FLOAT, 1);
@@ -7308,7 +7412,7 @@ static enum vkd3d_result vsir_program_insert_point_coord(struct vsir_program *pr
{
ins = &program->instructions.elements[i];
- if (!vsir_instruction_is_dcl(ins) && ins->opcode != VKD3DSIH_LABEL && ins->opcode != VKD3DSIH_NOP)
+ if (!vsir_instruction_is_dcl(ins) && ins->opcode != VSIR_OP_LABEL && ins->opcode != VSIR_OP_NOP)
break;
}
@@ -7351,7 +7455,7 @@ static enum vkd3d_result vsir_program_insert_point_coord(struct vsir_program *pr
return VKD3D_ERROR_OUT_OF_MEMORY;
ins = &program->instructions.elements[insert_pos];
- vsir_instruction_init_with_params(program, ins, &no_loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &no_loc, VSIR_OP_MOV, 1, 1);
dst_param_init_temp_float4(&ins->dst[0], coord_temp);
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0 | VKD3DSP_WRITEMASK_1;
vsir_src_param_init(&ins->src[0], VKD3DSPR_POINT_COORD, VKD3D_DATA_FLOAT, 0);
@@ -7359,7 +7463,7 @@ static enum vkd3d_result vsir_program_insert_point_coord(struct vsir_program *pr
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
++ins;
- vsir_instruction_init_with_params(program, ins, &no_loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &no_loc, VSIR_OP_MOV, 1, 1);
dst_param_init_temp_float4(&ins->dst[0], coord_temp);
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_2 | VKD3DSP_WRITEMASK_3;
vsir_src_param_init(&ins->src[0], VKD3DSPR_IMMCONST, VKD3D_DATA_FLOAT, 0);
@@ -7430,7 +7534,7 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
ins = &program->instructions.elements[pos];
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_ADD, 1, 2);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_ADD, 1, 2);
dst_param_init_ssa_float(&ins->dst[0], ssa_temp);
src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_END, VKD3D_DATA_FLOAT);
vsir_src_param_init(&ins->src[1], VKD3DSPR_INPUT, VKD3D_DATA_FLOAT, 1);
@@ -7439,7 +7543,7 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
ins->src[1].modifiers = VKD3DSPSM_NEG;
- vsir_instruction_init_with_params(program, ++ins, &loc, VKD3DSIH_MUL, 1, 2);
+ vsir_instruction_init_with_params(program, ++ins, &loc, VSIR_OP_MUL, 1, 2);
dst_param_init_ssa_float(&ins->dst[0], ssa_factor);
ins->dst[0].modifiers = VKD3DSPDM_SATURATE;
src_param_init_ssa_float(&ins->src[0], ssa_temp);
@@ -7462,7 +7566,7 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
ins = &program->instructions.elements[pos];
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MUL, 1, 2);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MUL, 1, 2);
dst_param_init_ssa_float(&ins->dst[0], ssa_temp);
src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VKD3D_DATA_FLOAT);
vsir_src_param_init(&ins->src[1], VKD3DSPR_INPUT, VKD3D_DATA_FLOAT, 1);
@@ -7470,7 +7574,7 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
ins->src[1].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
- vsir_instruction_init_with_params(program, ++ins, &loc, VKD3DSIH_EXP, 1, 1);
+ vsir_instruction_init_with_params(program, ++ins, &loc, VSIR_OP_EXP, 1, 1);
dst_param_init_ssa_float(&ins->dst[0], ssa_factor);
ins->dst[0].modifiers = VKD3DSPDM_SATURATE;
src_param_init_ssa_float(&ins->src[0], ssa_temp);
@@ -7495,7 +7599,7 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
ins = &program->instructions.elements[pos];
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MUL, 1, 2);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MUL, 1, 2);
dst_param_init_ssa_float(&ins->dst[0], ssa_temp);
src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VKD3D_DATA_FLOAT);
vsir_src_param_init(&ins->src[1], VKD3DSPR_INPUT, VKD3D_DATA_FLOAT, 1);
@@ -7503,12 +7607,12 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
ins->src[1].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
- vsir_instruction_init_with_params(program, ++ins, &loc, VKD3DSIH_MUL, 1, 2);
+ vsir_instruction_init_with_params(program, ++ins, &loc, VSIR_OP_MUL, 1, 2);
dst_param_init_ssa_float(&ins->dst[0], ssa_temp2);
src_param_init_ssa_float(&ins->src[0], ssa_temp);
src_param_init_ssa_float(&ins->src[1], ssa_temp);
- vsir_instruction_init_with_params(program, ++ins, &loc, VKD3DSIH_EXP, 1, 1);
+ vsir_instruction_init_with_params(program, ++ins, &loc, VSIR_OP_EXP, 1, 1);
dst_param_init_ssa_float(&ins->dst[0], ssa_factor);
ins->dst[0].modifiers = VKD3DSPDM_SATURATE;
src_param_init_ssa_float(&ins->src[0], ssa_temp2);
@@ -7525,13 +7629,13 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
* mad oC0, sr0, srFACTOR, FOG_COLOUR
*/
- vsir_instruction_init_with_params(program, ++ins, &loc, VKD3DSIH_ADD, 1, 2);
+ vsir_instruction_init_with_params(program, ++ins, &loc, VSIR_OP_ADD, 1, 2);
dst_param_init_ssa_float4(&ins->dst[0], program->ssa_count++);
src_param_init_temp_float4(&ins->src[0], colour_temp);
src_param_init_parameter_vec4(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_FOG_COLOUR, VKD3D_DATA_FLOAT);
ins->src[1].modifiers = VKD3DSPSM_NEG;
- vsir_instruction_init_with_params(program, ++ins, &loc, VKD3DSIH_MAD, 1, 3);
+ vsir_instruction_init_with_params(program, ++ins, &loc, VSIR_OP_MAD, 1, 3);
dst_param_init_output(&ins->dst[0], VKD3D_DATA_FLOAT, colour_signature_idx,
program->output_signature.elements[colour_signature_idx].mask);
src_param_init_ssa_float4(&ins->src[0], program->ssa_count - 1);
@@ -7599,7 +7703,7 @@ static enum vkd3d_result vsir_program_insert_fragment_fog(struct vsir_program *p
if (vsir_instruction_is_dcl(ins))
continue;
- if (ins->opcode == VKD3DSIH_RET)
+ if (ins->opcode == VSIR_OP_RET)
{
if ((ret = insert_fragment_fog_before_ret(program, ins, mode, fog_signature_idx,
colour_signature_idx, colour_temp, &new_pos, message_context)) < 0)
@@ -7677,7 +7781,7 @@ static enum vkd3d_result insert_vertex_fog_before_ret(struct vsir_program *progr
ins = &program->instructions.elements[pos];
/* Write the fog output. */
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
dst_param_init_output(&ins->dst[0], VKD3D_DATA_FLOAT, fog_signature_idx, 0x1);
src_param_init_temp_float4(&ins->src[0], temp);
if (source == VKD3D_SHADER_FOG_SOURCE_Z)
@@ -7687,7 +7791,7 @@ static enum vkd3d_result insert_vertex_fog_before_ret(struct vsir_program *progr
++ins;
/* Write the position or specular output. */
- vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
dst_param_init_output(&ins->dst[0], vkd3d_data_type_from_component_type(e->component_type),
source_signature_idx, e->mask);
src_param_init_temp_float4(&ins->src[0], temp);
@@ -7767,7 +7871,7 @@ static enum vkd3d_result vsir_program_insert_vertex_fog(struct vsir_program *pro
if (vsir_instruction_is_dcl(ins))
continue;
- if (ins->opcode == VKD3DSIH_RET)
+ if (ins->opcode == VSIR_OP_RET)
{
size_t new_pos;
int ret;
@@ -7809,308 +7913,311 @@ static bool vsir_src_is_masked(enum vkd3d_shader_opcode opcode, unsigned int src
{
switch (opcode)
{
- case VKD3DSIH_ABS:
- case VKD3DSIH_ACOS:
- case VKD3DSIH_ADD:
- case VKD3DSIH_AND:
- case VKD3DSIH_ASIN:
- case VKD3DSIH_ATAN:
- case VKD3DSIH_BFI:
- case VKD3DSIH_BFREV:
- case VKD3DSIH_CMP:
- case VKD3DSIH_CND:
- case VKD3DSIH_COS:
- case VKD3DSIH_COUNTBITS:
- case VKD3DSIH_DADD: /* NB: These are masked, but the mask is double-sized. */
- case VKD3DSIH_DDIV:
- case VKD3DSIH_DFMA:
- case VKD3DSIH_DIV:
- case VKD3DSIH_DMAX:
- case VKD3DSIH_DMIN:
- case VKD3DSIH_DMOV:
- case VKD3DSIH_DMOVC:
- case VKD3DSIH_DMUL:
- case VKD3DSIH_DRCP:
- case VKD3DSIH_DSX:
- case VKD3DSIH_DSX_COARSE:
- case VKD3DSIH_DSX_FINE:
- case VKD3DSIH_DSY:
- case VKD3DSIH_DSY_COARSE:
- case VKD3DSIH_DSY_FINE:
- case VKD3DSIH_EQO:
- case VKD3DSIH_EQU:
- case VKD3DSIH_EXP:
- case VKD3DSIH_EXPP:
- case VKD3DSIH_F16TOF32:
- case VKD3DSIH_F32TOF16:
- case VKD3DSIH_FIRSTBIT_HI:
- case VKD3DSIH_FIRSTBIT_LO:
- case VKD3DSIH_FIRSTBIT_SHI:
- case VKD3DSIH_FRC:
- case VKD3DSIH_FREM:
- case VKD3DSIH_FTOD:
- case VKD3DSIH_FTOI:
- case VKD3DSIH_FTOU:
- case VKD3DSIH_GEO:
- case VKD3DSIH_GEU:
- case VKD3DSIH_HCOS:
- case VKD3DSIH_HSIN:
- case VKD3DSIH_HTAN:
- case VKD3DSIH_IADD:
- case VKD3DSIH_IBFE:
- case VKD3DSIH_IDIV:
- case VKD3DSIH_IEQ:
- case VKD3DSIH_IGE:
- case VKD3DSIH_ILT:
- case VKD3DSIH_IMAD:
- case VKD3DSIH_IMAX:
- case VKD3DSIH_IMIN:
- case VKD3DSIH_IMUL:
- case VKD3DSIH_IMUL_LOW:
- case VKD3DSIH_INE:
- case VKD3DSIH_INEG:
- case VKD3DSIH_ISFINITE:
- case VKD3DSIH_ISHL:
- case VKD3DSIH_ISHR:
- case VKD3DSIH_ISINF:
- case VKD3DSIH_ISNAN:
- case VKD3DSIH_ITOD:
- case VKD3DSIH_ITOF:
- case VKD3DSIH_ITOI:
- case VKD3DSIH_LOG:
- case VKD3DSIH_LOGP:
- case VKD3DSIH_LRP:
- case VKD3DSIH_LTO:
- case VKD3DSIH_LTU:
- case VKD3DSIH_MAD:
- case VKD3DSIH_MAX:
- case VKD3DSIH_MIN:
- case VKD3DSIH_MOV:
- case VKD3DSIH_MOVA:
- case VKD3DSIH_MOVC:
- case VKD3DSIH_MSAD: /* FIXME: Is this correct? */
- case VKD3DSIH_MUL:
- case VKD3DSIH_NEO:
- case VKD3DSIH_NEU:
- case VKD3DSIH_NOT:
- case VKD3DSIH_OR:
- case VKD3DSIH_ORD:
- case VKD3DSIH_PHI:
- case VKD3DSIH_POW:
- case VKD3DSIH_QUAD_READ_ACROSS_D:
- case VKD3DSIH_QUAD_READ_ACROSS_X:
- case VKD3DSIH_QUAD_READ_ACROSS_Y:
- case VKD3DSIH_RCP:
- case VKD3DSIH_ROUND_NE:
- case VKD3DSIH_ROUND_NI:
- case VKD3DSIH_ROUND_PI:
- case VKD3DSIH_ROUND_Z:
- case VKD3DSIH_RSQ:
- case VKD3DSIH_SETP:
- case VKD3DSIH_SGE:
- case VKD3DSIH_SGN:
- case VKD3DSIH_SIN:
- case VKD3DSIH_SINCOS: /* FIXME: Only for sm4. */
- case VKD3DSIH_SLT:
- case VKD3DSIH_SQRT:
- case VKD3DSIH_SUB:
- case VKD3DSIH_SWAPC:
- case VKD3DSIH_TAN:
- case VKD3DSIH_UBFE:
- case VKD3DSIH_UDIV:
- case VKD3DSIH_UGE:
- case VKD3DSIH_ULT:
- case VKD3DSIH_UMAX:
- case VKD3DSIH_UMIN:
- case VKD3DSIH_UMUL:
- case VKD3DSIH_UNO:
- case VKD3DSIH_USHR:
- case VKD3DSIH_UTOD:
- case VKD3DSIH_UTOF:
- case VKD3DSIH_UTOU:
- case VKD3DSIH_WAVE_ACTIVE_ALL_EQUAL:
- case VKD3DSIH_WAVE_ACTIVE_BIT_AND:
- case VKD3DSIH_WAVE_ACTIVE_BIT_OR:
- case VKD3DSIH_WAVE_ACTIVE_BIT_XOR:
- case VKD3DSIH_WAVE_ALL_TRUE:
- case VKD3DSIH_WAVE_ANY_TRUE:
- case VKD3DSIH_WAVE_OP_ADD:
- case VKD3DSIH_WAVE_OP_IMAX:
- case VKD3DSIH_WAVE_OP_IMIN:
- case VKD3DSIH_WAVE_OP_MAX:
- case VKD3DSIH_WAVE_OP_MIN:
- case VKD3DSIH_WAVE_OP_MUL:
- case VKD3DSIH_WAVE_OP_UMAX:
- case VKD3DSIH_WAVE_OP_UMIN:
- case VKD3DSIH_WAVE_READ_LANE_FIRST:
- case VKD3DSIH_XOR:
+ case VSIR_OP_ABS:
+ case VSIR_OP_ACOS:
+ case VSIR_OP_ADD:
+ case VSIR_OP_AND:
+ case VSIR_OP_ASIN:
+ case VSIR_OP_ATAN:
+ case VSIR_OP_BFI:
+ case VSIR_OP_BFREV:
+ case VSIR_OP_CMP:
+ case VSIR_OP_CND:
+ case VSIR_OP_COS:
+ case VSIR_OP_COUNTBITS:
+ case VSIR_OP_DADD: /* NB: These are masked, but the mask is double-sized. */
+ case VSIR_OP_DDIV:
+ case VSIR_OP_DFMA:
+ case VSIR_OP_DIV:
+ case VSIR_OP_DMAX:
+ case VSIR_OP_DMIN:
+ case VSIR_OP_DMOV:
+ case VSIR_OP_DMOVC:
+ case VSIR_OP_DMUL:
+ case VSIR_OP_DRCP:
+ case VSIR_OP_DSX:
+ case VSIR_OP_DSX_COARSE:
+ case VSIR_OP_DSX_FINE:
+ case VSIR_OP_DSY:
+ case VSIR_OP_DSY_COARSE:
+ case VSIR_OP_DSY_FINE:
+ case VSIR_OP_EQO:
+ case VSIR_OP_EQU:
+ case VSIR_OP_EXP:
+ case VSIR_OP_EXPP:
+ case VSIR_OP_F16TOF32:
+ case VSIR_OP_F32TOF16:
+ case VSIR_OP_FIRSTBIT_HI:
+ case VSIR_OP_FIRSTBIT_LO:
+ case VSIR_OP_FIRSTBIT_SHI:
+ case VSIR_OP_FRC:
+ case VSIR_OP_FREM:
+ case VSIR_OP_FTOD:
+ case VSIR_OP_FTOI:
+ case VSIR_OP_FTOU:
+ case VSIR_OP_GEO:
+ case VSIR_OP_GEU:
+ case VSIR_OP_HCOS:
+ case VSIR_OP_HSIN:
+ case VSIR_OP_HTAN:
+ case VSIR_OP_IADD:
+ case VSIR_OP_IBFE:
+ case VSIR_OP_IDIV:
+ case VSIR_OP_IEQ:
+ case VSIR_OP_IGE:
+ case VSIR_OP_ILT:
+ case VSIR_OP_IMAD:
+ case VSIR_OP_IMAX:
+ case VSIR_OP_IMIN:
+ case VSIR_OP_IMUL:
+ case VSIR_OP_IMUL_LOW:
+ case VSIR_OP_INE:
+ case VSIR_OP_INEG:
+ case VSIR_OP_IREM:
+ case VSIR_OP_ISFINITE:
+ case VSIR_OP_ISHL:
+ case VSIR_OP_ISHR:
+ case VSIR_OP_ISINF:
+ case VSIR_OP_ISNAN:
+ case VSIR_OP_ITOD:
+ case VSIR_OP_ITOF:
+ case VSIR_OP_ITOI:
+ case VSIR_OP_LOG:
+ case VSIR_OP_LOGP:
+ case VSIR_OP_LRP:
+ case VSIR_OP_LTO:
+ case VSIR_OP_LTU:
+ case VSIR_OP_MAD:
+ case VSIR_OP_MAX:
+ case VSIR_OP_MIN:
+ case VSIR_OP_MOV:
+ case VSIR_OP_MOVA:
+ case VSIR_OP_MOVC:
+ case VSIR_OP_MSAD: /* FIXME: Is this correct? */
+ case VSIR_OP_MUL:
+ case VSIR_OP_NEO:
+ case VSIR_OP_NEU:
+ case VSIR_OP_NOT:
+ case VSIR_OP_OR:
+ case VSIR_OP_ORD:
+ case VSIR_OP_PHI:
+ case VSIR_OP_POW:
+ case VSIR_OP_QUAD_READ_ACROSS_D:
+ case VSIR_OP_QUAD_READ_ACROSS_X:
+ case VSIR_OP_QUAD_READ_ACROSS_Y:
+ case VSIR_OP_RCP:
+ case VSIR_OP_ROUND_NE:
+ case VSIR_OP_ROUND_NI:
+ case VSIR_OP_ROUND_PI:
+ case VSIR_OP_ROUND_Z:
+ case VSIR_OP_RSQ:
+ case VSIR_OP_SETP:
+ case VSIR_OP_SGE:
+ case VSIR_OP_SGN:
+ case VSIR_OP_SIN:
+ case VSIR_OP_SINCOS: /* FIXME: Only for sm4. */
+ case VSIR_OP_SLT:
+ case VSIR_OP_SQRT:
+ case VSIR_OP_SUB:
+ case VSIR_OP_SWAPC:
+ case VSIR_OP_TAN:
+ case VSIR_OP_UBFE:
+ case VSIR_OP_UDIV:
+ case VSIR_OP_UDIV_SIMPLE:
+ case VSIR_OP_UGE:
+ case VSIR_OP_ULT:
+ case VSIR_OP_UMAX:
+ case VSIR_OP_UMIN:
+ case VSIR_OP_UMUL:
+ case VSIR_OP_UNO:
+ case VSIR_OP_UREM:
+ case VSIR_OP_USHR:
+ case VSIR_OP_UTOD:
+ case VSIR_OP_UTOF:
+ case VSIR_OP_UTOU:
+ case VSIR_OP_WAVE_ACTIVE_ALL_EQUAL:
+ case VSIR_OP_WAVE_ACTIVE_BIT_AND:
+ case VSIR_OP_WAVE_ACTIVE_BIT_OR:
+ case VSIR_OP_WAVE_ACTIVE_BIT_XOR:
+ case VSIR_OP_WAVE_ALL_TRUE:
+ case VSIR_OP_WAVE_ANY_TRUE:
+ case VSIR_OP_WAVE_OP_ADD:
+ case VSIR_OP_WAVE_OP_IMAX:
+ case VSIR_OP_WAVE_OP_IMIN:
+ case VSIR_OP_WAVE_OP_MAX:
+ case VSIR_OP_WAVE_OP_MIN:
+ case VSIR_OP_WAVE_OP_MUL:
+ case VSIR_OP_WAVE_OP_UMAX:
+ case VSIR_OP_WAVE_OP_UMIN:
+ case VSIR_OP_WAVE_READ_LANE_FIRST:
+ case VSIR_OP_XOR:
return true;
/* Atomics can't have a writemask. */
- case VKD3DSIH_ATOMIC_AND:
- case VKD3DSIH_ATOMIC_CMP_STORE:
- case VKD3DSIH_ATOMIC_IADD:
- case VKD3DSIH_ATOMIC_IMAX:
- case VKD3DSIH_ATOMIC_IMIN:
- case VKD3DSIH_ATOMIC_OR:
- case VKD3DSIH_ATOMIC_UMAX:
- case VKD3DSIH_ATOMIC_UMIN:
- case VKD3DSIH_ATOMIC_XOR:
- case VKD3DSIH_BEM:
- case VKD3DSIH_BRANCH:
- case VKD3DSIH_BREAK:
- case VKD3DSIH_BREAKC:
- case VKD3DSIH_BREAKP:
- case VKD3DSIH_BUFINFO:
- case VKD3DSIH_CALL:
- case VKD3DSIH_CALLNZ:
- case VKD3DSIH_CASE:
- case VKD3DSIH_CHECK_ACCESS_FULLY_MAPPED: /* FIXME: Is this correct? */
- case VKD3DSIH_CONTINUE:
- case VKD3DSIH_CONTINUEP:
- case VKD3DSIH_CRS:
- case VKD3DSIH_CUT:
- case VKD3DSIH_CUT_STREAM:
- case VKD3DSIH_DCL:
- case VKD3DSIH_DCL_CONSTANT_BUFFER:
- case VKD3DSIH_DCL_FUNCTION_BODY:
- case VKD3DSIH_DCL_FUNCTION_TABLE:
- case VKD3DSIH_DCL_GLOBAL_FLAGS:
- case VKD3DSIH_DCL_GS_INSTANCES:
- case VKD3DSIH_DCL_HS_FORK_PHASE_INSTANCE_COUNT:
- case VKD3DSIH_DCL_HS_JOIN_PHASE_INSTANCE_COUNT:
- case VKD3DSIH_DCL_HS_MAX_TESSFACTOR:
- case VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER:
- case VKD3DSIH_DCL_INDEXABLE_TEMP:
- case VKD3DSIH_DCL_INDEX_RANGE:
- case VKD3DSIH_DCL_INPUT:
- case VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT:
- case VKD3DSIH_DCL_INPUT_PRIMITIVE:
- case VKD3DSIH_DCL_INPUT_PS:
- case VKD3DSIH_DCL_INPUT_PS_SGV:
- case VKD3DSIH_DCL_INPUT_PS_SIV:
- case VKD3DSIH_DCL_INPUT_SGV:
- case VKD3DSIH_DCL_INPUT_SIV:
- case VKD3DSIH_DCL_INTERFACE:
- case VKD3DSIH_DCL_OUTPUT:
- case VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT:
- case VKD3DSIH_DCL_OUTPUT_SGV:
- case VKD3DSIH_DCL_OUTPUT_SIV:
- case VKD3DSIH_DCL_OUTPUT_TOPOLOGY:
- case VKD3DSIH_DCL_RESOURCE_RAW:
- case VKD3DSIH_DCL_RESOURCE_STRUCTURED:
- case VKD3DSIH_DCL_SAMPLER:
- case VKD3DSIH_DCL_STREAM:
- case VKD3DSIH_DCL_TEMPS:
- case VKD3DSIH_DCL_TESSELLATOR_DOMAIN:
- case VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
- case VKD3DSIH_DCL_TESSELLATOR_PARTITIONING:
- case VKD3DSIH_DCL_TGSM_RAW:
- case VKD3DSIH_DCL_TGSM_STRUCTURED:
- case VKD3DSIH_DCL_THREAD_GROUP:
- case VKD3DSIH_DCL_UAV_RAW:
- case VKD3DSIH_DCL_UAV_STRUCTURED:
- case VKD3DSIH_DCL_UAV_TYPED:
- case VKD3DSIH_DCL_VERTICES_OUT:
- case VKD3DSIH_DEF:
- case VKD3DSIH_DEFAULT:
- case VKD3DSIH_DEFB:
- case VKD3DSIH_DEFI:
- case VKD3DSIH_DEQO:
- case VKD3DSIH_DGEO:
- case VKD3DSIH_DISCARD:
- case VKD3DSIH_DLT:
- case VKD3DSIH_DNE:
- case VKD3DSIH_DP2:
- case VKD3DSIH_DP2ADD:
- case VKD3DSIH_DP3:
- case VKD3DSIH_DP4:
- case VKD3DSIH_DST:
- case VKD3DSIH_DTOF:
- case VKD3DSIH_DTOI:
- case VKD3DSIH_DTOU:
- case VKD3DSIH_ELSE:
- case VKD3DSIH_EMIT:
- case VKD3DSIH_EMIT_STREAM:
- case VKD3DSIH_ENDIF:
- case VKD3DSIH_ENDLOOP:
- case VKD3DSIH_ENDREP:
- case VKD3DSIH_ENDSWITCH:
- case VKD3DSIH_FCALL:
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_DECLS:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
- case VKD3DSIH_IF:
- case VKD3DSIH_IFC:
+ case VSIR_OP_ATOMIC_AND:
+ case VSIR_OP_ATOMIC_CMP_STORE:
+ case VSIR_OP_ATOMIC_IADD:
+ case VSIR_OP_ATOMIC_IMAX:
+ case VSIR_OP_ATOMIC_IMIN:
+ case VSIR_OP_ATOMIC_OR:
+ case VSIR_OP_ATOMIC_UMAX:
+ case VSIR_OP_ATOMIC_UMIN:
+ case VSIR_OP_ATOMIC_XOR:
+ case VSIR_OP_BEM:
+ case VSIR_OP_BRANCH:
+ case VSIR_OP_BREAK:
+ case VSIR_OP_BREAKC:
+ case VSIR_OP_BREAKP:
+ case VSIR_OP_BUFINFO:
+ case VSIR_OP_CALL:
+ case VSIR_OP_CALLNZ:
+ case VSIR_OP_CASE:
+ case VSIR_OP_CHECK_ACCESS_FULLY_MAPPED: /* FIXME: Is this correct? */
+ case VSIR_OP_CONTINUE:
+ case VSIR_OP_CONTINUEP:
+ case VSIR_OP_CRS:
+ case VSIR_OP_CUT:
+ case VSIR_OP_CUT_STREAM:
+ case VSIR_OP_DCL:
+ case VSIR_OP_DCL_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_FUNCTION_BODY:
+ case VSIR_OP_DCL_FUNCTION_TABLE:
+ case VSIR_OP_DCL_GLOBAL_FLAGS:
+ case VSIR_OP_DCL_GS_INSTANCES:
+ case VSIR_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT:
+ case VSIR_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT:
+ case VSIR_OP_DCL_HS_MAX_TESSFACTOR:
+ case VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_INDEXABLE_TEMP:
+ case VSIR_OP_DCL_INDEX_RANGE:
+ case VSIR_OP_DCL_INPUT:
+ case VSIR_OP_DCL_INPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_INPUT_PRIMITIVE:
+ case VSIR_OP_DCL_INPUT_PS:
+ case VSIR_OP_DCL_INPUT_PS_SGV:
+ case VSIR_OP_DCL_INPUT_PS_SIV:
+ case VSIR_OP_DCL_INPUT_SGV:
+ case VSIR_OP_DCL_INPUT_SIV:
+ case VSIR_OP_DCL_INTERFACE:
+ case VSIR_OP_DCL_OUTPUT:
+ case VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_OUTPUT_SGV:
+ case VSIR_OP_DCL_OUTPUT_SIV:
+ case VSIR_OP_DCL_OUTPUT_TOPOLOGY:
+ case VSIR_OP_DCL_RESOURCE_RAW:
+ case VSIR_OP_DCL_RESOURCE_STRUCTURED:
+ case VSIR_OP_DCL_SAMPLER:
+ case VSIR_OP_DCL_STREAM:
+ case VSIR_OP_DCL_TEMPS:
+ case VSIR_OP_DCL_TESSELLATOR_DOMAIN:
+ case VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
+ case VSIR_OP_DCL_TESSELLATOR_PARTITIONING:
+ case VSIR_OP_DCL_TGSM_RAW:
+ case VSIR_OP_DCL_TGSM_STRUCTURED:
+ case VSIR_OP_DCL_THREAD_GROUP:
+ case VSIR_OP_DCL_UAV_RAW:
+ case VSIR_OP_DCL_UAV_STRUCTURED:
+ case VSIR_OP_DCL_UAV_TYPED:
+ case VSIR_OP_DCL_VERTICES_OUT:
+ case VSIR_OP_DEF:
+ case VSIR_OP_DEFAULT:
+ case VSIR_OP_DEFB:
+ case VSIR_OP_DEFI:
+ case VSIR_OP_DEQO:
+ case VSIR_OP_DGEO:
+ case VSIR_OP_DISCARD:
+ case VSIR_OP_DLT:
+ case VSIR_OP_DNE:
+ case VSIR_OP_DP2:
+ case VSIR_OP_DP2ADD:
+ case VSIR_OP_DP3:
+ case VSIR_OP_DP4:
+ case VSIR_OP_DST:
+ case VSIR_OP_DTOF:
+ case VSIR_OP_DTOI:
+ case VSIR_OP_DTOU:
+ case VSIR_OP_ELSE:
+ case VSIR_OP_EMIT:
+ case VSIR_OP_EMIT_STREAM:
+ case VSIR_OP_ENDIF:
+ case VSIR_OP_ENDLOOP:
+ case VSIR_OP_ENDREP:
+ case VSIR_OP_ENDSWITCH:
+ case VSIR_OP_FCALL:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_DECLS:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
+ case VSIR_OP_IF:
+ case VSIR_OP_IFC:
/* It's unclear if any mapping is done for the source value.
* Does it require replicate swizzle? */
- case VKD3DSIH_IMM_ATOMIC_ALLOC:
- case VKD3DSIH_IMM_ATOMIC_AND:
- case VKD3DSIH_IMM_ATOMIC_CMP_EXCH:
- case VKD3DSIH_IMM_ATOMIC_CONSUME:
- case VKD3DSIH_IMM_ATOMIC_EXCH:
- case VKD3DSIH_IMM_ATOMIC_IADD:
- case VKD3DSIH_IMM_ATOMIC_IMAX:
- case VKD3DSIH_IMM_ATOMIC_IMIN:
- case VKD3DSIH_IMM_ATOMIC_OR:
- case VKD3DSIH_IMM_ATOMIC_UMAX:
- case VKD3DSIH_IMM_ATOMIC_UMIN:
- case VKD3DSIH_IMM_ATOMIC_XOR:
- case VKD3DSIH_LABEL:
- case VKD3DSIH_LOOP:
- case VKD3DSIH_LIT:
- case VKD3DSIH_M3x2:
- case VKD3DSIH_M3x3:
- case VKD3DSIH_M3x4:
- case VKD3DSIH_M4x3:
- case VKD3DSIH_M4x4:
- case VKD3DSIH_NOP:
+ case VSIR_OP_IMM_ATOMIC_ALLOC:
+ case VSIR_OP_IMM_ATOMIC_AND:
+ case VSIR_OP_IMM_ATOMIC_CMP_EXCH:
+ case VSIR_OP_IMM_ATOMIC_CONSUME:
+ case VSIR_OP_IMM_ATOMIC_EXCH:
+ case VSIR_OP_IMM_ATOMIC_IADD:
+ case VSIR_OP_IMM_ATOMIC_IMAX:
+ case VSIR_OP_IMM_ATOMIC_IMIN:
+ case VSIR_OP_IMM_ATOMIC_OR:
+ case VSIR_OP_IMM_ATOMIC_UMAX:
+ case VSIR_OP_IMM_ATOMIC_UMIN:
+ case VSIR_OP_IMM_ATOMIC_XOR:
+ case VSIR_OP_LABEL:
+ case VSIR_OP_LOOP:
+ case VSIR_OP_LIT:
+ case VSIR_OP_M3x2:
+ case VSIR_OP_M3x3:
+ case VSIR_OP_M3x4:
+ case VSIR_OP_M4x3:
+ case VSIR_OP_M4x4:
+ case VSIR_OP_NOP:
/* NRM writemask must be .xyz or .xyzw. */
- case VKD3DSIH_NRM:
- case VKD3DSIH_PHASE:
- case VKD3DSIH_REP:
- case VKD3DSIH_RET:
- case VKD3DSIH_RETP:
+ case VSIR_OP_NRM:
+ case VSIR_OP_PHASE:
+ case VSIR_OP_REP:
+ case VSIR_OP_RET:
+ case VSIR_OP_RETP:
/* Store instructions always require a trivial writemask. */
- case VKD3DSIH_STORE_RAW:
- case VKD3DSIH_STORE_STRUCTURED:
- case VKD3DSIH_STORE_UAV_TYPED:
- case VKD3DSIH_SWITCH:
- case VKD3DSIH_SWITCH_MONOLITHIC:
- case VKD3DSIH_SYNC:
- case VKD3DSIH_TEX:
- case VKD3DSIH_TEXBEM:
- case VKD3DSIH_TEXBEML:
- case VKD3DSIH_TEXCOORD:
- case VKD3DSIH_TEXCRD:
- case VKD3DSIH_TEXDEPTH:
- case VKD3DSIH_TEXDP3:
- case VKD3DSIH_TEXDP3TEX:
- case VKD3DSIH_TEXKILL:
- case VKD3DSIH_TEXLD:
- case VKD3DSIH_TEXLDD:
- case VKD3DSIH_TEXLDL:
- case VKD3DSIH_TEXM3x2DEPTH:
- case VKD3DSIH_TEXM3x2PAD:
- case VKD3DSIH_TEXM3x2TEX:
- case VKD3DSIH_TEXM3x3:
- case VKD3DSIH_TEXM3x3DIFF:
- case VKD3DSIH_TEXM3x3PAD:
- case VKD3DSIH_TEXM3x3SPEC:
- case VKD3DSIH_TEXM3x3TEX:
- case VKD3DSIH_TEXM3x3VSPEC:
- case VKD3DSIH_TEXREG2AR:
- case VKD3DSIH_TEXREG2GB:
- case VKD3DSIH_TEXREG2RGB:
- case VKD3DSIH_WAVE_ACTIVE_BALLOT:
- case VKD3DSIH_WAVE_ALL_BIT_COUNT:
- case VKD3DSIH_WAVE_IS_FIRST_LANE:
- case VKD3DSIH_WAVE_PREFIX_BIT_COUNT:
+ case VSIR_OP_STORE_RAW:
+ case VSIR_OP_STORE_STRUCTURED:
+ case VSIR_OP_STORE_UAV_TYPED:
+ case VSIR_OP_SWITCH:
+ case VSIR_OP_SWITCH_MONOLITHIC:
+ case VSIR_OP_SYNC:
+ case VSIR_OP_TEX:
+ case VSIR_OP_TEXBEM:
+ case VSIR_OP_TEXBEML:
+ case VSIR_OP_TEXCOORD:
+ case VSIR_OP_TEXCRD:
+ case VSIR_OP_TEXDEPTH:
+ case VSIR_OP_TEXDP3:
+ case VSIR_OP_TEXDP3TEX:
+ case VSIR_OP_TEXKILL:
+ case VSIR_OP_TEXLD:
+ case VSIR_OP_TEXLDD:
+ case VSIR_OP_TEXLDL:
+ case VSIR_OP_TEXM3x2DEPTH:
+ case VSIR_OP_TEXM3x2PAD:
+ case VSIR_OP_TEXM3x2TEX:
+ case VSIR_OP_TEXM3x3:
+ case VSIR_OP_TEXM3x3DIFF:
+ case VSIR_OP_TEXM3x3PAD:
+ case VSIR_OP_TEXM3x3SPEC:
+ case VSIR_OP_TEXM3x3TEX:
+ case VSIR_OP_TEXM3x3VSPEC:
+ case VSIR_OP_TEXREG2AR:
+ case VSIR_OP_TEXREG2GB:
+ case VSIR_OP_TEXREG2RGB:
+ case VSIR_OP_WAVE_ACTIVE_BALLOT:
+ case VSIR_OP_WAVE_ALL_BIT_COUNT:
+ case VSIR_OP_WAVE_IS_FIRST_LANE:
+ case VSIR_OP_WAVE_PREFIX_BIT_COUNT:
return false;
- case VKD3DSIH_QUAD_READ_LANE_AT:
- case VKD3DSIH_WAVE_READ_LANE_AT:
+ case VSIR_OP_QUAD_READ_LANE_AT:
+ case VSIR_OP_WAVE_READ_LANE_AT:
return (src_idx == 0);
/* sm4 resource instructions are an odd case, since they're not actually
@@ -8124,48 +8231,48 @@ static bool vsir_src_is_masked(enum vkd3d_shader_opcode opcode, unsigned int src
/* FIXME: The documentation seems to say that these instructions behave
* this way, but is it correct?
* (It's silent about EVAL_*, but presumably they behave the same way.) */
- case VKD3DSIH_EVAL_CENTROID:
- case VKD3DSIH_EVAL_SAMPLE_INDEX:
- case VKD3DSIH_SAMPLE_INFO:
- case VKD3DSIH_SAMPLE_POS:
+ case VSIR_OP_EVAL_CENTROID:
+ case VSIR_OP_EVAL_SAMPLE_INDEX:
+ case VSIR_OP_SAMPLE_INFO:
+ case VSIR_OP_SAMPLE_POS:
return (src_idx == 0);
- case VKD3DSIH_GATHER4:
- case VKD3DSIH_GATHER4_C:
- case VKD3DSIH_GATHER4_C_S:
- case VKD3DSIH_GATHER4_S:
- case VKD3DSIH_LD:
- case VKD3DSIH_LD2DMS:
- case VKD3DSIH_LD2DMS_S:
- case VKD3DSIH_LD_RAW:
- case VKD3DSIH_LD_RAW_S:
- case VKD3DSIH_LD_S:
- case VKD3DSIH_LD_UAV_TYPED:
- case VKD3DSIH_LD_UAV_TYPED_S:
- case VKD3DSIH_LOD:
- case VKD3DSIH_RESINFO:
- case VKD3DSIH_SAMPLE:
- case VKD3DSIH_SAMPLE_B:
- case VKD3DSIH_SAMPLE_B_CL_S:
- case VKD3DSIH_SAMPLE_C:
- case VKD3DSIH_SAMPLE_CL_S:
- case VKD3DSIH_SAMPLE_C_CL_S:
- case VKD3DSIH_SAMPLE_C_LZ:
- case VKD3DSIH_SAMPLE_C_LZ_S:
- case VKD3DSIH_SAMPLE_GRAD:
- case VKD3DSIH_SAMPLE_GRAD_CL_S:
- case VKD3DSIH_SAMPLE_LOD:
- case VKD3DSIH_SAMPLE_LOD_S:
+ case VSIR_OP_GATHER4:
+ case VSIR_OP_GATHER4_C:
+ case VSIR_OP_GATHER4_C_S:
+ case VSIR_OP_GATHER4_S:
+ case VSIR_OP_LD:
+ case VSIR_OP_LD2DMS:
+ case VSIR_OP_LD2DMS_S:
+ case VSIR_OP_LD_RAW:
+ case VSIR_OP_LD_RAW_S:
+ case VSIR_OP_LD_S:
+ case VSIR_OP_LD_UAV_TYPED:
+ case VSIR_OP_LD_UAV_TYPED_S:
+ case VSIR_OP_LOD:
+ case VSIR_OP_RESINFO:
+ case VSIR_OP_SAMPLE:
+ case VSIR_OP_SAMPLE_B:
+ case VSIR_OP_SAMPLE_B_CL_S:
+ case VSIR_OP_SAMPLE_C:
+ case VSIR_OP_SAMPLE_CL_S:
+ case VSIR_OP_SAMPLE_C_CL_S:
+ case VSIR_OP_SAMPLE_C_LZ:
+ case VSIR_OP_SAMPLE_C_LZ_S:
+ case VSIR_OP_SAMPLE_GRAD:
+ case VSIR_OP_SAMPLE_GRAD_CL_S:
+ case VSIR_OP_SAMPLE_LOD:
+ case VSIR_OP_SAMPLE_LOD_S:
return (src_idx == 1);
- case VKD3DSIH_GATHER4_PO:
- case VKD3DSIH_GATHER4_PO_C:
- case VKD3DSIH_GATHER4_PO_C_S:
- case VKD3DSIH_GATHER4_PO_S:
- case VKD3DSIH_LD_STRUCTURED:
- case VKD3DSIH_LD_STRUCTURED_S:
+ case VSIR_OP_GATHER4_PO:
+ case VSIR_OP_GATHER4_PO_C:
+ case VSIR_OP_GATHER4_PO_C_S:
+ case VSIR_OP_GATHER4_PO_S:
+ case VSIR_OP_LD_STRUCTURED:
+ case VSIR_OP_LD_STRUCTURED_S:
return (src_idx == 2);
- case VKD3DSIH_INVALID:
- case VKD3DSIH_COUNT:
+ case VSIR_OP_INVALID:
+ case VSIR_OP_COUNT:
break;
}
@@ -8220,47 +8327,46 @@ static void liveness_track_dst(struct liveness_tracker *tracker, struct vkd3d_sh
switch (opcode)
{
- case VKD3DSIH_BEM:
- case VKD3DSIH_CRS:
- case VKD3DSIH_DST:
- case VKD3DSIH_LIT:
- case VKD3DSIH_M3x2:
- case VKD3DSIH_M3x3:
- case VKD3DSIH_M3x4:
- case VKD3DSIH_M4x3:
- case VKD3DSIH_M4x4:
- case VKD3DSIH_NRM:
- case VKD3DSIH_TEX:
- case VKD3DSIH_TEXBEM:
- case VKD3DSIH_TEXBEML:
- case VKD3DSIH_TEXCOORD:
- case VKD3DSIH_TEXCRD:
- case VKD3DSIH_TEXDEPTH:
- case VKD3DSIH_TEXDP3:
- case VKD3DSIH_TEXDP3TEX:
- case VKD3DSIH_TEXLD:
- case VKD3DSIH_TEXLDD:
- case VKD3DSIH_TEXLDL:
- case VKD3DSIH_TEXM3x2DEPTH:
- case VKD3DSIH_TEXM3x2PAD:
- case VKD3DSIH_TEXM3x2TEX:
- case VKD3DSIH_TEXM3x3:
- case VKD3DSIH_TEXM3x3DIFF:
- case VKD3DSIH_TEXM3x3PAD:
- case VKD3DSIH_TEXM3x3SPEC:
- case VKD3DSIH_TEXM3x3TEX:
- case VKD3DSIH_TEXM3x3VSPEC:
- case VKD3DSIH_TEXREG2AR:
- case VKD3DSIH_TEXREG2GB:
- case VKD3DSIH_TEXREG2RGB:
+ case VSIR_OP_BEM:
+ case VSIR_OP_CRS:
+ case VSIR_OP_DST:
+ case VSIR_OP_LIT:
+ case VSIR_OP_M3x2:
+ case VSIR_OP_M3x3:
+ case VSIR_OP_M3x4:
+ case VSIR_OP_M4x3:
+ case VSIR_OP_M4x4:
+ case VSIR_OP_NRM:
+ case VSIR_OP_TEX:
+ case VSIR_OP_TEXBEM:
+ case VSIR_OP_TEXBEML:
+ case VSIR_OP_TEXCOORD:
+ case VSIR_OP_TEXCRD:
+ case VSIR_OP_TEXDEPTH:
+ case VSIR_OP_TEXDP3:
+ case VSIR_OP_TEXDP3TEX:
+ case VSIR_OP_TEXLD:
+ case VSIR_OP_TEXLDD:
+ case VSIR_OP_TEXLDL:
+ case VSIR_OP_TEXM3x2DEPTH:
+ case VSIR_OP_TEXM3x2PAD:
+ case VSIR_OP_TEXM3x2TEX:
+ case VSIR_OP_TEXM3x3:
+ case VSIR_OP_TEXM3x3DIFF:
+ case VSIR_OP_TEXM3x3PAD:
+ case VSIR_OP_TEXM3x3SPEC:
+ case VSIR_OP_TEXM3x3TEX:
+ case VSIR_OP_TEXM3x3VSPEC:
+ case VSIR_OP_TEXREG2AR:
+ case VSIR_OP_TEXREG2GB:
+ case VSIR_OP_TEXREG2RGB:
/* All of these instructions have fixed destinations—they can
* in some cases be masked, but the destination cannot be
* reallocated to a different set of components. */
- case VKD3DSIH_IDIV:
- case VKD3DSIH_IMUL:
- case VKD3DSIH_SWAPC:
- case VKD3DSIH_UDIV:
- case VKD3DSIH_UMUL:
+ case VSIR_OP_IMUL:
+ case VSIR_OP_SWAPC:
+ case VSIR_OP_UDIV:
+ case VSIR_OP_UMUL:
/* These instructions don't have fixed destinations, but they have
* multiple destination and are per-component, meaning that the
* destination masks for each component have to match.
@@ -8271,7 +8377,7 @@ static void liveness_track_dst(struct liveness_tracker *tracker, struct vkd3d_sh
reg->fixed_mask = true;
break;
- case VKD3DSIH_SINCOS:
+ case VSIR_OP_SINCOS:
/* sm1 has a fixed destination like LIT, NRM.
* sm4 is two-component and masked, like IMUL. */
if (version->major < 3)
@@ -8310,12 +8416,12 @@ static enum vkd3d_result track_liveness(struct vsir_program *program, struct liv
{
const struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];
- if (ins->opcode == VKD3DSIH_LOOP || ins->opcode == VKD3DSIH_REP)
+ if (ins->opcode == VSIR_OP_LOOP || ins->opcode == VSIR_OP_REP)
{
if (!loop_depth++)
loop_start = i;
}
- else if (ins->opcode == VKD3DSIH_ENDLOOP || ins->opcode == VKD3DSIH_ENDREP)
+ else if (ins->opcode == VSIR_OP_ENDLOOP || ins->opcode == VSIR_OP_ENDREP)
{
if (!--loop_depth)
{
@@ -8494,23 +8600,23 @@ static bool vsir_opcode_is_double(enum vkd3d_shader_opcode opcode)
{
switch (opcode)
{
- case VKD3DSIH_DADD:
- case VKD3DSIH_DDIV:
- case VKD3DSIH_DFMA:
- case VKD3DSIH_DMAX:
- case VKD3DSIH_DMIN:
- case VKD3DSIH_DMOV:
- case VKD3DSIH_DMOVC:
- case VKD3DSIH_DMUL:
- case VKD3DSIH_DRCP:
- case VKD3DSIH_DEQO:
- case VKD3DSIH_DGEO:
- case VKD3DSIH_DLT:
- case VKD3DSIH_DNE:
- case VKD3DSIH_DTOF:
- case VKD3DSIH_DTOI:
- case VKD3DSIH_DTOU:
- case VKD3DSIH_FTOD:
+ case VSIR_OP_DADD:
+ case VSIR_OP_DDIV:
+ case VSIR_OP_DFMA:
+ case VSIR_OP_DMAX:
+ case VSIR_OP_DMIN:
+ case VSIR_OP_DMOV:
+ case VSIR_OP_DMOVC:
+ case VSIR_OP_DMUL:
+ case VSIR_OP_DRCP:
+ case VSIR_OP_DEQO:
+ case VSIR_OP_DGEO:
+ case VSIR_OP_DLT:
+ case VSIR_OP_DNE:
+ case VSIR_OP_DTOF:
+ case VSIR_OP_DTOI:
+ case VSIR_OP_DTOU:
+ case VSIR_OP_FTOD:
return true;
default:
@@ -8622,16 +8728,16 @@ enum vkd3d_result vsir_allocate_temp_registers(struct vsir_program *program,
{
struct vkd3d_shader_instruction *ins = &program->instructions.elements[i];
- if (ins->opcode == VKD3DSIH_DCL_TEMPS)
+ if (ins->opcode == VSIR_OP_DCL_TEMPS)
{
ins->declaration.count = temp_count;
temp_count = 0;
continue;
}
if (temp_count && program->shader_version.major >= 4
- && (ins->opcode == VKD3DSIH_HS_CONTROL_POINT_PHASE
- || ins->opcode == VKD3DSIH_HS_FORK_PHASE
- || ins->opcode == VKD3DSIH_HS_JOIN_PHASE))
+ && (ins->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE
+ || ins->opcode == VSIR_OP_HS_FORK_PHASE
+ || ins->opcode == VSIR_OP_HS_JOIN_PHASE))
{
/* The phase didn't have a dcl_temps instruction, but we added
* temps here, so we need to insert one. */
@@ -8643,7 +8749,7 @@ enum vkd3d_result vsir_allocate_temp_registers(struct vsir_program *program,
}
ins = &program->instructions.elements[i + 1];
- vsir_instruction_init(ins, &program->instructions.elements[i].location, VKD3DSIH_DCL_TEMPS);
+ vsir_instruction_init(ins, &program->instructions.elements[i].location, VSIR_OP_DCL_TEMPS);
ins->declaration.count = temp_count;
temp_count = 0;
continue;
@@ -8673,7 +8779,7 @@ enum vkd3d_result vsir_allocate_temp_registers(struct vsir_program *program,
}
ins = &program->instructions.elements[0];
- vsir_instruction_init(ins, &program->instructions.elements[1].location, VKD3DSIH_DCL_TEMPS);
+ vsir_instruction_init(ins, &program->instructions.elements[1].location, VSIR_OP_DCL_TEMPS);
ins->declaration.count = temp_count;
}
@@ -8936,10 +9042,10 @@ static const bool vsir_get_io_register_data(struct validation_context *ctx,
switch (ctx->phase)
{
- case VKD3DSIH_HS_CONTROL_POINT_PHASE: phase = PHASE_CONTROL_POINT; break;
- case VKD3DSIH_HS_FORK_PHASE: phase = PHASE_FORK; break;
- case VKD3DSIH_HS_JOIN_PHASE: phase = PHASE_JOIN; break;
- case VKD3DSIH_INVALID: phase = PHASE_NONE; break;
+ case VSIR_OP_HS_CONTROL_POINT_PHASE: phase = PHASE_CONTROL_POINT; break;
+ case VSIR_OP_HS_FORK_PHASE: phase = PHASE_FORK; break;
+ case VSIR_OP_HS_JOIN_PHASE: phase = PHASE_JOIN; break;
+ case VSIR_OP_INVALID: phase = PHASE_NONE; break;
default:
vkd3d_unreachable();
@@ -10472,6 +10578,7 @@ static void vsir_validate_logic_elementwise_operation(struct validation_context
{
static const bool types[VKD3D_DATA_COUNT] =
{
+ [VKD3D_DATA_INT] = true,
[VKD3D_DATA_UINT] = true,
[VKD3D_DATA_UINT64] = true,
[VKD3D_DATA_BOOL] = true,
@@ -10526,6 +10633,17 @@ static void vsir_validate_double_comparison_operation(struct validation_context
vsir_validate_comparison_operation(ctx, instruction, types);
}
+static void vsir_validate_float_comparison_operation(struct validation_context *ctx,
+ const struct vkd3d_shader_instruction *instruction)
+{
+ static const bool types[VKD3D_DATA_COUNT] =
+ {
+ [VKD3D_DATA_FLOAT] = true,
+ };
+
+ vsir_validate_comparison_operation(ctx, instruction, types);
+}
+
static void vsir_validate_branch(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
size_t i;
@@ -10991,18 +11109,18 @@ static void vsir_validate_dcl_vertices_out(struct validation_context *ctx,
static void vsir_validate_else(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_IF)
+ if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VSIR_OP_IF)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW,
"ELSE instruction doesn't terminate IF block.");
else
- ctx->blocks[ctx->depth - 1] = VKD3DSIH_ELSE;
+ ctx->blocks[ctx->depth - 1] = VSIR_OP_ELSE;
}
static void vsir_validate_endif(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- if (ctx->depth == 0 || (ctx->blocks[ctx->depth - 1] != VKD3DSIH_IF
- && ctx->blocks[ctx->depth - 1] != VKD3DSIH_ELSE))
+ if (ctx->depth == 0 || (ctx->blocks[ctx->depth - 1] != VSIR_OP_IF
+ && ctx->blocks[ctx->depth - 1] != VSIR_OP_ELSE))
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW,
"ENDIF instruction doesn't terminate IF/ELSE block.");
else
@@ -11012,7 +11130,7 @@ static void vsir_validate_endif(struct validation_context *ctx, const struct vkd
static void vsir_validate_endloop(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_LOOP)
+ if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VSIR_OP_LOOP)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW,
"ENDLOOP instruction doesn't terminate LOOP block.");
else
@@ -11022,7 +11140,7 @@ static void vsir_validate_endloop(struct validation_context *ctx, const struct v
static void vsir_validate_endrep(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_REP)
+ if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VSIR_OP_REP)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW,
"ENDREP instruction doesn't terminate REP block.");
else
@@ -11032,7 +11150,7 @@ static void vsir_validate_endrep(struct validation_context *ctx, const struct vk
static void vsir_validate_endswitch(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VKD3DSIH_SWITCH)
+ if (ctx->depth == 0 || ctx->blocks[ctx->depth - 1] != VSIR_OP_SWITCH)
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_CONTROL_FLOW,
"ENDSWITCH instruction doesn't terminate SWITCH block.");
else
@@ -11042,13 +11160,13 @@ static void vsir_validate_endswitch(struct validation_context *ctx, const struct
static void vsir_validate_if(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- vsir_validator_push_block(ctx, VKD3DSIH_IF);
+ vsir_validator_push_block(ctx, VSIR_OP_IF);
}
static void vsir_validate_ifc(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- vsir_validator_push_block(ctx, VKD3DSIH_IF);
+ vsir_validator_push_block(ctx, VSIR_OP_IF);
}
static void vsir_validate_label(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
@@ -11069,7 +11187,7 @@ static void vsir_validate_loop(struct validation_context *ctx, const struct vkd3
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
vsir_validate_src_count(ctx, instruction, ctx->program->shader_version.major <= 3 ? 2 : 0);
- vsir_validator_push_block(ctx, VKD3DSIH_LOOP);
+ vsir_validator_push_block(ctx, VSIR_OP_LOOP);
}
static void vsir_validate_nop(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
@@ -11139,7 +11257,7 @@ static void vsir_validate_phi(struct validation_context *ctx, const struct vkd3d
static void vsir_validate_rep(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- vsir_validator_push_block(ctx, VKD3DSIH_REP);
+ vsir_validator_push_block(ctx, VSIR_OP_REP);
}
static void vsir_validate_ret(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
@@ -11150,7 +11268,7 @@ static void vsir_validate_ret(struct validation_context *ctx, const struct vkd3d
static void vsir_validate_switch(struct validation_context *ctx, const struct vkd3d_shader_instruction *instruction)
{
vsir_validate_cf_type(ctx, instruction, VSIR_CF_STRUCTURED);
- vsir_validator_push_block(ctx, VKD3DSIH_SWITCH);
+ vsir_validator_push_block(ctx, VSIR_OP_SWITCH);
}
static void vsir_validate_switch_monolithic(struct validation_context *ctx,
@@ -11211,69 +11329,77 @@ struct vsir_validator_instruction_desc
static const struct vsir_validator_instruction_desc vsir_validator_instructions[] =
{
- [VKD3DSIH_ABS] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_ACOS] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_ADD] = {1, 2, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_AND] = {1, 2, vsir_validate_logic_elementwise_operation},
- [VKD3DSIH_ASIN] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_ATAN] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_BRANCH] = {0, ~0u, vsir_validate_branch},
- [VKD3DSIH_DADD] = {1, 2, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DDIV] = {1, 2, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DFMA] = {1, 3, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DGEO] = {1, 2, vsir_validate_double_comparison_operation},
- [VKD3DSIH_DIV] = {1, 2, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_DLT] = {1, 2, vsir_validate_double_comparison_operation},
- [VKD3DSIH_DMAX] = {1, 2, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DMIN] = {1, 2, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DMOV] = {1, 1, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DMUL] = {1, 2, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DNE] = {1, 2, vsir_validate_double_comparison_operation},
- [VKD3DSIH_DRCP] = {1, 1, vsir_validate_double_elementwise_operation},
- [VKD3DSIH_DSX] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_DSX_COARSE] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_DSX_FINE] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_DSY] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_DSY_COARSE] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_DSY_FINE] = {1, 1, vsir_validate_float_elementwise_operation},
- [VKD3DSIH_HS_CONTROL_POINT_PHASE] = {0, 0, vsir_validate_hull_shader_phase},
- [VKD3DSIH_HS_DECLS] = {0, 0, vsir_validate_hull_shader_phase},
- [VKD3DSIH_HS_FORK_PHASE] = {0, 0, vsir_validate_hull_shader_phase},
- [VKD3DSIH_HS_JOIN_PHASE] = {0, 0, vsir_validate_hull_shader_phase},
- [VKD3DSIH_DCL_GS_INSTANCES] = {0, 0, vsir_validate_dcl_gs_instances},
- [VKD3DSIH_DCL_HS_MAX_TESSFACTOR] = {0, 0, vsir_validate_dcl_hs_max_tessfactor},
- [VKD3DSIH_DCL_INDEX_RANGE] = {0, 0, vsir_validate_dcl_index_range},
- [VKD3DSIH_DCL_INPUT] = {0, 0, vsir_validate_dcl_input},
- [VKD3DSIH_DCL_INPUT_PRIMITIVE] = {0, 0, vsir_validate_dcl_input_primitive},
- [VKD3DSIH_DCL_INPUT_PS] = {0, 0, vsir_validate_dcl_input_ps},
- [VKD3DSIH_DCL_INPUT_PS_SGV] = {0, 0, vsir_validate_dcl_input_ps_sgv},
- [VKD3DSIH_DCL_INPUT_PS_SIV] = {0, 0, vsir_validate_dcl_input_ps_siv},
- [VKD3DSIH_DCL_INPUT_SGV] = {0, 0, vsir_validate_dcl_input_sgv},
- [VKD3DSIH_DCL_INPUT_SIV] = {0, 0, vsir_validate_dcl_input_siv},
- [VKD3DSIH_DCL_OUTPUT] = {0, 0, vsir_validate_dcl_output},
- [VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT] = {0, 0, vsir_validate_dcl_output_control_point_count},
- [VKD3DSIH_DCL_OUTPUT_SIV] = {0, 0, vsir_validate_dcl_output_siv},
- [VKD3DSIH_DCL_OUTPUT_TOPOLOGY] = {0, 0, vsir_validate_dcl_output_topology},
- [VKD3DSIH_DCL_TEMPS] = {0, 0, vsir_validate_dcl_temps},
- [VKD3DSIH_DCL_TESSELLATOR_DOMAIN] = {0, 0, vsir_validate_dcl_tessellator_domain},
- [VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE] = {0, 0, vsir_validate_dcl_tessellator_output_primitive},
- [VKD3DSIH_DCL_TESSELLATOR_PARTITIONING] = {0, 0, vsir_validate_dcl_tessellator_partitioning},
- [VKD3DSIH_DCL_VERTICES_OUT] = {0, 0, vsir_validate_dcl_vertices_out},
- [VKD3DSIH_ELSE] = {0, 0, vsir_validate_else},
- [VKD3DSIH_ENDIF] = {0, 0, vsir_validate_endif},
- [VKD3DSIH_ENDLOOP] = {0, 0, vsir_validate_endloop},
- [VKD3DSIH_ENDREP] = {0, 0, vsir_validate_endrep},
- [VKD3DSIH_ENDSWITCH] = {0, 0, vsir_validate_endswitch},
- [VKD3DSIH_IF] = {0, 1, vsir_validate_if},
- [VKD3DSIH_IFC] = {0, 2, vsir_validate_ifc},
- [VKD3DSIH_LABEL] = {0, 1, vsir_validate_label},
- [VKD3DSIH_LOOP] = {0, ~0u, vsir_validate_loop},
- [VKD3DSIH_NOP] = {0, 0, vsir_validate_nop},
- [VKD3DSIH_PHI] = {1, ~0u, vsir_validate_phi},
- [VKD3DSIH_REP] = {0, 1, vsir_validate_rep},
- [VKD3DSIH_RET] = {0, 0, vsir_validate_ret},
- [VKD3DSIH_SWITCH] = {0, 1, vsir_validate_switch},
- [VKD3DSIH_SWITCH_MONOLITHIC] = {0, ~0u, vsir_validate_switch_monolithic},
+ [VSIR_OP_ABS] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_ACOS] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_ADD] = {1, 2, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_AND] = {1, 2, vsir_validate_logic_elementwise_operation},
+ [VSIR_OP_ASIN] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_ATAN] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_BRANCH] = {0, ~0u, vsir_validate_branch},
+ [VSIR_OP_DADD] = {1, 2, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DDIV] = {1, 2, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DEQO] = {1, 2, vsir_validate_double_comparison_operation},
+ [VSIR_OP_DFMA] = {1, 3, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DGEO] = {1, 2, vsir_validate_double_comparison_operation},
+ [VSIR_OP_DIV] = {1, 2, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_DLT] = {1, 2, vsir_validate_double_comparison_operation},
+ [VSIR_OP_DMAX] = {1, 2, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DMIN] = {1, 2, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DMOV] = {1, 1, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DMUL] = {1, 2, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DNE] = {1, 2, vsir_validate_double_comparison_operation},
+ [VSIR_OP_DRCP] = {1, 1, vsir_validate_double_elementwise_operation},
+ [VSIR_OP_DSX] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_DSX_COARSE] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_DSX_FINE] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_DSY] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_DSY_COARSE] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_DSY_FINE] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_EQO] = {1, 2, vsir_validate_float_comparison_operation},
+ [VSIR_OP_EQU] = {1, 2, vsir_validate_float_comparison_operation},
+ [VSIR_OP_EXP] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_FRC] = {1, 1, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_FREM] = {1, 2, vsir_validate_float_elementwise_operation},
+ [VSIR_OP_GEO] = {1, 2, vsir_validate_float_comparison_operation},
+ [VSIR_OP_GEU] = {1, 2, vsir_validate_float_comparison_operation},
+ [VSIR_OP_HS_CONTROL_POINT_PHASE] = {0, 0, vsir_validate_hull_shader_phase},
+ [VSIR_OP_HS_DECLS] = {0, 0, vsir_validate_hull_shader_phase},
+ [VSIR_OP_HS_FORK_PHASE] = {0, 0, vsir_validate_hull_shader_phase},
+ [VSIR_OP_HS_JOIN_PHASE] = {0, 0, vsir_validate_hull_shader_phase},
+ [VSIR_OP_DCL_GS_INSTANCES] = {0, 0, vsir_validate_dcl_gs_instances},
+ [VSIR_OP_DCL_HS_MAX_TESSFACTOR] = {0, 0, vsir_validate_dcl_hs_max_tessfactor},
+ [VSIR_OP_DCL_INDEX_RANGE] = {0, 0, vsir_validate_dcl_index_range},
+ [VSIR_OP_DCL_INPUT] = {0, 0, vsir_validate_dcl_input},
+ [VSIR_OP_DCL_INPUT_PRIMITIVE] = {0, 0, vsir_validate_dcl_input_primitive},
+ [VSIR_OP_DCL_INPUT_PS] = {0, 0, vsir_validate_dcl_input_ps},
+ [VSIR_OP_DCL_INPUT_PS_SGV] = {0, 0, vsir_validate_dcl_input_ps_sgv},
+ [VSIR_OP_DCL_INPUT_PS_SIV] = {0, 0, vsir_validate_dcl_input_ps_siv},
+ [VSIR_OP_DCL_INPUT_SGV] = {0, 0, vsir_validate_dcl_input_sgv},
+ [VSIR_OP_DCL_INPUT_SIV] = {0, 0, vsir_validate_dcl_input_siv},
+ [VSIR_OP_DCL_OUTPUT] = {0, 0, vsir_validate_dcl_output},
+ [VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT] = {0, 0, vsir_validate_dcl_output_control_point_count},
+ [VSIR_OP_DCL_OUTPUT_SIV] = {0, 0, vsir_validate_dcl_output_siv},
+ [VSIR_OP_DCL_OUTPUT_TOPOLOGY] = {0, 0, vsir_validate_dcl_output_topology},
+ [VSIR_OP_DCL_TEMPS] = {0, 0, vsir_validate_dcl_temps},
+ [VSIR_OP_DCL_TESSELLATOR_DOMAIN] = {0, 0, vsir_validate_dcl_tessellator_domain},
+ [VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE] = {0, 0, vsir_validate_dcl_tessellator_output_primitive},
+ [VSIR_OP_DCL_TESSELLATOR_PARTITIONING] = {0, 0, vsir_validate_dcl_tessellator_partitioning},
+ [VSIR_OP_DCL_VERTICES_OUT] = {0, 0, vsir_validate_dcl_vertices_out},
+ [VSIR_OP_ELSE] = {0, 0, vsir_validate_else},
+ [VSIR_OP_ENDIF] = {0, 0, vsir_validate_endif},
+ [VSIR_OP_ENDLOOP] = {0, 0, vsir_validate_endloop},
+ [VSIR_OP_ENDREP] = {0, 0, vsir_validate_endrep},
+ [VSIR_OP_ENDSWITCH] = {0, 0, vsir_validate_endswitch},
+ [VSIR_OP_IF] = {0, 1, vsir_validate_if},
+ [VSIR_OP_IFC] = {0, 2, vsir_validate_ifc},
+ [VSIR_OP_LABEL] = {0, 1, vsir_validate_label},
+ [VSIR_OP_LOOP] = {0, ~0u, vsir_validate_loop},
+ [VSIR_OP_NOP] = {0, 0, vsir_validate_nop},
+ [VSIR_OP_PHI] = {1, ~0u, vsir_validate_phi},
+ [VSIR_OP_REP] = {0, 1, vsir_validate_rep},
+ [VSIR_OP_RET] = {0, 0, vsir_validate_ret},
+ [VSIR_OP_SWITCH] = {0, 1, vsir_validate_switch},
+ [VSIR_OP_SWITCH_MONOLITHIC] = {0, ~0u, vsir_validate_switch_monolithic},
};
static void vsir_validate_instruction(struct validation_context *ctx)
@@ -11290,21 +11416,21 @@ static void vsir_validate_instruction(struct validation_context *ctx)
for (i = 0; i < instruction->src_count; ++i)
vsir_validate_src_param(ctx, &instruction->src[i]);
- if (instruction->opcode >= VKD3DSIH_INVALID)
+ if (instruction->opcode >= VSIR_OP_INVALID)
{
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_OPCODE,
"Invalid opcode %#x.", instruction->opcode);
}
- if (version->type == VKD3D_SHADER_TYPE_HULL && ctx->phase == VKD3DSIH_INVALID)
+ if (version->type == VKD3D_SHADER_TYPE_HULL && ctx->phase == VSIR_OP_INVALID)
{
switch (instruction->opcode)
{
- case VKD3DSIH_NOP:
- case VKD3DSIH_HS_DECLS:
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_NOP:
+ case VSIR_OP_HS_DECLS:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
break;
default:
@@ -11320,12 +11446,12 @@ static void vsir_validate_instruction(struct validation_context *ctx)
{
switch (instruction->opcode)
{
- case VKD3DSIH_NOP:
- case VKD3DSIH_LABEL:
- case VKD3DSIH_HS_DECLS:
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_NOP:
+ case VSIR_OP_LABEL:
+ case VSIR_OP_HS_DECLS:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
break;
default:
@@ -11363,7 +11489,7 @@ enum vkd3d_result vsir_program_validate(struct vsir_program *program, uint64_t c
.program = program,
.null_location = {.source_name = source_name},
.status = VKD3D_OK,
- .phase = VKD3DSIH_INVALID,
+ .phase = VSIR_OP_INVALID,
.invalid_instruction_idx = true,
.outer_tess_idxs[0] = ~0u,
.outer_tess_idxs[1] = ~0u,
diff --git a/libs/vkd3d/libs/vkd3d-shader/msl.c b/libs/vkd3d/libs/vkd3d-shader/msl.c
index 87c9e989ea4..2a209323de7 100644
--- a/libs/vkd3d/libs/vkd3d-shader/msl.c
+++ b/libs/vkd3d/libs/vkd3d-shader/msl.c
@@ -836,6 +836,12 @@ static void msl_break(struct msl_generator *gen)
vkd3d_string_buffer_printf(gen->buffer, "break;\n");
}
+static void msl_continue(struct msl_generator *gen)
+{
+ msl_print_indent(gen->buffer, gen->indent);
+ vkd3d_string_buffer_printf(gen->buffer, "continue;\n");
+}
+
static void msl_switch(struct msl_generator *gen, const struct vkd3d_shader_instruction *ins)
{
struct msl_src src;
@@ -929,7 +935,7 @@ static void msl_ld(struct msl_generator *gen, const struct vkd3d_shader_instruct
if (resource_type == VKD3D_SHADER_RESOURCE_TEXTURE_CUBE
|| resource_type == VKD3D_SHADER_RESOURCE_TEXTURE_CUBEARRAY
- || (ins->opcode != VKD3DSIH_LD2DMS
+ || (ins->opcode != VSIR_OP_LD2DMS
&& (resource_type == VKD3D_SHADER_RESOURCE_TEXTURE_2DMS
|| resource_type == VKD3D_SHADER_RESOURCE_TEXTURE_2DMSARRAY)))
msl_compiler_error(gen, VKD3D_SHADER_ERROR_MSL_UNSUPPORTED,
@@ -970,7 +976,7 @@ static void msl_ld(struct msl_generator *gen, const struct vkd3d_shader_instruct
if (resource_type != VKD3D_SHADER_RESOURCE_BUFFER)
{
vkd3d_string_buffer_printf(read, ", ");
- if (ins->opcode != VKD3DSIH_LD2DMS)
+ if (ins->opcode != VSIR_OP_LD2DMS)
msl_print_src_with_type(read, gen, &ins->src[0], VKD3DSP_WRITEMASK_3, VKD3D_DATA_UINT);
else
msl_print_src_with_type(read, gen, &ins->src[2], VKD3DSP_WRITEMASK_0, VKD3D_DATA_UINT);
@@ -1001,15 +1007,15 @@ static void msl_sample(struct msl_generator *gen, const struct vkd3d_shader_inst
uint32_t coord_mask;
struct msl_dst dst;
- bias = ins->opcode == VKD3DSIH_SAMPLE_B;
- compare = ins->opcode == VKD3DSIH_GATHER4_C || ins->opcode == VKD3DSIH_SAMPLE_C
- || ins->opcode == VKD3DSIH_SAMPLE_C_LZ;
- dynamic_offset = ins->opcode == VKD3DSIH_GATHER4_PO;
- gather = ins->opcode == VKD3DSIH_GATHER4 || ins->opcode == VKD3DSIH_GATHER4_C
- || ins->opcode == VKD3DSIH_GATHER4_PO;
- grad = ins->opcode == VKD3DSIH_SAMPLE_GRAD;
- lod = ins->opcode == VKD3DSIH_SAMPLE_LOD;
- lod_zero = ins->opcode == VKD3DSIH_SAMPLE_C_LZ;
+ bias = ins->opcode == VSIR_OP_SAMPLE_B;
+ compare = ins->opcode == VSIR_OP_GATHER4_C || ins->opcode == VSIR_OP_SAMPLE_C
+ || ins->opcode == VSIR_OP_SAMPLE_C_LZ;
+ dynamic_offset = ins->opcode == VSIR_OP_GATHER4_PO;
+ gather = ins->opcode == VSIR_OP_GATHER4 || ins->opcode == VSIR_OP_GATHER4_C
+ || ins->opcode == VSIR_OP_GATHER4_PO;
+ grad = ins->opcode == VSIR_OP_SAMPLE_GRAD;
+ lod = ins->opcode == VSIR_OP_SAMPLE_LOD;
+ lod_zero = ins->opcode == VSIR_OP_SAMPLE_C_LZ;
offset = dynamic_offset || vkd3d_shader_instruction_has_texel_offset(ins);
resource = &ins->src[1 + dynamic_offset];
@@ -1273,169 +1279,187 @@ static void msl_handle_instruction(struct msl_generator *gen, const struct vkd3d
switch (ins->opcode)
{
- case VKD3DSIH_ADD:
- case VKD3DSIH_IADD:
+ case VSIR_OP_ADD:
+ case VSIR_OP_IADD:
msl_binop(gen, ins, "+");
break;
- case VKD3DSIH_AND:
+ case VSIR_OP_AND:
msl_binop(gen, ins, "&");
break;
- case VKD3DSIH_BREAK:
+ case VSIR_OP_BREAK:
msl_break(gen);
break;
- case VKD3DSIH_CASE:
+ case VSIR_OP_CASE:
msl_case(gen, ins);
break;
- case VKD3DSIH_DCL_INDEXABLE_TEMP:
+ case VSIR_OP_CONTINUE:
+ msl_continue(gen);
+ break;
+ case VSIR_OP_DCL_INDEXABLE_TEMP:
msl_dcl_indexable_temp(gen, ins);
break;
- case VKD3DSIH_NOP:
+ case VSIR_OP_NOP:
break;
- case VKD3DSIH_DEFAULT:
+ case VSIR_OP_DEFAULT:
msl_default(gen);
break;
- case VKD3DSIH_DISCARD:
+ case VSIR_OP_DISCARD:
msl_discard(gen, ins);
break;
- case VKD3DSIH_DIV:
+ case VSIR_OP_DIV:
msl_binop(gen, ins, "/");
break;
- case VKD3DSIH_DP2:
+ case VSIR_OP_DP2:
msl_dot(gen, ins, vkd3d_write_mask_from_component_count(2));
break;
- case VKD3DSIH_DP3:
+ case VSIR_OP_DP3:
msl_dot(gen, ins, vkd3d_write_mask_from_component_count(3));
break;
- case VKD3DSIH_DP4:
+ case VSIR_OP_DP4:
msl_dot(gen, ins, VKD3DSP_WRITEMASK_ALL);
break;
- case VKD3DSIH_ELSE:
+ case VSIR_OP_DSX:
+ case VSIR_OP_DSX_COARSE:
+ case VSIR_OP_DSX_FINE:
+ /* dfdx() and dfdy() are specified to return "a high precision
+ * partial derivative", which would seem to correspond to
+ * DSX_FINE/DSY_FINE. As of MSL 3.2, coarse/fast variants don't
+ * appear to be available. */
+ msl_intrinsic(gen, ins, "dfdx");
+ break;
+ case VSIR_OP_DSY:
+ case VSIR_OP_DSY_COARSE:
+ case VSIR_OP_DSY_FINE:
+ msl_intrinsic(gen, ins, "dfdy");
+ break;
+ case VSIR_OP_ELSE:
msl_else(gen);
break;
- case VKD3DSIH_ENDIF:
- case VKD3DSIH_ENDLOOP:
- case VKD3DSIH_ENDSWITCH:
+ case VSIR_OP_ENDIF:
+ case VSIR_OP_ENDLOOP:
+ case VSIR_OP_ENDSWITCH:
msl_end_block(gen);
break;
- case VKD3DSIH_EQO:
- case VKD3DSIH_IEQ:
+ case VSIR_OP_EQO:
+ case VSIR_OP_IEQ:
msl_relop(gen, ins, "==");
break;
- case VKD3DSIH_EXP:
+ case VSIR_OP_EXP:
msl_intrinsic(gen, ins, "exp2");
break;
- case VKD3DSIH_FRC:
+ case VSIR_OP_FRC:
msl_intrinsic(gen, ins, "fract");
break;
- case VKD3DSIH_FTOI:
+ case VSIR_OP_FTOI:
msl_cast(gen, ins, "int");
break;
- case VKD3DSIH_FTOU:
+ case VSIR_OP_FTOU:
msl_cast(gen, ins, "uint");
break;
- case VKD3DSIH_GATHER4:
- case VKD3DSIH_GATHER4_C:
- case VKD3DSIH_GATHER4_PO:
- case VKD3DSIH_SAMPLE:
- case VKD3DSIH_SAMPLE_B:
- case VKD3DSIH_SAMPLE_C:
- case VKD3DSIH_SAMPLE_C_LZ:
- case VKD3DSIH_SAMPLE_GRAD:
- case VKD3DSIH_SAMPLE_LOD:
+ case VSIR_OP_GATHER4:
+ case VSIR_OP_GATHER4_C:
+ case VSIR_OP_GATHER4_PO:
+ case VSIR_OP_SAMPLE:
+ case VSIR_OP_SAMPLE_B:
+ case VSIR_OP_SAMPLE_C:
+ case VSIR_OP_SAMPLE_C_LZ:
+ case VSIR_OP_SAMPLE_GRAD:
+ case VSIR_OP_SAMPLE_LOD:
msl_sample(gen, ins);
break;
- case VKD3DSIH_GEO:
- case VKD3DSIH_IGE:
+ case VSIR_OP_GEO:
+ case VSIR_OP_IGE:
msl_relop(gen, ins, ">=");
break;
- case VKD3DSIH_IF:
+ case VSIR_OP_IF:
msl_if(gen, ins);
break;
- case VKD3DSIH_ISHL:
+ case VSIR_OP_ISHL:
msl_binop(gen, ins, "<<");
break;
- case VKD3DSIH_ISHR:
- case VKD3DSIH_USHR:
+ case VSIR_OP_ISHR:
+ case VSIR_OP_USHR:
msl_binop(gen, ins, ">>");
break;
- case VKD3DSIH_ILT:
- case VKD3DSIH_LTO:
- case VKD3DSIH_ULT:
+ case VSIR_OP_ILT:
+ case VSIR_OP_LTO:
+ case VSIR_OP_ULT:
msl_relop(gen, ins, "<");
break;
- case VKD3DSIH_MAD:
+ case VSIR_OP_MAD:
msl_intrinsic(gen, ins, "fma");
break;
- case VKD3DSIH_MAX:
+ case VSIR_OP_IMAX:
+ case VSIR_OP_MAX:
msl_intrinsic(gen, ins, "max");
break;
- case VKD3DSIH_MIN:
+ case VSIR_OP_MIN:
msl_intrinsic(gen, ins, "min");
break;
- case VKD3DSIH_IMUL_LOW:
+ case VSIR_OP_IMUL_LOW:
msl_binop(gen, ins, "*");
break;
- case VKD3DSIH_INE:
- case VKD3DSIH_NEU:
+ case VSIR_OP_INE:
+ case VSIR_OP_NEU:
msl_relop(gen, ins, "!=");
break;
- case VKD3DSIH_INEG:
+ case VSIR_OP_INEG:
msl_unary_op(gen, ins, "-");
break;
- case VKD3DSIH_ITOF:
- case VKD3DSIH_UTOF:
+ case VSIR_OP_ITOF:
+ case VSIR_OP_UTOF:
msl_cast(gen, ins, "float");
break;
- case VKD3DSIH_LD:
- case VKD3DSIH_LD2DMS:
+ case VSIR_OP_LD:
+ case VSIR_OP_LD2DMS:
msl_ld(gen, ins);
break;
- case VKD3DSIH_LOG:
+ case VSIR_OP_LOG:
msl_intrinsic(gen, ins, "log2");
break;
- case VKD3DSIH_LOOP:
+ case VSIR_OP_LOOP:
msl_loop(gen);
break;
- case VKD3DSIH_MOV:
+ case VSIR_OP_MOV:
msl_mov(gen, ins);
break;
- case VKD3DSIH_MOVC:
+ case VSIR_OP_MOVC:
msl_movc(gen, ins);
break;
- case VKD3DSIH_MUL:
+ case VSIR_OP_MUL:
msl_binop(gen, ins, "*");
break;
- case VKD3DSIH_NOT:
+ case VSIR_OP_NOT:
msl_unary_op(gen, ins, "~");
break;
- case VKD3DSIH_OR:
+ case VSIR_OP_OR:
msl_binop(gen, ins, "|");
break;
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
msl_ret(gen, ins);
break;
- case VKD3DSIH_ROUND_NE:
+ case VSIR_OP_ROUND_NE:
msl_intrinsic(gen, ins, "rint");
break;
- case VKD3DSIH_ROUND_NI:
+ case VSIR_OP_ROUND_NI:
msl_intrinsic(gen, ins, "floor");
break;
- case VKD3DSIH_ROUND_PI:
+ case VSIR_OP_ROUND_PI:
msl_intrinsic(gen, ins, "ceil");
break;
- case VKD3DSIH_ROUND_Z:
+ case VSIR_OP_ROUND_Z:
msl_intrinsic(gen, ins, "trunc");
break;
- case VKD3DSIH_RSQ:
+ case VSIR_OP_RSQ:
msl_intrinsic(gen, ins, "rsqrt");
break;
- case VKD3DSIH_SQRT:
+ case VSIR_OP_SQRT:
msl_intrinsic(gen, ins, "sqrt");
break;
- case VKD3DSIH_SWITCH:
+ case VSIR_OP_SWITCH:
msl_switch(gen, ins);
break;
- case VKD3DSIH_XOR:
+ case VSIR_OP_XOR:
msl_binop(gen, ins, "^");
break;
default:
diff --git a/libs/vkd3d/libs/vkd3d-shader/spirv.c b/libs/vkd3d/libs/vkd3d-shader/spirv.c
index 7eddf47151b..c51a6a394c0 100644
--- a/libs/vkd3d/libs/vkd3d-shader/spirv.c
+++ b/libs/vkd3d/libs/vkd3d-shader/spirv.c
@@ -3102,17 +3102,17 @@ struct spirv_compiler
static bool is_in_default_phase(const struct spirv_compiler *compiler)
{
- return compiler->phase == VKD3DSIH_INVALID;
+ return compiler->phase == VSIR_OP_INVALID;
}
static bool is_in_control_point_phase(const struct spirv_compiler *compiler)
{
- return compiler->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE;
+ return compiler->phase == VSIR_OP_HS_CONTROL_POINT_PHASE;
}
static bool is_in_fork_or_join_phase(const struct spirv_compiler *compiler)
{
- return compiler->phase == VKD3DSIH_HS_FORK_PHASE || compiler->phase == VKD3DSIH_HS_JOIN_PHASE;
+ return compiler->phase == VSIR_OP_HS_FORK_PHASE || compiler->phase == VSIR_OP_HS_JOIN_PHASE;
}
static void spirv_compiler_emit_initial_declarations(struct spirv_compiler *compiler);
@@ -3295,7 +3295,7 @@ static struct spirv_compiler *spirv_compiler_create(const struct vsir_program *p
else if (compiler->shader_type != VKD3D_SHADER_TYPE_GEOMETRY)
compiler->emit_point_size = compiler->xfb_info && compiler->xfb_info->element_count;
- compiler->phase = VKD3DSIH_INVALID;
+ compiler->phase = VSIR_OP_INVALID;
vkd3d_string_buffer_cache_init(&compiler->string_buffers);
@@ -5536,13 +5536,13 @@ static void spirv_compiler_emit_shader_phase_name(struct spirv_compiler *compile
switch (compiler->phase)
{
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
name = "control";
break;
- case VKD3DSIH_HS_FORK_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
name = "fork";
break;
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
name = "join";
break;
default:
@@ -7405,7 +7405,7 @@ static void spirv_compiler_enter_shader_phase(struct spirv_compiler *compiler,
compiler->phase = instruction->opcode;
spirv_compiler_emit_shader_phase_name(compiler, function_id, NULL);
- phase = (instruction->opcode == VKD3DSIH_HS_CONTROL_POINT_PHASE)
+ phase = (instruction->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE)
? &compiler->control_point_phase : &compiler->patch_constant_phase;
phase->function_id = function_id;
/* The insertion location must be set after the label is emitted. */
@@ -7419,7 +7419,7 @@ static void spirv_compiler_initialise_block(struct spirv_compiler *compiler)
/* Insertion locations must point immediately after the function's initial label. */
if (compiler->shader_type == VKD3D_SHADER_TYPE_HULL)
{
- struct vkd3d_shader_phase *phase = (compiler->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE)
+ struct vkd3d_shader_phase *phase = (compiler->phase == VSIR_OP_HS_CONTROL_POINT_PHASE)
? &compiler->control_point_phase : &compiler->patch_constant_phase;
if (!phase->function_location)
phase->function_location = vkd3d_spirv_stream_current_location(&builder->function_stream);
@@ -7521,36 +7521,40 @@ static SpvOp spirv_compiler_map_alu_instruction(const struct vkd3d_shader_instru
}
alu_ops[] =
{
- {VKD3DSIH_ADD, SpvOpFAdd},
- {VKD3DSIH_AND, SpvOpBitwiseAnd},
- {VKD3DSIH_BFREV, SpvOpBitReverse},
- {VKD3DSIH_COUNTBITS, SpvOpBitCount},
- {VKD3DSIH_DADD, SpvOpFAdd},
- {VKD3DSIH_DDIV, SpvOpFDiv},
- {VKD3DSIH_DIV, SpvOpFDiv},
- {VKD3DSIH_DMUL, SpvOpFMul},
- {VKD3DSIH_DTOI, SpvOpConvertFToS},
- {VKD3DSIH_DTOU, SpvOpConvertFToU},
- {VKD3DSIH_FREM, SpvOpFRem},
- {VKD3DSIH_FTOD, SpvOpFConvert},
- {VKD3DSIH_IADD, SpvOpIAdd},
- {VKD3DSIH_IMUL_LOW, SpvOpIMul},
- {VKD3DSIH_INEG, SpvOpSNegate},
- {VKD3DSIH_ISHL, SpvOpShiftLeftLogical},
- {VKD3DSIH_ISHR, SpvOpShiftRightArithmetic},
- {VKD3DSIH_ISINF, SpvOpIsInf},
- {VKD3DSIH_ISNAN, SpvOpIsNan},
- {VKD3DSIH_ITOD, SpvOpConvertSToF},
- {VKD3DSIH_ITOF, SpvOpConvertSToF},
- {VKD3DSIH_ITOI, SpvOpSConvert},
- {VKD3DSIH_MUL, SpvOpFMul},
- {VKD3DSIH_NOT, SpvOpNot},
- {VKD3DSIH_OR, SpvOpBitwiseOr},
- {VKD3DSIH_USHR, SpvOpShiftRightLogical},
- {VKD3DSIH_UTOD, SpvOpConvertUToF},
- {VKD3DSIH_UTOF, SpvOpConvertUToF},
- {VKD3DSIH_UTOU, SpvOpUConvert},
- {VKD3DSIH_XOR, SpvOpBitwiseXor},
+ {VSIR_OP_ADD, SpvOpFAdd},
+ {VSIR_OP_AND, SpvOpBitwiseAnd},
+ {VSIR_OP_BFREV, SpvOpBitReverse},
+ {VSIR_OP_COUNTBITS, SpvOpBitCount},
+ {VSIR_OP_DADD, SpvOpFAdd},
+ {VSIR_OP_DDIV, SpvOpFDiv},
+ {VSIR_OP_DIV, SpvOpFDiv},
+ {VSIR_OP_DMUL, SpvOpFMul},
+ {VSIR_OP_DTOI, SpvOpConvertFToS},
+ {VSIR_OP_DTOU, SpvOpConvertFToU},
+ {VSIR_OP_FREM, SpvOpFRem},
+ {VSIR_OP_FTOD, SpvOpFConvert},
+ {VSIR_OP_IADD, SpvOpIAdd},
+ {VSIR_OP_IDIV, SpvOpSDiv},
+ {VSIR_OP_IMUL_LOW, SpvOpIMul},
+ {VSIR_OP_INEG, SpvOpSNegate},
+ {VSIR_OP_IREM, SpvOpSRem},
+ {VSIR_OP_ISHL, SpvOpShiftLeftLogical},
+ {VSIR_OP_ISHR, SpvOpShiftRightArithmetic},
+ {VSIR_OP_ISINF, SpvOpIsInf},
+ {VSIR_OP_ISNAN, SpvOpIsNan},
+ {VSIR_OP_ITOD, SpvOpConvertSToF},
+ {VSIR_OP_ITOF, SpvOpConvertSToF},
+ {VSIR_OP_ITOI, SpvOpSConvert},
+ {VSIR_OP_MUL, SpvOpFMul},
+ {VSIR_OP_NOT, SpvOpNot},
+ {VSIR_OP_OR, SpvOpBitwiseOr},
+ {VSIR_OP_UDIV_SIMPLE, SpvOpUDiv},
+ {VSIR_OP_UREM, SpvOpUMod},
+ {VSIR_OP_USHR, SpvOpShiftRightLogical},
+ {VSIR_OP_UTOD, SpvOpConvertUToF},
+ {VSIR_OP_UTOF, SpvOpConvertUToF},
+ {VSIR_OP_UTOU, SpvOpUConvert},
+ {VSIR_OP_XOR, SpvOpBitwiseXor},
};
unsigned int i;
@@ -7567,11 +7571,11 @@ static SpvOp spirv_compiler_map_logical_instruction(const struct vkd3d_shader_in
{
switch (instruction->opcode)
{
- case VKD3DSIH_AND:
+ case VSIR_OP_AND:
return SpvOpLogicalAnd;
- case VKD3DSIH_OR:
+ case VSIR_OP_OR:
return SpvOpLogicalOr;
- case VKD3DSIH_XOR:
+ case VSIR_OP_XOR:
return SpvOpLogicalNotEqual;
default:
return SpvOpMax;
@@ -7590,20 +7594,20 @@ static void spirv_compiler_emit_bool_cast(struct spirv_compiler *compiler,
val_id = spirv_compiler_emit_load_src(compiler, src, dst->write_mask);
if (dst->reg.data_type == VKD3D_DATA_HALF || dst->reg.data_type == VKD3D_DATA_FLOAT)
{
- val_id = spirv_compiler_emit_bool_to_float(compiler, 1, val_id, instruction->opcode == VKD3DSIH_ITOF);
+ val_id = spirv_compiler_emit_bool_to_float(compiler, 1, val_id, instruction->opcode == VSIR_OP_ITOF);
}
else if (dst->reg.data_type == VKD3D_DATA_DOUBLE)
{
/* ITOD is not supported. Frontends which emit bool casts must use ITOF for double. */
- val_id = spirv_compiler_emit_bool_to_double(compiler, 1, val_id, instruction->opcode == VKD3DSIH_ITOF);
+ val_id = spirv_compiler_emit_bool_to_double(compiler, 1, val_id, instruction->opcode == VSIR_OP_ITOF);
}
else if (dst->reg.data_type == VKD3D_DATA_UINT16 || dst->reg.data_type == VKD3D_DATA_UINT)
{
- val_id = spirv_compiler_emit_bool_to_int(compiler, 1, val_id, instruction->opcode == VKD3DSIH_ITOI);
+ val_id = spirv_compiler_emit_bool_to_int(compiler, 1, val_id, instruction->opcode == VSIR_OP_ITOI);
}
else if (dst->reg.data_type == VKD3D_DATA_UINT64)
{
- val_id = spirv_compiler_emit_bool_to_int64(compiler, 1, val_id, instruction->opcode == VKD3DSIH_ITOI);
+ val_id = spirv_compiler_emit_bool_to_int64(compiler, 1, val_id, instruction->opcode == VSIR_OP_ITOI);
}
else
{
@@ -7618,15 +7622,16 @@ static void spirv_compiler_emit_bool_cast(struct spirv_compiler *compiler,
static enum vkd3d_result spirv_compiler_emit_alu_instruction(struct spirv_compiler *compiler,
const struct vkd3d_shader_instruction *instruction)
{
+ uint32_t src_ids[SPIRV_MAX_SRC_COUNT], condition_id = 0, uint_max_id = 0;
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
const struct vkd3d_shader_dst_param *dst = instruction->dst;
const struct vkd3d_shader_src_param *src = instruction->src;
- uint32_t src_ids[SPIRV_MAX_SRC_COUNT];
+ unsigned int i, component_count;
uint32_t type_id, val_id;
SpvOp op = SpvOpMax;
- unsigned int i;
+ bool check_zero;
- if (src->reg.data_type == VKD3D_DATA_UINT64 && instruction->opcode == VKD3DSIH_COUNTBITS)
+ if (src->reg.data_type == VKD3D_DATA_UINT64 && instruction->opcode == VSIR_OP_COUNTBITS)
{
/* At least some drivers support this anyway, but if validation is enabled it will fail. */
FIXME("Unsupported 64-bit source for bit count.\n");
@@ -7642,8 +7647,8 @@ static enum vkd3d_result spirv_compiler_emit_alu_instruction(struct spirv_compil
/* VSIR supports logic ops AND/OR/XOR on bool values. */
op = spirv_compiler_map_logical_instruction(instruction);
}
- else if (instruction->opcode == VKD3DSIH_ITOF || instruction->opcode == VKD3DSIH_UTOF
- || instruction->opcode == VKD3DSIH_ITOI || instruction->opcode == VKD3DSIH_UTOU)
+ else if (instruction->opcode == VSIR_OP_ITOF || instruction->opcode == VSIR_OP_UTOF
+ || instruction->opcode == VSIR_OP_ITOI || instruction->opcode == VSIR_OP_UTOU)
{
/* VSIR supports cast from bool to signed/unsigned integer types and floating point types,
* where bool is treated as a 1-bit integer and a signed 'true' value converts to -1. */
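
As an aside on the bool-cast note above: a VSIR bool behaves as a 1-bit integer, so the signed casts (ITOF/ITOI) turn 'true' into -1, and the unsigned casts (UTOF/UTOU) presumably yield 1, which the comment implies by singling out the signed case. A minimal scalar sketch of those semantics; the helper names are illustrative, not vkd3d functions.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: scalar semantics of the VSIR bool casts described above. */
static int32_t  bool_cast_itoi(bool b) { return b ? -1 : 0; }       /* signed: true -> -1 */
static uint32_t bool_cast_utou(bool b) { return b ? 1u : 0u; }      /* unsigned: true -> 1 (assumed) */
static float    bool_cast_itof(bool b) { return b ? -1.0f : 0.0f; } /* signed float cast */
static float    bool_cast_utof(bool b) { return b ? 1.0f : 0.0f; }  /* unsigned float cast (assumed) */
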
@@ -7664,14 +7669,44 @@ static enum vkd3d_result spirv_compiler_emit_alu_instruction(struct spirv_compil
return VKD3D_ERROR_INVALID_SHADER;
}
+ /* SPIR-V doesn't mandate a behaviour when a denominator is zero,
+ * so we have an explicit check. */
+ switch (instruction->opcode)
+ {
+ case VSIR_OP_IDIV:
+ case VSIR_OP_IREM:
+ case VSIR_OP_UDIV_SIMPLE:
+ case VSIR_OP_UREM:
+ check_zero = true;
+ break;
+
+ default:
+ check_zero = false;
+ break;
+ }
+
VKD3D_ASSERT(instruction->dst_count == 1);
VKD3D_ASSERT(instruction->src_count <= SPIRV_MAX_SRC_COUNT);
+ if (check_zero)
+ VKD3D_ASSERT(instruction->src_count == 2);
+ component_count = vsir_write_mask_component_count(dst[0].write_mask);
type_id = spirv_compiler_get_type_id_for_dst(compiler, dst);
for (i = 0; i < instruction->src_count; ++i)
src_ids[i] = spirv_compiler_emit_load_src(compiler, &src[i], dst->write_mask);
+ if (check_zero)
+ {
+ condition_id = spirv_compiler_emit_int_to_bool(compiler,
+ VKD3D_SHADER_CONDITIONAL_OP_NZ, src[1].reg.data_type, component_count, src_ids[1]);
+
+ if (dst[0].reg.data_type == VKD3D_DATA_UINT64)
+ uint_max_id = spirv_compiler_get_constant_uint64_vector(compiler, UINT64_MAX, component_count);
+ else
+ uint_max_id = spirv_compiler_get_constant_uint_vector(compiler, UINT_MAX, component_count);
+ }
+
/* The SPIR-V specification states, "The resulting value is undefined if
* Shift is greater than or equal to the bit width of the components of
* Base." Direct3D applies only the lowest 5 bits of the shift.
@@ -7679,8 +7714,8 @@ static enum vkd3d_result spirv_compiler_emit_alu_instruction(struct spirv_compil
* Microsoft fxc will compile immediate constants larger than 5 bits.
* Fixing up the constants would be more elegant, but the simplest way is
* to let this handle constants too. */
- if (!(instruction->flags & VKD3DSI_SHIFT_UNMASKED) && (instruction->opcode == VKD3DSIH_ISHL
- || instruction->opcode == VKD3DSIH_ISHR || instruction->opcode == VKD3DSIH_USHR))
+ if (!(instruction->flags & VKD3DSI_SHIFT_UNMASKED) && (instruction->opcode == VSIR_OP_ISHL
+ || instruction->opcode == VSIR_OP_ISHR || instruction->opcode == VSIR_OP_USHR))
{
uint32_t mask_id = spirv_compiler_get_constant_vector(compiler,
VKD3D_SHADER_COMPONENT_UINT, vsir_write_mask_component_count(dst->write_mask), 0x1f);
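
The comment above is the whole story for shifts: SPIR-V leaves the result undefined once the shift amount reaches the component width, while Direct3D keeps only the low 5 bits, which is exactly what ANDing with the 0x1f constant (mask_id) reproduces. A scalar sketch of the intended behaviour; the helper name is illustrative.

#include <stdint.h>

/* Illustrative only: D3D-style left shift with the amount masked to 5 bits,
 * matching the 0x1f mask applied above. */
static uint32_t ishl_masked(uint32_t value, uint32_t shift)
{
    return value << (shift & 0x1fu);
}
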
@@ -7692,6 +7727,9 @@ static enum vkd3d_result spirv_compiler_emit_alu_instruction(struct spirv_compil
if (instruction->flags & VKD3DSI_PRECISE_XYZW)
vkd3d_spirv_build_op_decorate(builder, val_id, SpvDecorationNoContraction, NULL, 0);
+ if (check_zero)
+ val_id = vkd3d_spirv_build_op_select(builder, type_id, condition_id, val_id, uint_max_id);
+
spirv_compiler_emit_store_dst(compiler, dst, val_id);
return VKD3D_OK;
}
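
To recap the check_zero path added above, which supersedes the spirv_compiler_emit_int_div() helper removed later in this patch: since SPIR-V leaves integer division by zero undefined, the quotient or remainder is computed unconditionally and OpSelect then substitutes an all-ones value for the components whose divisor is zero. A scalar sketch of the resulting semantics for the unsigned case; the helper names are illustrative.

#include <stdint.h>

/* Illustrative only: per-component result of select(divisor != 0, a / divisor, UINT_MAX). */
static uint32_t udiv_guarded(uint32_t a, uint32_t divisor)
{
    return divisor ? a / divisor : UINT32_MAX;
}

/* Illustrative only: the UREM counterpart, select(divisor != 0, a % divisor, UINT_MAX). */
static uint32_t urem_guarded(uint32_t a, uint32_t divisor)
{
    return divisor ? a % divisor : UINT32_MAX;
}
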
@@ -7723,38 +7761,38 @@ static enum GLSLstd450 spirv_compiler_map_ext_glsl_instruction(
}
glsl_insts[] =
{
- {VKD3DSIH_ABS, GLSLstd450FAbs},
- {VKD3DSIH_ACOS, GLSLstd450Acos},
- {VKD3DSIH_ASIN, GLSLstd450Asin},
- {VKD3DSIH_ATAN, GLSLstd450Atan},
- {VKD3DSIH_COS, GLSLstd450Cos},
- {VKD3DSIH_DFMA, GLSLstd450Fma},
- {VKD3DSIH_DMAX, GLSLstd450NMax},
- {VKD3DSIH_DMIN, GLSLstd450NMin},
- {VKD3DSIH_EXP, GLSLstd450Exp2},
- {VKD3DSIH_FIRSTBIT_HI, GLSLstd450FindUMsb},
- {VKD3DSIH_FIRSTBIT_LO, GLSLstd450FindILsb},
- {VKD3DSIH_FIRSTBIT_SHI, GLSLstd450FindSMsb},
- {VKD3DSIH_FRC, GLSLstd450Fract},
- {VKD3DSIH_HCOS, GLSLstd450Cosh},
- {VKD3DSIH_HSIN, GLSLstd450Sinh},
- {VKD3DSIH_HTAN, GLSLstd450Tanh},
- {VKD3DSIH_IMAX, GLSLstd450SMax},
- {VKD3DSIH_IMIN, GLSLstd450SMin},
- {VKD3DSIH_LOG, GLSLstd450Log2},
- {VKD3DSIH_MAD, GLSLstd450Fma},
- {VKD3DSIH_MAX, GLSLstd450NMax},
- {VKD3DSIH_MIN, GLSLstd450NMin},
- {VKD3DSIH_ROUND_NE, GLSLstd450RoundEven},
- {VKD3DSIH_ROUND_NI, GLSLstd450Floor},
- {VKD3DSIH_ROUND_PI, GLSLstd450Ceil},
- {VKD3DSIH_ROUND_Z, GLSLstd450Trunc},
- {VKD3DSIH_RSQ, GLSLstd450InverseSqrt},
- {VKD3DSIH_SIN, GLSLstd450Sin},
- {VKD3DSIH_SQRT, GLSLstd450Sqrt},
- {VKD3DSIH_TAN, GLSLstd450Tan},
- {VKD3DSIH_UMAX, GLSLstd450UMax},
- {VKD3DSIH_UMIN, GLSLstd450UMin},
+ {VSIR_OP_ABS, GLSLstd450FAbs},
+ {VSIR_OP_ACOS, GLSLstd450Acos},
+ {VSIR_OP_ASIN, GLSLstd450Asin},
+ {VSIR_OP_ATAN, GLSLstd450Atan},
+ {VSIR_OP_COS, GLSLstd450Cos},
+ {VSIR_OP_DFMA, GLSLstd450Fma},
+ {VSIR_OP_DMAX, GLSLstd450NMax},
+ {VSIR_OP_DMIN, GLSLstd450NMin},
+ {VSIR_OP_EXP, GLSLstd450Exp2},
+ {VSIR_OP_FIRSTBIT_HI, GLSLstd450FindUMsb},
+ {VSIR_OP_FIRSTBIT_LO, GLSLstd450FindILsb},
+ {VSIR_OP_FIRSTBIT_SHI, GLSLstd450FindSMsb},
+ {VSIR_OP_FRC, GLSLstd450Fract},
+ {VSIR_OP_HCOS, GLSLstd450Cosh},
+ {VSIR_OP_HSIN, GLSLstd450Sinh},
+ {VSIR_OP_HTAN, GLSLstd450Tanh},
+ {VSIR_OP_IMAX, GLSLstd450SMax},
+ {VSIR_OP_IMIN, GLSLstd450SMin},
+ {VSIR_OP_LOG, GLSLstd450Log2},
+ {VSIR_OP_MAD, GLSLstd450Fma},
+ {VSIR_OP_MAX, GLSLstd450NMax},
+ {VSIR_OP_MIN, GLSLstd450NMin},
+ {VSIR_OP_ROUND_NE, GLSLstd450RoundEven},
+ {VSIR_OP_ROUND_NI, GLSLstd450Floor},
+ {VSIR_OP_ROUND_PI, GLSLstd450Ceil},
+ {VSIR_OP_ROUND_Z, GLSLstd450Trunc},
+ {VSIR_OP_RSQ, GLSLstd450InverseSqrt},
+ {VSIR_OP_SIN, GLSLstd450Sin},
+ {VSIR_OP_SQRT, GLSLstd450Sqrt},
+ {VSIR_OP_TAN, GLSLstd450Tan},
+ {VSIR_OP_UMAX, GLSLstd450UMax},
+ {VSIR_OP_UMIN, GLSLstd450UMin},
};
unsigned int i;
@@ -7778,8 +7816,8 @@ static void spirv_compiler_emit_ext_glsl_instruction(struct spirv_compiler *comp
unsigned int i, component_count;
enum GLSLstd450 glsl_inst;
- if (src[0].reg.data_type == VKD3D_DATA_UINT64 && (instruction->opcode == VKD3DSIH_FIRSTBIT_HI
- || instruction->opcode == VKD3DSIH_FIRSTBIT_LO || instruction->opcode == VKD3DSIH_FIRSTBIT_SHI))
+ if (src[0].reg.data_type == VKD3D_DATA_UINT64 && (instruction->opcode == VSIR_OP_FIRSTBIT_HI
+ || instruction->opcode == VSIR_OP_FIRSTBIT_LO || instruction->opcode == VSIR_OP_FIRSTBIT_SHI))
{
/* At least some drivers support this anyway, but if validation is enabled it will fail. */
spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
@@ -7810,8 +7848,8 @@ static void spirv_compiler_emit_ext_glsl_instruction(struct spirv_compiler *comp
val_id = vkd3d_spirv_build_op_ext_inst(builder, type_id,
instr_set_id, glsl_inst, src_id, instruction->src_count);
- if (instruction->opcode == VKD3DSIH_FIRSTBIT_HI
- || instruction->opcode == VKD3DSIH_FIRSTBIT_SHI)
+ if (instruction->opcode == VSIR_OP_FIRSTBIT_HI
+ || instruction->opcode == VSIR_OP_FIRSTBIT_SHI)
{
/* In D3D bits are numbered from the most significant bit. */
component_count = vsir_write_mask_component_count(dst->write_mask);
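
The remapping that follows the comment above is needed because GLSLstd450FindUMsb/FindSMsb report the bit position counted from the least significant bit, whereas D3D's firstbit_hi/firstbit_shi count from the most significant bit; for the 32-bit case (the 64-bit case is rejected earlier in this function) that amounts to 31 - n. A sketch of that remap; the helper name is illustrative and the zero-input handling is omitted.

/* Illustrative only: convert an LSB-based MSB index, as FindUMsb returns it,
 * to the D3D convention where bit 0 is the most significant bit. */
static unsigned int msb_index_to_d3d(unsigned int find_umsb_result)
{
    return 31u - find_umsb_result;
}
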
@@ -7848,7 +7886,8 @@ static void spirv_compiler_emit_mov(struct spirv_compiler *compiler,
|| dst_reg_info.write_mask != src_reg_info.write_mask)
goto general_implementation;
- if (vkd3d_swizzle_is_equal(dst_reg_info.write_mask, src->swizzle, src_reg_info.write_mask))
+ if (dst_reg_info.write_mask == dst->write_mask
+ && vkd3d_swizzle_is_equal(dst_reg_info.write_mask, src->swizzle, src_reg_info.write_mask))
{
dst_id = spirv_compiler_get_register_id(compiler, &dst->reg);
src_id = spirv_compiler_get_register_id(compiler, &src->reg);
@@ -7919,7 +7958,7 @@ static void spirv_compiler_emit_movc(struct spirv_compiler *compiler,
if (src[0].reg.data_type != VKD3D_DATA_BOOL)
{
- if (instruction->opcode == VKD3DSIH_CMP)
+ if (instruction->opcode == VSIR_OP_CMP)
condition_id = vkd3d_spirv_build_op_tr2(builder, &builder->function_stream, SpvOpFOrdGreaterThanEqual,
vkd3d_spirv_get_type_id(builder, VKD3D_SHADER_COMPONENT_BOOL, component_count), condition_id,
spirv_compiler_get_constant_float_vector(compiler, 0.0f, component_count));
@@ -7973,9 +8012,9 @@ static void spirv_compiler_emit_dot(struct spirv_compiler *compiler,
component_count = vsir_write_mask_component_count(dst->write_mask);
component_type = vkd3d_component_type_from_data_type(dst->reg.data_type);
- if (instruction->opcode == VKD3DSIH_DP4)
+ if (instruction->opcode == VSIR_OP_DP4)
write_mask = VKD3DSP_WRITEMASK_ALL;
- else if (instruction->opcode == VKD3DSIH_DP3)
+ else if (instruction->opcode == VSIR_OP_DP3)
write_mask = VKD3DSP_WRITEMASK_0 | VKD3DSP_WRITEMASK_1 | VKD3DSP_WRITEMASK_2;
else
write_mask = VKD3DSP_WRITEMASK_0 | VKD3DSP_WRITEMASK_1;
@@ -8041,67 +8080,6 @@ static void spirv_compiler_emit_imad(struct spirv_compiler *compiler,
spirv_compiler_emit_store_dst(compiler, dst, val_id);
}
-static void spirv_compiler_emit_int_div(struct spirv_compiler *compiler,
- const struct vkd3d_shader_instruction *instruction)
-{
- uint32_t type_id, val_id, src0_id, src1_id, condition_id, uint_max_id;
- struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
- const struct vkd3d_shader_src_param *src = instruction->src;
- unsigned int component_count = 0;
- SpvOp div_op, mod_op;
-
- div_op = instruction->opcode == VKD3DSIH_IDIV ? SpvOpSDiv : SpvOpUDiv;
- mod_op = instruction->opcode == VKD3DSIH_IDIV ? SpvOpSRem : SpvOpUMod;
-
- if (dst[0].reg.type != VKD3DSPR_NULL)
- {
- component_count = vsir_write_mask_component_count(dst[0].write_mask);
- type_id = spirv_compiler_get_type_id_for_dst(compiler, &dst[0]);
-
- src0_id = spirv_compiler_emit_load_src(compiler, &src[0], dst[0].write_mask);
- src1_id = spirv_compiler_emit_load_src(compiler, &src[1], dst[0].write_mask);
-
- condition_id = spirv_compiler_emit_int_to_bool(compiler,
- VKD3D_SHADER_CONDITIONAL_OP_NZ, src[1].reg.data_type, component_count, src1_id);
- if (dst[0].reg.data_type == VKD3D_DATA_UINT64)
- uint_max_id = spirv_compiler_get_constant_uint64_vector(compiler, UINT64_MAX, component_count);
- else
- uint_max_id = spirv_compiler_get_constant_uint_vector(compiler, 0xffffffff, component_count);
-
- val_id = vkd3d_spirv_build_op_tr2(builder, &builder->function_stream, div_op, type_id, src0_id, src1_id);
- /* The SPIR-V spec says: "The resulting value is undefined if Operand 2 is 0." */
- val_id = vkd3d_spirv_build_op_select(builder, type_id, condition_id, val_id, uint_max_id);
-
- spirv_compiler_emit_store_dst(compiler, &dst[0], val_id);
- }
-
- if (dst[1].reg.type != VKD3DSPR_NULL)
- {
- if (!component_count || dst[0].write_mask != dst[1].write_mask)
- {
- component_count = vsir_write_mask_component_count(dst[1].write_mask);
- type_id = spirv_compiler_get_type_id_for_dst(compiler, &dst[1]);
-
- src0_id = spirv_compiler_emit_load_src(compiler, &src[0], dst[1].write_mask);
- src1_id = spirv_compiler_emit_load_src(compiler, &src[1], dst[1].write_mask);
-
- condition_id = spirv_compiler_emit_int_to_bool(compiler,
- VKD3D_SHADER_CONDITIONAL_OP_NZ, src[1].reg.data_type, component_count, src1_id);
- if (dst[1].reg.data_type == VKD3D_DATA_UINT64)
- uint_max_id = spirv_compiler_get_constant_uint64_vector(compiler, UINT64_MAX, component_count);
- else
- uint_max_id = spirv_compiler_get_constant_uint_vector(compiler, 0xffffffff, component_count);
- }
-
- val_id = vkd3d_spirv_build_op_tr2(builder, &builder->function_stream, mod_op, type_id, src0_id, src1_id);
- /* The SPIR-V spec says: "The resulting value is undefined if Operand 2 is 0." */
- val_id = vkd3d_spirv_build_op_select(builder, type_id, condition_id, val_id, uint_max_id);
-
- spirv_compiler_emit_store_dst(compiler, &dst[1], val_id);
- }
-}
-
static void spirv_compiler_emit_ftoi(struct spirv_compiler *compiler,
const struct vkd3d_shader_instruction *instruction)
{
@@ -8254,9 +8232,9 @@ static void spirv_compiler_emit_bitfield_instruction(struct spirv_compiler *comp
switch (instruction->opcode)
{
- case VKD3DSIH_BFI: op = SpvOpBitFieldInsert; break;
- case VKD3DSIH_IBFE: op = SpvOpBitFieldSExtract; break;
- case VKD3DSIH_UBFE: op = SpvOpBitFieldUExtract; break;
+ case VSIR_OP_BFI: op = SpvOpBitFieldInsert; break;
+ case VSIR_OP_IBFE: op = SpvOpBitFieldSExtract; break;
+ case VSIR_OP_UBFE: op = SpvOpBitFieldUExtract; break;
default:
spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
"Unhandled instruction \"%s\" (%#x).",
@@ -8374,24 +8352,24 @@ static void spirv_compiler_emit_comparison_instruction(struct spirv_compiler *co
switch (instruction->opcode)
{
- case VKD3DSIH_DEQO:
- case VKD3DSIH_EQO: op = SpvOpFOrdEqual; break;
- case VKD3DSIH_EQU: op = SpvOpFUnordEqual; break;
- case VKD3DSIH_DGEO:
- case VKD3DSIH_GEO: op = SpvOpFOrdGreaterThanEqual; break;
- case VKD3DSIH_GEU: op = SpvOpFUnordGreaterThanEqual; break;
- case VKD3DSIH_IEQ: op = SpvOpIEqual; break;
- case VKD3DSIH_IGE: op = SpvOpSGreaterThanEqual; break;
- case VKD3DSIH_ILT: op = SpvOpSLessThan; break;
- case VKD3DSIH_INE: op = SpvOpINotEqual; break;
- case VKD3DSIH_DLT:
- case VKD3DSIH_LTO: op = SpvOpFOrdLessThan; break;
- case VKD3DSIH_LTU: op = SpvOpFUnordLessThan; break;
- case VKD3DSIH_NEO: op = SpvOpFOrdNotEqual; break;
- case VKD3DSIH_DNE:
- case VKD3DSIH_NEU: op = SpvOpFUnordNotEqual; break;
- case VKD3DSIH_UGE: op = SpvOpUGreaterThanEqual; break;
- case VKD3DSIH_ULT: op = SpvOpULessThan; break;
+ case VSIR_OP_DEQO:
+ case VSIR_OP_EQO: op = SpvOpFOrdEqual; break;
+ case VSIR_OP_EQU: op = SpvOpFUnordEqual; break;
+ case VSIR_OP_DGEO:
+ case VSIR_OP_GEO: op = SpvOpFOrdGreaterThanEqual; break;
+ case VSIR_OP_GEU: op = SpvOpFUnordGreaterThanEqual; break;
+ case VSIR_OP_IEQ: op = SpvOpIEqual; break;
+ case VSIR_OP_IGE: op = SpvOpSGreaterThanEqual; break;
+ case VSIR_OP_ILT: op = SpvOpSLessThan; break;
+ case VSIR_OP_INE: op = SpvOpINotEqual; break;
+ case VSIR_OP_DLT:
+ case VSIR_OP_LTO: op = SpvOpFOrdLessThan; break;
+ case VSIR_OP_LTU: op = SpvOpFUnordLessThan; break;
+ case VSIR_OP_NEO: op = SpvOpFOrdNotEqual; break;
+ case VSIR_OP_DNE:
+ case VSIR_OP_NEU: op = SpvOpFUnordNotEqual; break;
+ case VSIR_OP_UGE: op = SpvOpUGreaterThanEqual; break;
+ case VSIR_OP_ULT: op = SpvOpULessThan; break;
default:
spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
"Unhandled instruction \"%s\" (%#x).",
@@ -8403,10 +8381,10 @@ static void spirv_compiler_emit_comparison_instruction(struct spirv_compiler *co
switch (instruction->opcode)
{
- case VKD3DSIH_DEQO:
- case VKD3DSIH_DGEO:
- case VKD3DSIH_DLT:
- case VKD3DSIH_DNE:
+ case VSIR_OP_DEQO:
+ case VSIR_OP_DGEO:
+ case VSIR_OP_DLT:
+ case VSIR_OP_DNE:
write_mask = vkd3d_write_mask_from_component_count(component_count);
break;
@@ -8441,7 +8419,7 @@ static void spirv_compiler_emit_orderedness_instruction(struct spirv_compiler *c
src0_id = vkd3d_spirv_build_op_is_nan(builder, type_id, src0_id);
src1_id = vkd3d_spirv_build_op_is_nan(builder, type_id, src1_id);
val_id = vkd3d_spirv_build_op_logical_or(builder, type_id, src0_id, src1_id);
- if (instruction->opcode == VKD3DSIH_ORD)
+ if (instruction->opcode == VSIR_OP_ORD)
val_id = vkd3d_spirv_build_op_logical_not(builder, type_id, val_id);
spirv_compiler_emit_store_dst(compiler, dst, val_id);
}
@@ -8458,8 +8436,8 @@ static void spirv_compiler_emit_float_comparison_instruction(struct spirv_compil
switch (instruction->opcode)
{
- case VKD3DSIH_SLT: op = SpvOpFOrdLessThan; break;
- case VKD3DSIH_SGE: op = SpvOpFOrdGreaterThanEqual; break;
+ case VSIR_OP_SLT: op = SpvOpFOrdLessThan; break;
+ case VSIR_OP_SGE: op = SpvOpFOrdGreaterThanEqual; break;
default:
vkd3d_unreachable();
}
@@ -8762,12 +8740,12 @@ static void spirv_compiler_emit_deriv_instruction(struct spirv_compiler *compile
}
deriv_instructions[] =
{
- {VKD3DSIH_DSX, SpvOpDPdx},
- {VKD3DSIH_DSX_COARSE, SpvOpDPdxCoarse, true},
- {VKD3DSIH_DSX_FINE, SpvOpDPdxFine, true},
- {VKD3DSIH_DSY, SpvOpDPdy},
- {VKD3DSIH_DSY_COARSE, SpvOpDPdyCoarse, true},
- {VKD3DSIH_DSY_FINE, SpvOpDPdyFine, true},
+ {VSIR_OP_DSX, SpvOpDPdx},
+ {VSIR_OP_DSX_COARSE, SpvOpDPdxCoarse, true},
+ {VSIR_OP_DSX_FINE, SpvOpDPdxFine, true},
+ {VSIR_OP_DSY, SpvOpDPdy},
+ {VSIR_OP_DSY_COARSE, SpvOpDPdyCoarse, true},
+ {VSIR_OP_DSY_FINE, SpvOpDPdyFine, true},
};
info = NULL;
@@ -8994,7 +8972,7 @@ static void spirv_compiler_emit_ld(struct spirv_compiler *compiler,
uint32_t coordinate_mask;
bool multisample;
- multisample = instruction->opcode == VKD3DSIH_LD2DMS;
+ multisample = instruction->opcode == VSIR_OP_LD2DMS;
spirv_compiler_prepare_image(compiler, &image, &src[1].reg, NULL, VKD3D_IMAGE_FLAG_NONE);
@@ -9075,16 +9053,16 @@ static void spirv_compiler_emit_sample(struct spirv_compiler *compiler,
switch (instruction->opcode)
{
- case VKD3DSIH_SAMPLE:
+ case VSIR_OP_SAMPLE:
op = SpvOpImageSampleImplicitLod;
break;
- case VKD3DSIH_SAMPLE_B:
+ case VSIR_OP_SAMPLE_B:
op = SpvOpImageSampleImplicitLod;
operands_mask |= SpvImageOperandsBiasMask;
image_operands[image_operand_count++] = spirv_compiler_emit_load_src(compiler,
&src[3], VKD3DSP_WRITEMASK_0);
break;
- case VKD3DSIH_SAMPLE_GRAD:
+ case VSIR_OP_SAMPLE_GRAD:
op = SpvOpImageSampleExplicitLod;
operands_mask |= SpvImageOperandsGradMask;
component_count = image.resource_type_info->coordinate_component_count - image.resource_type_info->arrayed;
@@ -9094,7 +9072,7 @@ static void spirv_compiler_emit_sample(struct spirv_compiler *compiler,
image_operands[image_operand_count++] = spirv_compiler_emit_load_src(compiler,
&src[4], coordinate_mask);
break;
- case VKD3DSIH_SAMPLE_LOD:
+ case VSIR_OP_SAMPLE_LOD:
op = SpvOpImageSampleExplicitLod;
operands_mask |= SpvImageOperandsLodMask;
image_operands[image_operand_count++] = spirv_compiler_emit_load_src(compiler,
@@ -9137,7 +9115,7 @@ static void spirv_compiler_emit_sample_c(struct spirv_compiler *compiler,
uint32_t image_operands[2];
SpvOp op;
- if (instruction->opcode == VKD3DSIH_SAMPLE_C_LZ)
+ if (instruction->opcode == VSIR_OP_SAMPLE_C_LZ)
{
op = SpvOpImageSampleDrefExplicitLod;
operands_mask |= SpvImageOperandsLodMask;
@@ -9187,12 +9165,12 @@ static void spirv_compiler_emit_gather4(struct spirv_compiler *compiler,
uint32_t coordinate_mask;
bool extended_offset;
- if (instruction->opcode == VKD3DSIH_GATHER4_C
- || instruction->opcode == VKD3DSIH_GATHER4_PO_C)
+ if (instruction->opcode == VSIR_OP_GATHER4_C
+ || instruction->opcode == VSIR_OP_GATHER4_PO_C)
image_flags |= VKD3D_IMAGE_FLAG_DEPTH;
- extended_offset = instruction->opcode == VKD3DSIH_GATHER4_PO
- || instruction->opcode == VKD3DSIH_GATHER4_PO_C;
+ extended_offset = instruction->opcode == VSIR_OP_GATHER4_PO
+ || instruction->opcode == VSIR_OP_GATHER4_PO_C;
addr = &src[0];
offset = extended_offset ? &src[1] : NULL;
@@ -9650,7 +9628,7 @@ static void spirv_compiler_emit_uav_counter_instruction(struct spirv_compiler *c
uint32_t operands[3];
SpvOp op;
- op = instruction->opcode == VKD3DSIH_IMM_ATOMIC_ALLOC
+ op = instruction->opcode == VSIR_OP_IMM_ATOMIC_ALLOC
? SpvOpAtomicIIncrement : SpvOpAtomicIDecrement;
resource_symbol = spirv_compiler_find_resource(compiler, &src->reg);
@@ -9721,25 +9699,25 @@ static SpvOp spirv_compiler_map_atomic_instruction(const struct vkd3d_shader_ins
}
atomic_ops[] =
{
- {VKD3DSIH_ATOMIC_AND, SpvOpAtomicAnd},
- {VKD3DSIH_ATOMIC_CMP_STORE, SpvOpAtomicCompareExchange},
- {VKD3DSIH_ATOMIC_IADD, SpvOpAtomicIAdd},
- {VKD3DSIH_ATOMIC_IMAX, SpvOpAtomicSMax},
- {VKD3DSIH_ATOMIC_IMIN, SpvOpAtomicSMin},
- {VKD3DSIH_ATOMIC_OR, SpvOpAtomicOr},
- {VKD3DSIH_ATOMIC_UMAX, SpvOpAtomicUMax},
- {VKD3DSIH_ATOMIC_UMIN, SpvOpAtomicUMin},
- {VKD3DSIH_ATOMIC_XOR, SpvOpAtomicXor},
- {VKD3DSIH_IMM_ATOMIC_AND, SpvOpAtomicAnd},
- {VKD3DSIH_IMM_ATOMIC_CMP_EXCH, SpvOpAtomicCompareExchange},
- {VKD3DSIH_IMM_ATOMIC_EXCH, SpvOpAtomicExchange},
- {VKD3DSIH_IMM_ATOMIC_IADD, SpvOpAtomicIAdd},
- {VKD3DSIH_IMM_ATOMIC_IMAX, SpvOpAtomicSMax},
- {VKD3DSIH_IMM_ATOMIC_IMIN, SpvOpAtomicSMin},
- {VKD3DSIH_IMM_ATOMIC_OR, SpvOpAtomicOr},
- {VKD3DSIH_IMM_ATOMIC_UMAX, SpvOpAtomicUMax},
- {VKD3DSIH_IMM_ATOMIC_UMIN, SpvOpAtomicUMin},
- {VKD3DSIH_IMM_ATOMIC_XOR, SpvOpAtomicXor},
+ {VSIR_OP_ATOMIC_AND, SpvOpAtomicAnd},
+ {VSIR_OP_ATOMIC_CMP_STORE, SpvOpAtomicCompareExchange},
+ {VSIR_OP_ATOMIC_IADD, SpvOpAtomicIAdd},
+ {VSIR_OP_ATOMIC_IMAX, SpvOpAtomicSMax},
+ {VSIR_OP_ATOMIC_IMIN, SpvOpAtomicSMin},
+ {VSIR_OP_ATOMIC_OR, SpvOpAtomicOr},
+ {VSIR_OP_ATOMIC_UMAX, SpvOpAtomicUMax},
+ {VSIR_OP_ATOMIC_UMIN, SpvOpAtomicUMin},
+ {VSIR_OP_ATOMIC_XOR, SpvOpAtomicXor},
+ {VSIR_OP_IMM_ATOMIC_AND, SpvOpAtomicAnd},
+ {VSIR_OP_IMM_ATOMIC_CMP_EXCH, SpvOpAtomicCompareExchange},
+ {VSIR_OP_IMM_ATOMIC_EXCH, SpvOpAtomicExchange},
+ {VSIR_OP_IMM_ATOMIC_IADD, SpvOpAtomicIAdd},
+ {VSIR_OP_IMM_ATOMIC_IMAX, SpvOpAtomicSMax},
+ {VSIR_OP_IMM_ATOMIC_IMIN, SpvOpAtomicSMin},
+ {VSIR_OP_IMM_ATOMIC_OR, SpvOpAtomicOr},
+ {VSIR_OP_IMM_ATOMIC_UMAX, SpvOpAtomicUMax},
+ {VSIR_OP_IMM_ATOMIC_UMIN, SpvOpAtomicUMin},
+ {VSIR_OP_IMM_ATOMIC_XOR, SpvOpAtomicXor},
};
unsigned int i;
@@ -9754,7 +9732,7 @@ static SpvOp spirv_compiler_map_atomic_instruction(const struct vkd3d_shader_ins
static bool is_imm_atomic_instruction(enum vkd3d_shader_opcode opcode)
{
- return VKD3DSIH_IMM_ATOMIC_ALLOC <= opcode && opcode <= VKD3DSIH_IMM_ATOMIC_XOR;
+ return VSIR_OP_IMM_ATOMIC_ALLOC <= opcode && opcode <= VSIR_OP_IMM_ATOMIC_XOR;
}
static void spirv_compiler_emit_atomic_instruction(struct spirv_compiler *compiler,
@@ -10191,13 +10169,13 @@ static void spirv_compiler_emit_eval_attrib(struct spirv_compiler *compiler,
src_ids[src_count++] = register_info.id;
- if (instruction->opcode == VKD3DSIH_EVAL_CENTROID)
+ if (instruction->opcode == VSIR_OP_EVAL_CENTROID)
{
op = GLSLstd450InterpolateAtCentroid;
}
else
{
- VKD3D_ASSERT(instruction->opcode == VKD3DSIH_EVAL_SAMPLE_INDEX);
+ VKD3D_ASSERT(instruction->opcode == VSIR_OP_EVAL_SAMPLE_INDEX);
op = GLSLstd450InterpolateAtSample;
src_ids[src_count++] = spirv_compiler_emit_load_src(compiler, &src[1], VKD3DSP_WRITEMASK_0);
}
@@ -10279,7 +10257,7 @@ static void spirv_compiler_emit_emit_stream(struct spirv_compiler *compiler,
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
unsigned int stream_idx;
- if (instruction->opcode == VKD3DSIH_EMIT_STREAM)
+ if (instruction->opcode == VSIR_OP_EMIT_STREAM)
stream_idx = instruction->src[0].reg.idx[0].offset;
else
stream_idx = 0;
@@ -10300,7 +10278,7 @@ static void spirv_compiler_emit_cut_stream(struct spirv_compiler *compiler,
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
unsigned int stream_idx;
- if (instruction->opcode == VKD3DSIH_CUT_STREAM)
+ if (instruction->opcode == VSIR_OP_CUT_STREAM)
stream_idx = instruction->src[0].reg.idx[0].offset;
else
stream_idx = 0;
@@ -10318,11 +10296,11 @@ static uint32_t map_quad_read_across_direction(enum vkd3d_shader_opcode opcode)
{
switch (opcode)
{
- case VKD3DSIH_QUAD_READ_ACROSS_X:
+ case VSIR_OP_QUAD_READ_ACROSS_X:
return 0;
- case VKD3DSIH_QUAD_READ_ACROSS_Y:
+ case VSIR_OP_QUAD_READ_ACROSS_Y:
return 1;
- case VKD3DSIH_QUAD_READ_ACROSS_D:
+ case VSIR_OP_QUAD_READ_ACROSS_D:
return 2;
default:
vkd3d_unreachable();
@@ -10377,11 +10355,11 @@ static SpvOp map_wave_bool_op(enum vkd3d_shader_opcode opcode)
{
switch (opcode)
{
- case VKD3DSIH_WAVE_ACTIVE_ALL_EQUAL:
+ case VSIR_OP_WAVE_ACTIVE_ALL_EQUAL:
return SpvOpGroupNonUniformAllEqual;
- case VKD3DSIH_WAVE_ALL_TRUE:
+ case VSIR_OP_WAVE_ALL_TRUE:
return SpvOpGroupNonUniformAll;
- case VKD3DSIH_WAVE_ANY_TRUE:
+ case VSIR_OP_WAVE_ANY_TRUE:
return SpvOpGroupNonUniformAny;
default:
vkd3d_unreachable();
@@ -10435,27 +10413,27 @@ static SpvOp map_wave_alu_op(enum vkd3d_shader_opcode opcode, bool is_float)
{
switch (opcode)
{
- case VKD3DSIH_WAVE_ACTIVE_BIT_AND:
+ case VSIR_OP_WAVE_ACTIVE_BIT_AND:
return SpvOpGroupNonUniformBitwiseAnd;
- case VKD3DSIH_WAVE_ACTIVE_BIT_OR:
+ case VSIR_OP_WAVE_ACTIVE_BIT_OR:
return SpvOpGroupNonUniformBitwiseOr;
- case VKD3DSIH_WAVE_ACTIVE_BIT_XOR:
+ case VSIR_OP_WAVE_ACTIVE_BIT_XOR:
return SpvOpGroupNonUniformBitwiseXor;
- case VKD3DSIH_WAVE_OP_ADD:
+ case VSIR_OP_WAVE_OP_ADD:
return is_float ? SpvOpGroupNonUniformFAdd : SpvOpGroupNonUniformIAdd;
- case VKD3DSIH_WAVE_OP_IMAX:
+ case VSIR_OP_WAVE_OP_IMAX:
return SpvOpGroupNonUniformSMax;
- case VKD3DSIH_WAVE_OP_IMIN:
+ case VSIR_OP_WAVE_OP_IMIN:
return SpvOpGroupNonUniformSMin;
- case VKD3DSIH_WAVE_OP_MAX:
+ case VSIR_OP_WAVE_OP_MAX:
return SpvOpGroupNonUniformFMax;
- case VKD3DSIH_WAVE_OP_MIN:
+ case VSIR_OP_WAVE_OP_MIN:
return SpvOpGroupNonUniformFMin;
- case VKD3DSIH_WAVE_OP_MUL:
+ case VSIR_OP_WAVE_OP_MUL:
return is_float ? SpvOpGroupNonUniformFMul : SpvOpGroupNonUniformIMul;
- case VKD3DSIH_WAVE_OP_UMAX:
+ case VSIR_OP_WAVE_OP_UMAX:
return SpvOpGroupNonUniformUMax;
- case VKD3DSIH_WAVE_OP_UMIN:
+ case VSIR_OP_WAVE_OP_UMIN:
return SpvOpGroupNonUniformUMin;
default:
vkd3d_unreachable();
@@ -10494,7 +10472,7 @@ static void spirv_compiler_emit_wave_bit_count(struct spirv_compiler *compiler,
SpvGroupOperation group_op;
uint32_t type_id, val_id;
- group_op = (instruction->opcode == VKD3DSIH_WAVE_PREFIX_BIT_COUNT) ? SpvGroupOperationExclusiveScan
+ group_op = (instruction->opcode == VSIR_OP_WAVE_PREFIX_BIT_COUNT) ? SpvGroupOperationExclusiveScan
: SpvGroupOperationReduce;
val_id = spirv_compiler_emit_group_nonuniform_ballot(compiler, instruction->src);
@@ -10582,359 +10560,359 @@ static int spirv_compiler_handle_instruction(struct spirv_compiler *compiler,
switch (instruction->opcode)
{
- case VKD3DSIH_DCL_INDEXABLE_TEMP:
+ case VSIR_OP_DCL_INDEXABLE_TEMP:
spirv_compiler_emit_dcl_indexable_temp(compiler, instruction);
break;
- case VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER:
spirv_compiler_emit_dcl_immediate_constant_buffer(compiler, instruction);
break;
- case VKD3DSIH_DCL_TGSM_RAW:
+ case VSIR_OP_DCL_TGSM_RAW:
spirv_compiler_emit_dcl_tgsm_raw(compiler, instruction);
break;
- case VKD3DSIH_DCL_TGSM_STRUCTURED:
+ case VSIR_OP_DCL_TGSM_STRUCTURED:
spirv_compiler_emit_dcl_tgsm_structured(compiler, instruction);
break;
- case VKD3DSIH_DCL_STREAM:
+ case VSIR_OP_DCL_STREAM:
spirv_compiler_emit_dcl_stream(compiler, instruction);
break;
- case VKD3DSIH_DCL_VERTICES_OUT:
+ case VSIR_OP_DCL_VERTICES_OUT:
spirv_compiler_emit_output_vertex_count(compiler, instruction);
break;
- case VKD3DSIH_DCL_INPUT_PRIMITIVE:
+ case VSIR_OP_DCL_INPUT_PRIMITIVE:
spirv_compiler_emit_dcl_input_primitive(compiler, instruction);
break;
- case VKD3DSIH_DCL_OUTPUT_TOPOLOGY:
+ case VSIR_OP_DCL_OUTPUT_TOPOLOGY:
spirv_compiler_emit_dcl_output_topology(compiler, instruction);
break;
- case VKD3DSIH_DCL_GS_INSTANCES:
+ case VSIR_OP_DCL_GS_INSTANCES:
spirv_compiler_emit_dcl_gs_instances(compiler, instruction);
break;
- case VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT:
spirv_compiler_emit_output_vertex_count(compiler, instruction);
break;
- case VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
+ case VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
spirv_compiler_emit_tessellator_output_primitive(compiler,
instruction->declaration.tessellator_output_primitive);
break;
- case VKD3DSIH_DCL_TESSELLATOR_PARTITIONING:
+ case VSIR_OP_DCL_TESSELLATOR_PARTITIONING:
spirv_compiler_emit_tessellator_partitioning(compiler,
instruction->declaration.tessellator_partitioning);
break;
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_HS_JOIN_PHASE:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_HS_JOIN_PHASE:
spirv_compiler_enter_shader_phase(compiler, instruction);
break;
- case VKD3DSIH_DMOV:
- case VKD3DSIH_MOV:
+ case VSIR_OP_DMOV:
+ case VSIR_OP_MOV:
spirv_compiler_emit_mov(compiler, instruction);
break;
- case VKD3DSIH_DMOVC:
- case VKD3DSIH_MOVC:
- case VKD3DSIH_CMP:
+ case VSIR_OP_DMOVC:
+ case VSIR_OP_MOVC:
+ case VSIR_OP_CMP:
spirv_compiler_emit_movc(compiler, instruction);
break;
- case VKD3DSIH_SWAPC:
+ case VSIR_OP_SWAPC:
spirv_compiler_emit_swapc(compiler, instruction);
break;
- case VKD3DSIH_ADD:
- case VKD3DSIH_AND:
- case VKD3DSIH_BFREV:
- case VKD3DSIH_COUNTBITS:
- case VKD3DSIH_DADD:
- case VKD3DSIH_DDIV:
- case VKD3DSIH_DIV:
- case VKD3DSIH_DMUL:
- case VKD3DSIH_FREM:
- case VKD3DSIH_FTOD:
- case VKD3DSIH_IADD:
- case VKD3DSIH_IMUL_LOW:
- case VKD3DSIH_INEG:
- case VKD3DSIH_ISHL:
- case VKD3DSIH_ISHR:
- case VKD3DSIH_ISINF:
- case VKD3DSIH_ISNAN:
- case VKD3DSIH_ITOD:
- case VKD3DSIH_ITOF:
- case VKD3DSIH_ITOI:
- case VKD3DSIH_MUL:
- case VKD3DSIH_NOT:
- case VKD3DSIH_OR:
- case VKD3DSIH_USHR:
- case VKD3DSIH_UTOD:
- case VKD3DSIH_UTOF:
- case VKD3DSIH_UTOU:
- case VKD3DSIH_XOR:
+ case VSIR_OP_ADD:
+ case VSIR_OP_AND:
+ case VSIR_OP_BFREV:
+ case VSIR_OP_COUNTBITS:
+ case VSIR_OP_DADD:
+ case VSIR_OP_DDIV:
+ case VSIR_OP_DIV:
+ case VSIR_OP_DMUL:
+ case VSIR_OP_FREM:
+ case VSIR_OP_FTOD:
+ case VSIR_OP_IADD:
+ case VSIR_OP_IDIV:
+ case VSIR_OP_IMUL_LOW:
+ case VSIR_OP_INEG:
+ case VSIR_OP_IREM:
+ case VSIR_OP_ISHL:
+ case VSIR_OP_ISHR:
+ case VSIR_OP_ISINF:
+ case VSIR_OP_ISNAN:
+ case VSIR_OP_ITOD:
+ case VSIR_OP_ITOF:
+ case VSIR_OP_ITOI:
+ case VSIR_OP_MUL:
+ case VSIR_OP_NOT:
+ case VSIR_OP_OR:
+ case VSIR_OP_UDIV_SIMPLE:
+ case VSIR_OP_UREM:
+ case VSIR_OP_USHR:
+ case VSIR_OP_UTOD:
+ case VSIR_OP_UTOF:
+ case VSIR_OP_UTOU:
+ case VSIR_OP_XOR:
ret = spirv_compiler_emit_alu_instruction(compiler, instruction);
break;
- case VKD3DSIH_ISFINITE:
+ case VSIR_OP_ISFINITE:
spirv_compiler_emit_isfinite(compiler, instruction);
break;
- case VKD3DSIH_ABS:
- case VKD3DSIH_ACOS:
- case VKD3DSIH_ASIN:
- case VKD3DSIH_ATAN:
- case VKD3DSIH_COS:
- case VKD3DSIH_HCOS:
- case VKD3DSIH_HSIN:
- case VKD3DSIH_HTAN:
- case VKD3DSIH_DFMA:
- case VKD3DSIH_DMAX:
- case VKD3DSIH_DMIN:
- case VKD3DSIH_EXP:
- case VKD3DSIH_FIRSTBIT_HI:
- case VKD3DSIH_FIRSTBIT_LO:
- case VKD3DSIH_FIRSTBIT_SHI:
- case VKD3DSIH_FRC:
- case VKD3DSIH_IMAX:
- case VKD3DSIH_IMIN:
- case VKD3DSIH_LOG:
- case VKD3DSIH_MAD:
- case VKD3DSIH_MAX:
- case VKD3DSIH_MIN:
- case VKD3DSIH_ROUND_NE:
- case VKD3DSIH_ROUND_NI:
- case VKD3DSIH_ROUND_PI:
- case VKD3DSIH_ROUND_Z:
- case VKD3DSIH_RSQ:
- case VKD3DSIH_SIN:
- case VKD3DSIH_SQRT:
- case VKD3DSIH_TAN:
- case VKD3DSIH_UMAX:
- case VKD3DSIH_UMIN:
+ case VSIR_OP_ABS:
+ case VSIR_OP_ACOS:
+ case VSIR_OP_ASIN:
+ case VSIR_OP_ATAN:
+ case VSIR_OP_COS:
+ case VSIR_OP_HCOS:
+ case VSIR_OP_HSIN:
+ case VSIR_OP_HTAN:
+ case VSIR_OP_DFMA:
+ case VSIR_OP_DMAX:
+ case VSIR_OP_DMIN:
+ case VSIR_OP_EXP:
+ case VSIR_OP_FIRSTBIT_HI:
+ case VSIR_OP_FIRSTBIT_LO:
+ case VSIR_OP_FIRSTBIT_SHI:
+ case VSIR_OP_FRC:
+ case VSIR_OP_IMAX:
+ case VSIR_OP_IMIN:
+ case VSIR_OP_LOG:
+ case VSIR_OP_MAD:
+ case VSIR_OP_MAX:
+ case VSIR_OP_MIN:
+ case VSIR_OP_ROUND_NE:
+ case VSIR_OP_ROUND_NI:
+ case VSIR_OP_ROUND_PI:
+ case VSIR_OP_ROUND_Z:
+ case VSIR_OP_RSQ:
+ case VSIR_OP_SIN:
+ case VSIR_OP_SQRT:
+ case VSIR_OP_TAN:
+ case VSIR_OP_UMAX:
+ case VSIR_OP_UMIN:
spirv_compiler_emit_ext_glsl_instruction(compiler, instruction);
break;
- case VKD3DSIH_DP4:
- case VKD3DSIH_DP3:
- case VKD3DSIH_DP2:
+ case VSIR_OP_DP4:
+ case VSIR_OP_DP3:
+ case VSIR_OP_DP2:
spirv_compiler_emit_dot(compiler, instruction);
break;
- case VKD3DSIH_DRCP:
- case VKD3DSIH_RCP:
+ case VSIR_OP_DRCP:
+ case VSIR_OP_RCP:
spirv_compiler_emit_rcp(compiler, instruction);
break;
- case VKD3DSIH_IMAD:
+ case VSIR_OP_IMAD:
spirv_compiler_emit_imad(compiler, instruction);
break;
- case VKD3DSIH_IDIV:
- case VKD3DSIH_UDIV:
- spirv_compiler_emit_int_div(compiler, instruction);
- break;
- case VKD3DSIH_DTOI:
- case VKD3DSIH_FTOI:
+ case VSIR_OP_DTOI:
+ case VSIR_OP_FTOI:
spirv_compiler_emit_ftoi(compiler, instruction);
break;
- case VKD3DSIH_DTOU:
- case VKD3DSIH_FTOU:
+ case VSIR_OP_DTOU:
+ case VSIR_OP_FTOU:
spirv_compiler_emit_ftou(compiler, instruction);
break;
- case VKD3DSIH_DTOF:
+ case VSIR_OP_DTOF:
spirv_compiler_emit_dtof(compiler, instruction);
break;
- case VKD3DSIH_DEQO:
- case VKD3DSIH_DGEO:
- case VKD3DSIH_DLT:
- case VKD3DSIH_DNE:
- case VKD3DSIH_EQO:
- case VKD3DSIH_EQU:
- case VKD3DSIH_GEO:
- case VKD3DSIH_GEU:
- case VKD3DSIH_IEQ:
- case VKD3DSIH_IGE:
- case VKD3DSIH_ILT:
- case VKD3DSIH_INE:
- case VKD3DSIH_LTO:
- case VKD3DSIH_LTU:
- case VKD3DSIH_NEO:
- case VKD3DSIH_NEU:
- case VKD3DSIH_UGE:
- case VKD3DSIH_ULT:
+ case VSIR_OP_DEQO:
+ case VSIR_OP_DGEO:
+ case VSIR_OP_DLT:
+ case VSIR_OP_DNE:
+ case VSIR_OP_EQO:
+ case VSIR_OP_EQU:
+ case VSIR_OP_GEO:
+ case VSIR_OP_GEU:
+ case VSIR_OP_IEQ:
+ case VSIR_OP_IGE:
+ case VSIR_OP_ILT:
+ case VSIR_OP_INE:
+ case VSIR_OP_LTO:
+ case VSIR_OP_LTU:
+ case VSIR_OP_NEO:
+ case VSIR_OP_NEU:
+ case VSIR_OP_UGE:
+ case VSIR_OP_ULT:
spirv_compiler_emit_comparison_instruction(compiler, instruction);
break;
- case VKD3DSIH_ORD:
- case VKD3DSIH_UNO:
+ case VSIR_OP_ORD:
+ case VSIR_OP_UNO:
spirv_compiler_emit_orderedness_instruction(compiler, instruction);
break;
- case VKD3DSIH_SLT:
- case VKD3DSIH_SGE:
+ case VSIR_OP_SLT:
+ case VSIR_OP_SGE:
spirv_compiler_emit_float_comparison_instruction(compiler, instruction);
break;
- case VKD3DSIH_BFI:
- case VKD3DSIH_IBFE:
- case VKD3DSIH_UBFE:
+ case VSIR_OP_BFI:
+ case VSIR_OP_IBFE:
+ case VSIR_OP_UBFE:
spirv_compiler_emit_bitfield_instruction(compiler, instruction);
break;
- case VKD3DSIH_F16TOF32:
+ case VSIR_OP_F16TOF32:
spirv_compiler_emit_f16tof32(compiler, instruction);
break;
- case VKD3DSIH_F32TOF16:
+ case VSIR_OP_F32TOF16:
spirv_compiler_emit_f32tof16(compiler, instruction);
break;
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
spirv_compiler_emit_return(compiler, instruction);
break;
- case VKD3DSIH_RETP:
+ case VSIR_OP_RETP:
spirv_compiler_emit_retc(compiler, instruction);
break;
- case VKD3DSIH_DISCARD:
+ case VSIR_OP_DISCARD:
spirv_compiler_emit_discard(compiler, instruction);
break;
- case VKD3DSIH_LABEL:
+ case VSIR_OP_LABEL:
spirv_compiler_emit_label(compiler, instruction);
break;
- case VKD3DSIH_BRANCH:
+ case VSIR_OP_BRANCH:
spirv_compiler_emit_branch(compiler, instruction);
break;
- case VKD3DSIH_SWITCH_MONOLITHIC:
+ case VSIR_OP_SWITCH_MONOLITHIC:
spirv_compiler_emit_switch(compiler, instruction);
break;
- case VKD3DSIH_DSX:
- case VKD3DSIH_DSX_COARSE:
- case VKD3DSIH_DSX_FINE:
- case VKD3DSIH_DSY:
- case VKD3DSIH_DSY_COARSE:
- case VKD3DSIH_DSY_FINE:
+ case VSIR_OP_DSX:
+ case VSIR_OP_DSX_COARSE:
+ case VSIR_OP_DSX_FINE:
+ case VSIR_OP_DSY:
+ case VSIR_OP_DSY_COARSE:
+ case VSIR_OP_DSY_FINE:
spirv_compiler_emit_deriv_instruction(compiler, instruction);
break;
- case VKD3DSIH_LD2DMS:
- case VKD3DSIH_LD:
+ case VSIR_OP_LD2DMS:
+ case VSIR_OP_LD:
spirv_compiler_emit_ld(compiler, instruction);
break;
- case VKD3DSIH_LOD:
+ case VSIR_OP_LOD:
spirv_compiler_emit_lod(compiler, instruction);
break;
- case VKD3DSIH_SAMPLE:
- case VKD3DSIH_SAMPLE_B:
- case VKD3DSIH_SAMPLE_GRAD:
- case VKD3DSIH_SAMPLE_LOD:
+ case VSIR_OP_SAMPLE:
+ case VSIR_OP_SAMPLE_B:
+ case VSIR_OP_SAMPLE_GRAD:
+ case VSIR_OP_SAMPLE_LOD:
spirv_compiler_emit_sample(compiler, instruction);
break;
- case VKD3DSIH_SAMPLE_C:
- case VKD3DSIH_SAMPLE_C_LZ:
+ case VSIR_OP_SAMPLE_C:
+ case VSIR_OP_SAMPLE_C_LZ:
spirv_compiler_emit_sample_c(compiler, instruction);
break;
- case VKD3DSIH_GATHER4:
- case VKD3DSIH_GATHER4_C:
- case VKD3DSIH_GATHER4_PO:
- case VKD3DSIH_GATHER4_PO_C:
+ case VSIR_OP_GATHER4:
+ case VSIR_OP_GATHER4_C:
+ case VSIR_OP_GATHER4_PO:
+ case VSIR_OP_GATHER4_PO_C:
spirv_compiler_emit_gather4(compiler, instruction);
break;
- case VKD3DSIH_LD_RAW:
- case VKD3DSIH_LD_STRUCTURED:
+ case VSIR_OP_LD_RAW:
+ case VSIR_OP_LD_STRUCTURED:
spirv_compiler_emit_ld_raw_structured(compiler, instruction);
break;
- case VKD3DSIH_STORE_RAW:
- case VKD3DSIH_STORE_STRUCTURED:
+ case VSIR_OP_STORE_RAW:
+ case VSIR_OP_STORE_STRUCTURED:
spirv_compiler_emit_store_raw_structured(compiler, instruction);
break;
- case VKD3DSIH_LD_UAV_TYPED:
+ case VSIR_OP_LD_UAV_TYPED:
spirv_compiler_emit_ld_uav_typed(compiler, instruction);
break;
- case VKD3DSIH_STORE_UAV_TYPED:
+ case VSIR_OP_STORE_UAV_TYPED:
spirv_compiler_emit_store_uav_typed(compiler, instruction);
break;
- case VKD3DSIH_IMM_ATOMIC_ALLOC:
- case VKD3DSIH_IMM_ATOMIC_CONSUME:
+ case VSIR_OP_IMM_ATOMIC_ALLOC:
+ case VSIR_OP_IMM_ATOMIC_CONSUME:
spirv_compiler_emit_uav_counter_instruction(compiler, instruction);
break;
- case VKD3DSIH_ATOMIC_AND:
- case VKD3DSIH_ATOMIC_CMP_STORE:
- case VKD3DSIH_ATOMIC_IADD:
- case VKD3DSIH_ATOMIC_IMAX:
- case VKD3DSIH_ATOMIC_IMIN:
- case VKD3DSIH_ATOMIC_OR:
- case VKD3DSIH_ATOMIC_UMAX:
- case VKD3DSIH_ATOMIC_UMIN:
- case VKD3DSIH_ATOMIC_XOR:
- case VKD3DSIH_IMM_ATOMIC_AND:
- case VKD3DSIH_IMM_ATOMIC_CMP_EXCH:
- case VKD3DSIH_IMM_ATOMIC_EXCH:
- case VKD3DSIH_IMM_ATOMIC_IADD:
- case VKD3DSIH_IMM_ATOMIC_IMAX:
- case VKD3DSIH_IMM_ATOMIC_IMIN:
- case VKD3DSIH_IMM_ATOMIC_OR:
- case VKD3DSIH_IMM_ATOMIC_UMAX:
- case VKD3DSIH_IMM_ATOMIC_UMIN:
- case VKD3DSIH_IMM_ATOMIC_XOR:
+ case VSIR_OP_ATOMIC_AND:
+ case VSIR_OP_ATOMIC_CMP_STORE:
+ case VSIR_OP_ATOMIC_IADD:
+ case VSIR_OP_ATOMIC_IMAX:
+ case VSIR_OP_ATOMIC_IMIN:
+ case VSIR_OP_ATOMIC_OR:
+ case VSIR_OP_ATOMIC_UMAX:
+ case VSIR_OP_ATOMIC_UMIN:
+ case VSIR_OP_ATOMIC_XOR:
+ case VSIR_OP_IMM_ATOMIC_AND:
+ case VSIR_OP_IMM_ATOMIC_CMP_EXCH:
+ case VSIR_OP_IMM_ATOMIC_EXCH:
+ case VSIR_OP_IMM_ATOMIC_IADD:
+ case VSIR_OP_IMM_ATOMIC_IMAX:
+ case VSIR_OP_IMM_ATOMIC_IMIN:
+ case VSIR_OP_IMM_ATOMIC_OR:
+ case VSIR_OP_IMM_ATOMIC_UMAX:
+ case VSIR_OP_IMM_ATOMIC_UMIN:
+ case VSIR_OP_IMM_ATOMIC_XOR:
spirv_compiler_emit_atomic_instruction(compiler, instruction);
break;
- case VKD3DSIH_BUFINFO:
+ case VSIR_OP_BUFINFO:
spirv_compiler_emit_bufinfo(compiler, instruction);
break;
- case VKD3DSIH_RESINFO:
+ case VSIR_OP_RESINFO:
spirv_compiler_emit_resinfo(compiler, instruction);
break;
- case VKD3DSIH_SAMPLE_INFO:
+ case VSIR_OP_SAMPLE_INFO:
spirv_compiler_emit_sample_info(compiler, instruction);
break;
- case VKD3DSIH_SAMPLE_POS:
+ case VSIR_OP_SAMPLE_POS:
spirv_compiler_emit_sample_position(compiler, instruction);
break;
- case VKD3DSIH_EVAL_CENTROID:
- case VKD3DSIH_EVAL_SAMPLE_INDEX:
+ case VSIR_OP_EVAL_CENTROID:
+ case VSIR_OP_EVAL_SAMPLE_INDEX:
spirv_compiler_emit_eval_attrib(compiler, instruction);
break;
- case VKD3DSIH_SYNC:
+ case VSIR_OP_SYNC:
spirv_compiler_emit_sync(compiler, instruction);
break;
- case VKD3DSIH_EMIT:
- case VKD3DSIH_EMIT_STREAM:
+ case VSIR_OP_EMIT:
+ case VSIR_OP_EMIT_STREAM:
spirv_compiler_emit_emit_stream(compiler, instruction);
break;
- case VKD3DSIH_CUT:
- case VKD3DSIH_CUT_STREAM:
+ case VSIR_OP_CUT:
+ case VSIR_OP_CUT_STREAM:
spirv_compiler_emit_cut_stream(compiler, instruction);
break;
- case VKD3DSIH_QUAD_READ_ACROSS_D:
- case VKD3DSIH_QUAD_READ_ACROSS_X:
- case VKD3DSIH_QUAD_READ_ACROSS_Y:
+ case VSIR_OP_QUAD_READ_ACROSS_D:
+ case VSIR_OP_QUAD_READ_ACROSS_X:
+ case VSIR_OP_QUAD_READ_ACROSS_Y:
spirv_compiler_emit_quad_read_across(compiler, instruction);
break;
- case VKD3DSIH_QUAD_READ_LANE_AT:
+ case VSIR_OP_QUAD_READ_LANE_AT:
spirv_compiler_emit_quad_read_lane_at(compiler, instruction);
break;
- case VKD3DSIH_WAVE_ACTIVE_ALL_EQUAL:
- case VKD3DSIH_WAVE_ALL_TRUE:
- case VKD3DSIH_WAVE_ANY_TRUE:
+ case VSIR_OP_WAVE_ACTIVE_ALL_EQUAL:
+ case VSIR_OP_WAVE_ALL_TRUE:
+ case VSIR_OP_WAVE_ANY_TRUE:
spirv_compiler_emit_wave_bool_op(compiler, instruction);
break;
- case VKD3DSIH_WAVE_ACTIVE_BALLOT:
+ case VSIR_OP_WAVE_ACTIVE_BALLOT:
spirv_compiler_emit_wave_active_ballot(compiler, instruction);
break;
- case VKD3DSIH_WAVE_ACTIVE_BIT_AND:
- case VKD3DSIH_WAVE_ACTIVE_BIT_OR:
- case VKD3DSIH_WAVE_ACTIVE_BIT_XOR:
- case VKD3DSIH_WAVE_OP_ADD:
- case VKD3DSIH_WAVE_OP_IMAX:
- case VKD3DSIH_WAVE_OP_IMIN:
- case VKD3DSIH_WAVE_OP_MAX:
- case VKD3DSIH_WAVE_OP_MIN:
- case VKD3DSIH_WAVE_OP_MUL:
- case VKD3DSIH_WAVE_OP_UMAX:
- case VKD3DSIH_WAVE_OP_UMIN:
+ case VSIR_OP_WAVE_ACTIVE_BIT_AND:
+ case VSIR_OP_WAVE_ACTIVE_BIT_OR:
+ case VSIR_OP_WAVE_ACTIVE_BIT_XOR:
+ case VSIR_OP_WAVE_OP_ADD:
+ case VSIR_OP_WAVE_OP_IMAX:
+ case VSIR_OP_WAVE_OP_IMIN:
+ case VSIR_OP_WAVE_OP_MAX:
+ case VSIR_OP_WAVE_OP_MIN:
+ case VSIR_OP_WAVE_OP_MUL:
+ case VSIR_OP_WAVE_OP_UMAX:
+ case VSIR_OP_WAVE_OP_UMIN:
spirv_compiler_emit_wave_alu_op(compiler, instruction);
break;
- case VKD3DSIH_WAVE_ALL_BIT_COUNT:
- case VKD3DSIH_WAVE_PREFIX_BIT_COUNT:
+ case VSIR_OP_WAVE_ALL_BIT_COUNT:
+ case VSIR_OP_WAVE_PREFIX_BIT_COUNT:
spirv_compiler_emit_wave_bit_count(compiler, instruction);
break;
- case VKD3DSIH_WAVE_IS_FIRST_LANE:
+ case VSIR_OP_WAVE_IS_FIRST_LANE:
spirv_compiler_emit_wave_is_first_lane(compiler, instruction);
break;
- case VKD3DSIH_WAVE_READ_LANE_AT:
+ case VSIR_OP_WAVE_READ_LANE_AT:
spirv_compiler_emit_wave_read_lane_at(compiler, instruction);
break;
- case VKD3DSIH_WAVE_READ_LANE_FIRST:
+ case VSIR_OP_WAVE_READ_LANE_FIRST:
spirv_compiler_emit_wave_read_lane_first(compiler, instruction);
break;
- case VKD3DSIH_DCL_HS_MAX_TESSFACTOR:
- case VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT:
- case VKD3DSIH_DCL_RESOURCE_RAW:
- case VKD3DSIH_DCL_RESOURCE_STRUCTURED:
- case VKD3DSIH_DCL_UAV_RAW:
- case VKD3DSIH_DCL_UAV_STRUCTURED:
- case VKD3DSIH_HS_DECLS:
- case VKD3DSIH_NOP:
+ case VSIR_OP_DCL_HS_MAX_TESSFACTOR:
+ case VSIR_OP_DCL_INPUT_CONTROL_POINT_COUNT:
+ case VSIR_OP_DCL_RESOURCE_RAW:
+ case VSIR_OP_DCL_RESOURCE_STRUCTURED:
+ case VSIR_OP_DCL_UAV_RAW:
+ case VSIR_OP_DCL_UAV_STRUCTURED:
+ case VSIR_OP_HS_DECLS:
+ case VSIR_OP_NOP:
/* nothing to do */
break;
default:
diff --git a/libs/vkd3d/libs/vkd3d-shader/tpf.c b/libs/vkd3d/libs/vkd3d-shader/tpf.c
index 01af2f6ebbd..c7eafbc79f3 100644
--- a/libs/vkd3d/libs/vkd3d-shader/tpf.c
+++ b/libs/vkd3d/libs/vkd3d-shader/tpf.c
@@ -655,7 +655,7 @@ struct sm4_index_range_array
struct vkd3d_sm4_lookup_tables
{
const struct vkd3d_sm4_opcode_info *opcode_info_from_sm4[VKD3D_SM4_OP_COUNT];
- const struct vkd3d_sm4_opcode_info *opcode_info_from_vsir[VKD3DSIH_COUNT];
+ const struct vkd3d_sm4_opcode_info *opcode_info_from_vsir[VSIR_OP_COUNT];
const struct vkd3d_sm4_register_type_info *register_type_info_from_sm4[VKD3D_SM4_REGISTER_TYPE_COUNT];
const struct vkd3d_sm4_register_type_info *register_type_info_from_vkd3d[VKD3DSPR_COUNT];
const struct vkd3d_sm4_stat_field_info *stat_field_from_sm4[VKD3D_SM4_OP_COUNT];
@@ -823,7 +823,7 @@ static void shader_sm4_read_shader_data(struct vkd3d_shader_instruction *ins, ui
if (type != VKD3D_SM4_SHADER_DATA_IMMEDIATE_CONSTANT_BUFFER)
{
FIXME("Ignoring shader data type %#x.\n", type);
- ins->opcode = VKD3DSIH_NOP;
+ ins->opcode = VSIR_OP_NOP;
return;
}
@@ -832,7 +832,7 @@ static void shader_sm4_read_shader_data(struct vkd3d_shader_instruction *ins, ui
if (icb_size % 4)
{
FIXME("Unexpected immediate constant buffer size %u.\n", icb_size);
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
return;
}
@@ -840,7 +840,7 @@ static void shader_sm4_read_shader_data(struct vkd3d_shader_instruction *ins, ui
{
ERR("Failed to allocate immediate constant buffer, size %u.\n", icb_size);
vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
return;
}
icb->register_idx = 0;
@@ -964,7 +964,7 @@ static void shader_sm4_read_dcl_sampler(struct vkd3d_shader_instruction *ins, ui
static bool sm4_parser_is_in_fork_or_join_phase(const struct vkd3d_shader_sm4_parser *sm4)
{
- return sm4->phase == VKD3DSIH_HS_FORK_PHASE || sm4->phase == VKD3DSIH_HS_JOIN_PHASE;
+ return sm4->phase == VSIR_OP_HS_FORK_PHASE || sm4->phase == VSIR_OP_HS_JOIN_PHASE;
}
static void shader_sm4_read_dcl_index_range(struct vkd3d_shader_instruction *ins, uint32_t opcode,
@@ -1441,275 +1441,275 @@ static void init_sm4_lookup_tables(struct vkd3d_sm4_lookup_tables *lookup)
*/
static const struct vkd3d_sm4_opcode_info opcode_table[] =
{
- {VKD3D_SM4_OP_ADD, VKD3DSIH_ADD, "f", "ff"},
- {VKD3D_SM4_OP_AND, VKD3DSIH_AND, "u", "uu"},
- {VKD3D_SM4_OP_BREAK, VKD3DSIH_BREAK, "", ""},
- {VKD3D_SM4_OP_BREAKC, VKD3DSIH_BREAKP, "", "u",
+ {VKD3D_SM4_OP_ADD, VSIR_OP_ADD, "f", "ff"},
+ {VKD3D_SM4_OP_AND, VSIR_OP_AND, "u", "uu"},
+ {VKD3D_SM4_OP_BREAK, VSIR_OP_BREAK, "", ""},
+ {VKD3D_SM4_OP_BREAKC, VSIR_OP_BREAKP, "", "u",
shader_sm4_read_conditional_op, true},
- {VKD3D_SM4_OP_CASE, VKD3DSIH_CASE, "", "u",
+ {VKD3D_SM4_OP_CASE, VSIR_OP_CASE, "", "u",
shader_sm4_read_case_condition},
- {VKD3D_SM4_OP_CONTINUE, VKD3DSIH_CONTINUE, "", ""},
- {VKD3D_SM4_OP_CONTINUEC, VKD3DSIH_CONTINUEP, "", "u",
+ {VKD3D_SM4_OP_CONTINUE, VSIR_OP_CONTINUE, "", ""},
+ {VKD3D_SM4_OP_CONTINUEC, VSIR_OP_CONTINUEP, "", "u",
shader_sm4_read_conditional_op, true},
- {VKD3D_SM4_OP_CUT, VKD3DSIH_CUT, "", ""},
- {VKD3D_SM4_OP_DEFAULT, VKD3DSIH_DEFAULT, "", ""},
- {VKD3D_SM4_OP_DERIV_RTX, VKD3DSIH_DSX, "f", "f"},
- {VKD3D_SM4_OP_DERIV_RTY, VKD3DSIH_DSY, "f", "f"},
- {VKD3D_SM4_OP_DISCARD, VKD3DSIH_DISCARD, "", "u",
+ {VKD3D_SM4_OP_CUT, VSIR_OP_CUT, "", ""},
+ {VKD3D_SM4_OP_DEFAULT, VSIR_OP_DEFAULT, "", ""},
+ {VKD3D_SM4_OP_DERIV_RTX, VSIR_OP_DSX, "f", "f"},
+ {VKD3D_SM4_OP_DERIV_RTY, VSIR_OP_DSY, "f", "f"},
+ {VKD3D_SM4_OP_DISCARD, VSIR_OP_DISCARD, "", "u",
shader_sm4_read_conditional_op, true},
- {VKD3D_SM4_OP_DIV, VKD3DSIH_DIV, "f", "ff"},
- {VKD3D_SM4_OP_DP2, VKD3DSIH_DP2, "f", "ff"},
- {VKD3D_SM4_OP_DP3, VKD3DSIH_DP3, "f", "ff"},
- {VKD3D_SM4_OP_DP4, VKD3DSIH_DP4, "f", "ff"},
- {VKD3D_SM4_OP_ELSE, VKD3DSIH_ELSE, "", ""},
- {VKD3D_SM4_OP_EMIT, VKD3DSIH_EMIT, "", ""},
- {VKD3D_SM4_OP_ENDIF, VKD3DSIH_ENDIF, "", ""},
- {VKD3D_SM4_OP_ENDLOOP, VKD3DSIH_ENDLOOP, "", ""},
- {VKD3D_SM4_OP_ENDSWITCH, VKD3DSIH_ENDSWITCH, "", ""},
- {VKD3D_SM4_OP_EQ, VKD3DSIH_EQO, "u", "ff"},
- {VKD3D_SM4_OP_EXP, VKD3DSIH_EXP, "f", "f"},
- {VKD3D_SM4_OP_FRC, VKD3DSIH_FRC, "f", "f"},
- {VKD3D_SM4_OP_FTOI, VKD3DSIH_FTOI, "i", "f"},
- {VKD3D_SM4_OP_FTOU, VKD3DSIH_FTOU, "u", "f"},
- {VKD3D_SM4_OP_GE, VKD3DSIH_GEO, "u", "ff"},
- {VKD3D_SM4_OP_IADD, VKD3DSIH_IADD, "i", "ii"},
- {VKD3D_SM4_OP_IF, VKD3DSIH_IF, "", "u",
+ {VKD3D_SM4_OP_DIV, VSIR_OP_DIV, "f", "ff"},
+ {VKD3D_SM4_OP_DP2, VSIR_OP_DP2, "f", "ff"},
+ {VKD3D_SM4_OP_DP3, VSIR_OP_DP3, "f", "ff"},
+ {VKD3D_SM4_OP_DP4, VSIR_OP_DP4, "f", "ff"},
+ {VKD3D_SM4_OP_ELSE, VSIR_OP_ELSE, "", ""},
+ {VKD3D_SM4_OP_EMIT, VSIR_OP_EMIT, "", ""},
+ {VKD3D_SM4_OP_ENDIF, VSIR_OP_ENDIF, "", ""},
+ {VKD3D_SM4_OP_ENDLOOP, VSIR_OP_ENDLOOP, "", ""},
+ {VKD3D_SM4_OP_ENDSWITCH, VSIR_OP_ENDSWITCH, "", ""},
+ {VKD3D_SM4_OP_EQ, VSIR_OP_EQO, "u", "ff"},
+ {VKD3D_SM4_OP_EXP, VSIR_OP_EXP, "f", "f"},
+ {VKD3D_SM4_OP_FRC, VSIR_OP_FRC, "f", "f"},
+ {VKD3D_SM4_OP_FTOI, VSIR_OP_FTOI, "i", "f"},
+ {VKD3D_SM4_OP_FTOU, VSIR_OP_FTOU, "u", "f"},
+ {VKD3D_SM4_OP_GE, VSIR_OP_GEO, "u", "ff"},
+ {VKD3D_SM4_OP_IADD, VSIR_OP_IADD, "i", "ii"},
+ {VKD3D_SM4_OP_IF, VSIR_OP_IF, "", "u",
shader_sm4_read_conditional_op, true},
- {VKD3D_SM4_OP_IEQ, VKD3DSIH_IEQ, "u", "ii"},
- {VKD3D_SM4_OP_IGE, VKD3DSIH_IGE, "u", "ii"},
- {VKD3D_SM4_OP_ILT, VKD3DSIH_ILT, "u", "ii"},
- {VKD3D_SM4_OP_IMAD, VKD3DSIH_IMAD, "i", "iii"},
- {VKD3D_SM4_OP_IMAX, VKD3DSIH_IMAX, "i", "ii"},
- {VKD3D_SM4_OP_IMIN, VKD3DSIH_IMIN, "i", "ii"},
- {VKD3D_SM4_OP_IMUL, VKD3DSIH_IMUL, "ii", "ii"},
- {VKD3D_SM4_OP_INE, VKD3DSIH_INE, "u", "ii"},
- {VKD3D_SM4_OP_INEG, VKD3DSIH_INEG, "i", "i"},
- {VKD3D_SM4_OP_ISHL, VKD3DSIH_ISHL, "i", "ii"},
- {VKD3D_SM4_OP_ISHR, VKD3DSIH_ISHR, "i", "ii"},
- {VKD3D_SM4_OP_ITOF, VKD3DSIH_ITOF, "f", "i"},
- {VKD3D_SM4_OP_LABEL, VKD3DSIH_LABEL, "", "O"},
- {VKD3D_SM4_OP_LD, VKD3DSIH_LD, "u", "i*"},
- {VKD3D_SM4_OP_LD2DMS, VKD3DSIH_LD2DMS, "u", "i*i"},
- {VKD3D_SM4_OP_LOG, VKD3DSIH_LOG, "f", "f"},
- {VKD3D_SM4_OP_LOOP, VKD3DSIH_LOOP, "", ""},
- {VKD3D_SM4_OP_LT, VKD3DSIH_LTO, "u", "ff"},
- {VKD3D_SM4_OP_MAD, VKD3DSIH_MAD, "f", "fff"},
- {VKD3D_SM4_OP_MIN, VKD3DSIH_MIN, "f", "ff"},
- {VKD3D_SM4_OP_MAX, VKD3DSIH_MAX, "f", "ff"},
- {VKD3D_SM4_OP_SHADER_DATA, VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER, "", "",
+ {VKD3D_SM4_OP_IEQ, VSIR_OP_IEQ, "u", "ii"},
+ {VKD3D_SM4_OP_IGE, VSIR_OP_IGE, "u", "ii"},
+ {VKD3D_SM4_OP_ILT, VSIR_OP_ILT, "u", "ii"},
+ {VKD3D_SM4_OP_IMAD, VSIR_OP_IMAD, "i", "iii"},
+ {VKD3D_SM4_OP_IMAX, VSIR_OP_IMAX, "i", "ii"},
+ {VKD3D_SM4_OP_IMIN, VSIR_OP_IMIN, "i", "ii"},
+ {VKD3D_SM4_OP_IMUL, VSIR_OP_IMUL, "ii", "ii"},
+ {VKD3D_SM4_OP_INE, VSIR_OP_INE, "u", "ii"},
+ {VKD3D_SM4_OP_INEG, VSIR_OP_INEG, "i", "i"},
+ {VKD3D_SM4_OP_ISHL, VSIR_OP_ISHL, "i", "ii"},
+ {VKD3D_SM4_OP_ISHR, VSIR_OP_ISHR, "i", "ii"},
+ {VKD3D_SM4_OP_ITOF, VSIR_OP_ITOF, "f", "i"},
+ {VKD3D_SM4_OP_LABEL, VSIR_OP_LABEL, "", "O"},
+ {VKD3D_SM4_OP_LD, VSIR_OP_LD, "u", "i*"},
+ {VKD3D_SM4_OP_LD2DMS, VSIR_OP_LD2DMS, "u", "i*i"},
+ {VKD3D_SM4_OP_LOG, VSIR_OP_LOG, "f", "f"},
+ {VKD3D_SM4_OP_LOOP, VSIR_OP_LOOP, "", ""},
+ {VKD3D_SM4_OP_LT, VSIR_OP_LTO, "u", "ff"},
+ {VKD3D_SM4_OP_MAD, VSIR_OP_MAD, "f", "fff"},
+ {VKD3D_SM4_OP_MIN, VSIR_OP_MIN, "f", "ff"},
+ {VKD3D_SM4_OP_MAX, VSIR_OP_MAX, "f", "ff"},
+ {VKD3D_SM4_OP_SHADER_DATA, VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER, "", "",
shader_sm4_read_shader_data},
- {VKD3D_SM4_OP_MOV, VKD3DSIH_MOV, "f", "f"},
- {VKD3D_SM4_OP_MOVC, VKD3DSIH_MOVC, "f", "uff"},
- {VKD3D_SM4_OP_MUL, VKD3DSIH_MUL, "f", "ff"},
- {VKD3D_SM4_OP_NE, VKD3DSIH_NEU, "u", "ff"},
- {VKD3D_SM4_OP_NOP, VKD3DSIH_NOP, "", ""},
- {VKD3D_SM4_OP_NOT, VKD3DSIH_NOT, "u", "u"},
- {VKD3D_SM4_OP_OR, VKD3DSIH_OR, "u", "uu"},
- {VKD3D_SM4_OP_RESINFO, VKD3DSIH_RESINFO, "f", "i*"},
- {VKD3D_SM4_OP_RET, VKD3DSIH_RET, "", ""},
- {VKD3D_SM4_OP_RETC, VKD3DSIH_RETP, "", "u",
+ {VKD3D_SM4_OP_MOV, VSIR_OP_MOV, "f", "f"},
+ {VKD3D_SM4_OP_MOVC, VSIR_OP_MOVC, "f", "uff"},
+ {VKD3D_SM4_OP_MUL, VSIR_OP_MUL, "f", "ff"},
+ {VKD3D_SM4_OP_NE, VSIR_OP_NEU, "u", "ff"},
+ {VKD3D_SM4_OP_NOP, VSIR_OP_NOP, "", ""},
+ {VKD3D_SM4_OP_NOT, VSIR_OP_NOT, "u", "u"},
+ {VKD3D_SM4_OP_OR, VSIR_OP_OR, "u", "uu"},
+ {VKD3D_SM4_OP_RESINFO, VSIR_OP_RESINFO, "f", "i*"},
+ {VKD3D_SM4_OP_RET, VSIR_OP_RET, "", ""},
+ {VKD3D_SM4_OP_RETC, VSIR_OP_RETP, "", "u",
shader_sm4_read_conditional_op, true},
- {VKD3D_SM4_OP_ROUND_NE, VKD3DSIH_ROUND_NE, "f", "f"},
- {VKD3D_SM4_OP_ROUND_NI, VKD3DSIH_ROUND_NI, "f", "f"},
- {VKD3D_SM4_OP_ROUND_PI, VKD3DSIH_ROUND_PI, "f", "f"},
- {VKD3D_SM4_OP_ROUND_Z, VKD3DSIH_ROUND_Z, "f", "f"},
- {VKD3D_SM4_OP_RSQ, VKD3DSIH_RSQ, "f", "f"},
- {VKD3D_SM4_OP_SAMPLE, VKD3DSIH_SAMPLE, "u", "f**"},
- {VKD3D_SM4_OP_SAMPLE_C, VKD3DSIH_SAMPLE_C, "f", "f**f"},
- {VKD3D_SM4_OP_SAMPLE_C_LZ, VKD3DSIH_SAMPLE_C_LZ, "f", "f**f"},
- {VKD3D_SM4_OP_SAMPLE_LOD, VKD3DSIH_SAMPLE_LOD, "u", "f**f"},
- {VKD3D_SM4_OP_SAMPLE_GRAD, VKD3DSIH_SAMPLE_GRAD, "u", "f**ff"},
- {VKD3D_SM4_OP_SAMPLE_B, VKD3DSIH_SAMPLE_B, "u", "f**f"},
- {VKD3D_SM4_OP_SQRT, VKD3DSIH_SQRT, "f", "f"},
- {VKD3D_SM4_OP_SWITCH, VKD3DSIH_SWITCH, "", "i"},
- {VKD3D_SM4_OP_SINCOS, VKD3DSIH_SINCOS, "ff", "f"},
- {VKD3D_SM4_OP_UDIV, VKD3DSIH_UDIV, "uu", "uu"},
- {VKD3D_SM4_OP_ULT, VKD3DSIH_ULT, "u", "uu"},
- {VKD3D_SM4_OP_UGE, VKD3DSIH_UGE, "u", "uu"},
- {VKD3D_SM4_OP_UMUL, VKD3DSIH_UMUL, "uu", "uu"},
- {VKD3D_SM4_OP_UMAX, VKD3DSIH_UMAX, "u", "uu"},
- {VKD3D_SM4_OP_UMIN, VKD3DSIH_UMIN, "u", "uu"},
- {VKD3D_SM4_OP_USHR, VKD3DSIH_USHR, "u", "uu"},
- {VKD3D_SM4_OP_UTOF, VKD3DSIH_UTOF, "f", "u"},
- {VKD3D_SM4_OP_XOR, VKD3DSIH_XOR, "u", "uu"},
- {VKD3D_SM4_OP_DCL_RESOURCE, VKD3DSIH_DCL, "", "",
+ {VKD3D_SM4_OP_ROUND_NE, VSIR_OP_ROUND_NE, "f", "f"},
+ {VKD3D_SM4_OP_ROUND_NI, VSIR_OP_ROUND_NI, "f", "f"},
+ {VKD3D_SM4_OP_ROUND_PI, VSIR_OP_ROUND_PI, "f", "f"},
+ {VKD3D_SM4_OP_ROUND_Z, VSIR_OP_ROUND_Z, "f", "f"},
+ {VKD3D_SM4_OP_RSQ, VSIR_OP_RSQ, "f", "f"},
+ {VKD3D_SM4_OP_SAMPLE, VSIR_OP_SAMPLE, "u", "f**"},
+ {VKD3D_SM4_OP_SAMPLE_C, VSIR_OP_SAMPLE_C, "f", "f**f"},
+ {VKD3D_SM4_OP_SAMPLE_C_LZ, VSIR_OP_SAMPLE_C_LZ, "f", "f**f"},
+ {VKD3D_SM4_OP_SAMPLE_LOD, VSIR_OP_SAMPLE_LOD, "u", "f**f"},
+ {VKD3D_SM4_OP_SAMPLE_GRAD, VSIR_OP_SAMPLE_GRAD, "u", "f**ff"},
+ {VKD3D_SM4_OP_SAMPLE_B, VSIR_OP_SAMPLE_B, "u", "f**f"},
+ {VKD3D_SM4_OP_SQRT, VSIR_OP_SQRT, "f", "f"},
+ {VKD3D_SM4_OP_SWITCH, VSIR_OP_SWITCH, "", "i"},
+ {VKD3D_SM4_OP_SINCOS, VSIR_OP_SINCOS, "ff", "f"},
+ {VKD3D_SM4_OP_UDIV, VSIR_OP_UDIV, "uu", "uu"},
+ {VKD3D_SM4_OP_ULT, VSIR_OP_ULT, "u", "uu"},
+ {VKD3D_SM4_OP_UGE, VSIR_OP_UGE, "u", "uu"},
+ {VKD3D_SM4_OP_UMUL, VSIR_OP_UMUL, "uu", "uu"},
+ {VKD3D_SM4_OP_UMAX, VSIR_OP_UMAX, "u", "uu"},
+ {VKD3D_SM4_OP_UMIN, VSIR_OP_UMIN, "u", "uu"},
+ {VKD3D_SM4_OP_USHR, VSIR_OP_USHR, "u", "uu"},
+ {VKD3D_SM4_OP_UTOF, VSIR_OP_UTOF, "f", "u"},
+ {VKD3D_SM4_OP_XOR, VSIR_OP_XOR, "u", "uu"},
+ {VKD3D_SM4_OP_DCL_RESOURCE, VSIR_OP_DCL, "", "",
shader_sm4_read_dcl_resource},
- {VKD3D_SM4_OP_DCL_CONSTANT_BUFFER, VKD3DSIH_DCL_CONSTANT_BUFFER, "", "",
+ {VKD3D_SM4_OP_DCL_CONSTANT_BUFFER, VSIR_OP_DCL_CONSTANT_BUFFER, "", "",
shader_sm4_read_dcl_constant_buffer},
- {VKD3D_SM4_OP_DCL_SAMPLER, VKD3DSIH_DCL_SAMPLER, "", "",
+ {VKD3D_SM4_OP_DCL_SAMPLER, VSIR_OP_DCL_SAMPLER, "", "",
shader_sm4_read_dcl_sampler},
- {VKD3D_SM4_OP_DCL_INDEX_RANGE, VKD3DSIH_DCL_INDEX_RANGE, "", "",
+ {VKD3D_SM4_OP_DCL_INDEX_RANGE, VSIR_OP_DCL_INDEX_RANGE, "", "",
shader_sm4_read_dcl_index_range},
- {VKD3D_SM4_OP_DCL_OUTPUT_TOPOLOGY, VKD3DSIH_DCL_OUTPUT_TOPOLOGY, "", "",
+ {VKD3D_SM4_OP_DCL_OUTPUT_TOPOLOGY, VSIR_OP_DCL_OUTPUT_TOPOLOGY, "", "",
shader_sm4_read_dcl_output_topology},
- {VKD3D_SM4_OP_DCL_INPUT_PRIMITIVE, VKD3DSIH_DCL_INPUT_PRIMITIVE, "", "",
+ {VKD3D_SM4_OP_DCL_INPUT_PRIMITIVE, VSIR_OP_DCL_INPUT_PRIMITIVE, "", "",
shader_sm4_read_dcl_input_primitive},
- {VKD3D_SM4_OP_DCL_VERTICES_OUT, VKD3DSIH_DCL_VERTICES_OUT, "", "",
+ {VKD3D_SM4_OP_DCL_VERTICES_OUT, VSIR_OP_DCL_VERTICES_OUT, "", "",
shader_sm4_read_declaration_count},
- {VKD3D_SM4_OP_DCL_INPUT, VKD3DSIH_DCL_INPUT, "", "",
+ {VKD3D_SM4_OP_DCL_INPUT, VSIR_OP_DCL_INPUT, "", "",
shader_sm4_read_declaration_dst},
- {VKD3D_SM4_OP_DCL_INPUT_SGV, VKD3DSIH_DCL_INPUT_SGV, "", "",
+ {VKD3D_SM4_OP_DCL_INPUT_SGV, VSIR_OP_DCL_INPUT_SGV, "", "",
shader_sm4_read_declaration_register_semantic},
- {VKD3D_SM4_OP_DCL_INPUT_SIV, VKD3DSIH_DCL_INPUT_SIV, "", "",
+ {VKD3D_SM4_OP_DCL_INPUT_SIV, VSIR_OP_DCL_INPUT_SIV, "", "",
shader_sm4_read_declaration_register_semantic},
- {VKD3D_SM4_OP_DCL_INPUT_PS, VKD3DSIH_DCL_INPUT_PS, "", "",
+ {VKD3D_SM4_OP_DCL_INPUT_PS, VSIR_OP_DCL_INPUT_PS, "", "",
shader_sm4_read_dcl_input_ps},
- {VKD3D_SM4_OP_DCL_INPUT_PS_SGV, VKD3DSIH_DCL_INPUT_PS_SGV, "", "",
+ {VKD3D_SM4_OP_DCL_INPUT_PS_SGV, VSIR_OP_DCL_INPUT_PS_SGV, "", "",
shader_sm4_read_declaration_register_semantic},
- {VKD3D_SM4_OP_DCL_INPUT_PS_SIV, VKD3DSIH_DCL_INPUT_PS_SIV, "", "",
+ {VKD3D_SM4_OP_DCL_INPUT_PS_SIV, VSIR_OP_DCL_INPUT_PS_SIV, "", "",
shader_sm4_read_dcl_input_ps_siv},
- {VKD3D_SM4_OP_DCL_OUTPUT, VKD3DSIH_DCL_OUTPUT, "", "",
+ {VKD3D_SM4_OP_DCL_OUTPUT, VSIR_OP_DCL_OUTPUT, "", "",
shader_sm4_read_declaration_dst},
- {VKD3D_SM4_OP_DCL_OUTPUT_SGV, VKD3DSIH_DCL_OUTPUT_SGV, "", "",
+ {VKD3D_SM4_OP_DCL_OUTPUT_SGV, VSIR_OP_DCL_OUTPUT_SGV, "", "",
shader_sm4_read_declaration_register_semantic},
- {VKD3D_SM4_OP_DCL_OUTPUT_SIV, VKD3DSIH_DCL_OUTPUT_SIV, "", "",
+ {VKD3D_SM4_OP_DCL_OUTPUT_SIV, VSIR_OP_DCL_OUTPUT_SIV, "", "",
shader_sm4_read_declaration_register_semantic},
- {VKD3D_SM4_OP_DCL_TEMPS, VKD3DSIH_DCL_TEMPS, "", "",
+ {VKD3D_SM4_OP_DCL_TEMPS, VSIR_OP_DCL_TEMPS, "", "",
shader_sm4_read_declaration_count},
- {VKD3D_SM4_OP_DCL_INDEXABLE_TEMP, VKD3DSIH_DCL_INDEXABLE_TEMP, "", "",
+ {VKD3D_SM4_OP_DCL_INDEXABLE_TEMP, VSIR_OP_DCL_INDEXABLE_TEMP, "", "",
shader_sm4_read_dcl_indexable_temp},
- {VKD3D_SM4_OP_DCL_GLOBAL_FLAGS, VKD3DSIH_DCL_GLOBAL_FLAGS, "", "",
+ {VKD3D_SM4_OP_DCL_GLOBAL_FLAGS, VSIR_OP_DCL_GLOBAL_FLAGS, "", "",
shader_sm4_read_dcl_global_flags},
- {VKD3D_SM4_OP_LOD, VKD3DSIH_LOD, "f", "f**"},
- {VKD3D_SM4_OP_GATHER4, VKD3DSIH_GATHER4, "u", "f**"},
- {VKD3D_SM4_OP_SAMPLE_POS, VKD3DSIH_SAMPLE_POS, "f", "*u"},
- {VKD3D_SM4_OP_SAMPLE_INFO, VKD3DSIH_SAMPLE_INFO, "f", "*"},
- {VKD3D_SM5_OP_HS_DECLS, VKD3DSIH_HS_DECLS, "", ""},
- {VKD3D_SM5_OP_HS_CONTROL_POINT_PHASE, VKD3DSIH_HS_CONTROL_POINT_PHASE, "", ""},
- {VKD3D_SM5_OP_HS_FORK_PHASE, VKD3DSIH_HS_FORK_PHASE, "", ""},
- {VKD3D_SM5_OP_HS_JOIN_PHASE, VKD3DSIH_HS_JOIN_PHASE, "", ""},
- {VKD3D_SM5_OP_EMIT_STREAM, VKD3DSIH_EMIT_STREAM, "", "f"},
- {VKD3D_SM5_OP_CUT_STREAM, VKD3DSIH_CUT_STREAM, "", "f"},
- {VKD3D_SM5_OP_FCALL, VKD3DSIH_FCALL, "", "O",
+ {VKD3D_SM4_OP_LOD, VSIR_OP_LOD, "f", "f**"},
+ {VKD3D_SM4_OP_GATHER4, VSIR_OP_GATHER4, "u", "f**"},
+ {VKD3D_SM4_OP_SAMPLE_POS, VSIR_OP_SAMPLE_POS, "f", "*u"},
+ {VKD3D_SM4_OP_SAMPLE_INFO, VSIR_OP_SAMPLE_INFO, "f", "*"},
+ {VKD3D_SM5_OP_HS_DECLS, VSIR_OP_HS_DECLS, "", ""},
+ {VKD3D_SM5_OP_HS_CONTROL_POINT_PHASE, VSIR_OP_HS_CONTROL_POINT_PHASE, "", ""},
+ {VKD3D_SM5_OP_HS_FORK_PHASE, VSIR_OP_HS_FORK_PHASE, "", ""},
+ {VKD3D_SM5_OP_HS_JOIN_PHASE, VSIR_OP_HS_JOIN_PHASE, "", ""},
+ {VKD3D_SM5_OP_EMIT_STREAM, VSIR_OP_EMIT_STREAM, "", "f"},
+ {VKD3D_SM5_OP_CUT_STREAM, VSIR_OP_CUT_STREAM, "", "f"},
+ {VKD3D_SM5_OP_FCALL, VSIR_OP_FCALL, "", "O",
shader_sm5_read_fcall},
- {VKD3D_SM5_OP_BUFINFO, VKD3DSIH_BUFINFO, "i", "*"},
- {VKD3D_SM5_OP_DERIV_RTX_COARSE, VKD3DSIH_DSX_COARSE, "f", "f"},
- {VKD3D_SM5_OP_DERIV_RTX_FINE, VKD3DSIH_DSX_FINE, "f", "f"},
- {VKD3D_SM5_OP_DERIV_RTY_COARSE, VKD3DSIH_DSY_COARSE, "f", "f"},
- {VKD3D_SM5_OP_DERIV_RTY_FINE, VKD3DSIH_DSY_FINE, "f", "f"},
- {VKD3D_SM5_OP_GATHER4_C, VKD3DSIH_GATHER4_C, "f", "f**f"},
- {VKD3D_SM5_OP_GATHER4_PO, VKD3DSIH_GATHER4_PO, "f", "fi**"},
- {VKD3D_SM5_OP_GATHER4_PO_C, VKD3DSIH_GATHER4_PO_C, "f", "fi**f"},
- {VKD3D_SM5_OP_RCP, VKD3DSIH_RCP, "f", "f"},
- {VKD3D_SM5_OP_F32TOF16, VKD3DSIH_F32TOF16, "u", "f"},
- {VKD3D_SM5_OP_F16TOF32, VKD3DSIH_F16TOF32, "f", "u"},
- {VKD3D_SM5_OP_COUNTBITS, VKD3DSIH_COUNTBITS, "u", "u"},
- {VKD3D_SM5_OP_FIRSTBIT_HI, VKD3DSIH_FIRSTBIT_HI, "u", "u"},
- {VKD3D_SM5_OP_FIRSTBIT_LO, VKD3DSIH_FIRSTBIT_LO, "u", "u"},
- {VKD3D_SM5_OP_FIRSTBIT_SHI, VKD3DSIH_FIRSTBIT_SHI, "u", "i"},
- {VKD3D_SM5_OP_UBFE, VKD3DSIH_UBFE, "u", "iiu"},
- {VKD3D_SM5_OP_IBFE, VKD3DSIH_IBFE, "i", "iii"},
- {VKD3D_SM5_OP_BFI, VKD3DSIH_BFI, "u", "iiuu"},
- {VKD3D_SM5_OP_BFREV, VKD3DSIH_BFREV, "u", "u"},
- {VKD3D_SM5_OP_SWAPC, VKD3DSIH_SWAPC, "ff", "uff"},
- {VKD3D_SM5_OP_DCL_STREAM, VKD3DSIH_DCL_STREAM, "", "O"},
- {VKD3D_SM5_OP_DCL_FUNCTION_BODY, VKD3DSIH_DCL_FUNCTION_BODY, "", "",
+ {VKD3D_SM5_OP_BUFINFO, VSIR_OP_BUFINFO, "i", "*"},
+ {VKD3D_SM5_OP_DERIV_RTX_COARSE, VSIR_OP_DSX_COARSE, "f", "f"},
+ {VKD3D_SM5_OP_DERIV_RTX_FINE, VSIR_OP_DSX_FINE, "f", "f"},
+ {VKD3D_SM5_OP_DERIV_RTY_COARSE, VSIR_OP_DSY_COARSE, "f", "f"},
+ {VKD3D_SM5_OP_DERIV_RTY_FINE, VSIR_OP_DSY_FINE, "f", "f"},
+ {VKD3D_SM5_OP_GATHER4_C, VSIR_OP_GATHER4_C, "f", "f**f"},
+ {VKD3D_SM5_OP_GATHER4_PO, VSIR_OP_GATHER4_PO, "f", "fi**"},
+ {VKD3D_SM5_OP_GATHER4_PO_C, VSIR_OP_GATHER4_PO_C, "f", "fi**f"},
+ {VKD3D_SM5_OP_RCP, VSIR_OP_RCP, "f", "f"},
+ {VKD3D_SM5_OP_F32TOF16, VSIR_OP_F32TOF16, "u", "f"},
+ {VKD3D_SM5_OP_F16TOF32, VSIR_OP_F16TOF32, "f", "u"},
+ {VKD3D_SM5_OP_COUNTBITS, VSIR_OP_COUNTBITS, "u", "u"},
+ {VKD3D_SM5_OP_FIRSTBIT_HI, VSIR_OP_FIRSTBIT_HI, "u", "u"},
+ {VKD3D_SM5_OP_FIRSTBIT_LO, VSIR_OP_FIRSTBIT_LO, "u", "u"},
+ {VKD3D_SM5_OP_FIRSTBIT_SHI, VSIR_OP_FIRSTBIT_SHI, "u", "i"},
+ {VKD3D_SM5_OP_UBFE, VSIR_OP_UBFE, "u", "iiu"},
+ {VKD3D_SM5_OP_IBFE, VSIR_OP_IBFE, "i", "iii"},
+ {VKD3D_SM5_OP_BFI, VSIR_OP_BFI, "u", "iiuu"},
+ {VKD3D_SM5_OP_BFREV, VSIR_OP_BFREV, "u", "u"},
+ {VKD3D_SM5_OP_SWAPC, VSIR_OP_SWAPC, "ff", "uff"},
+ {VKD3D_SM5_OP_DCL_STREAM, VSIR_OP_DCL_STREAM, "", "O"},
+ {VKD3D_SM5_OP_DCL_FUNCTION_BODY, VSIR_OP_DCL_FUNCTION_BODY, "", "",
shader_sm5_read_dcl_function_body},
- {VKD3D_SM5_OP_DCL_FUNCTION_TABLE, VKD3DSIH_DCL_FUNCTION_TABLE, "", "",
+ {VKD3D_SM5_OP_DCL_FUNCTION_TABLE, VSIR_OP_DCL_FUNCTION_TABLE, "", "",
shader_sm5_read_dcl_function_table},
- {VKD3D_SM5_OP_DCL_INTERFACE, VKD3DSIH_DCL_INTERFACE, "", "",
+ {VKD3D_SM5_OP_DCL_INTERFACE, VSIR_OP_DCL_INTERFACE, "", "",
shader_sm5_read_dcl_interface},
- {VKD3D_SM5_OP_DCL_INPUT_CONTROL_POINT_COUNT, VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT, "", "",
+ {VKD3D_SM5_OP_DCL_INPUT_CONTROL_POINT_COUNT, VSIR_OP_DCL_INPUT_CONTROL_POINT_COUNT, "", "",
shader_sm5_read_control_point_count},
- {VKD3D_SM5_OP_DCL_OUTPUT_CONTROL_POINT_COUNT, VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT, "", "",
+ {VKD3D_SM5_OP_DCL_OUTPUT_CONTROL_POINT_COUNT, VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT, "", "",
shader_sm5_read_control_point_count},
- {VKD3D_SM5_OP_DCL_TESSELLATOR_DOMAIN, VKD3DSIH_DCL_TESSELLATOR_DOMAIN, "", "",
+ {VKD3D_SM5_OP_DCL_TESSELLATOR_DOMAIN, VSIR_OP_DCL_TESSELLATOR_DOMAIN, "", "",
shader_sm5_read_dcl_tessellator_domain},
- {VKD3D_SM5_OP_DCL_TESSELLATOR_PARTITIONING, VKD3DSIH_DCL_TESSELLATOR_PARTITIONING, "", "",
+ {VKD3D_SM5_OP_DCL_TESSELLATOR_PARTITIONING, VSIR_OP_DCL_TESSELLATOR_PARTITIONING, "", "",
shader_sm5_read_dcl_tessellator_partitioning},
- {VKD3D_SM5_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE, VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE, "", "",
+ {VKD3D_SM5_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE, VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE, "", "",
shader_sm5_read_dcl_tessellator_output_primitive},
- {VKD3D_SM5_OP_DCL_HS_MAX_TESSFACTOR, VKD3DSIH_DCL_HS_MAX_TESSFACTOR, "", "",
+ {VKD3D_SM5_OP_DCL_HS_MAX_TESSFACTOR, VSIR_OP_DCL_HS_MAX_TESSFACTOR, "", "",
shader_sm5_read_dcl_hs_max_tessfactor},
- {VKD3D_SM5_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT, VKD3DSIH_DCL_HS_FORK_PHASE_INSTANCE_COUNT, "", "",
+ {VKD3D_SM5_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT, VSIR_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT, "", "",
shader_sm4_read_declaration_count},
- {VKD3D_SM5_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT, VKD3DSIH_DCL_HS_JOIN_PHASE_INSTANCE_COUNT, "", "",
+ {VKD3D_SM5_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT, VSIR_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT, "", "",
shader_sm4_read_declaration_count},
- {VKD3D_SM5_OP_DCL_THREAD_GROUP, VKD3DSIH_DCL_THREAD_GROUP, "", "",
+ {VKD3D_SM5_OP_DCL_THREAD_GROUP, VSIR_OP_DCL_THREAD_GROUP, "", "",
shader_sm5_read_dcl_thread_group},
- {VKD3D_SM5_OP_DCL_UAV_TYPED, VKD3DSIH_DCL_UAV_TYPED, "", "",
+ {VKD3D_SM5_OP_DCL_UAV_TYPED, VSIR_OP_DCL_UAV_TYPED, "", "",
shader_sm4_read_dcl_resource},
- {VKD3D_SM5_OP_DCL_UAV_RAW, VKD3DSIH_DCL_UAV_RAW, "", "",
+ {VKD3D_SM5_OP_DCL_UAV_RAW, VSIR_OP_DCL_UAV_RAW, "", "",
shader_sm5_read_dcl_uav_raw},
- {VKD3D_SM5_OP_DCL_UAV_STRUCTURED, VKD3DSIH_DCL_UAV_STRUCTURED, "", "",
+ {VKD3D_SM5_OP_DCL_UAV_STRUCTURED, VSIR_OP_DCL_UAV_STRUCTURED, "", "",
shader_sm5_read_dcl_uav_structured},
- {VKD3D_SM5_OP_DCL_TGSM_RAW, VKD3DSIH_DCL_TGSM_RAW, "", "",
+ {VKD3D_SM5_OP_DCL_TGSM_RAW, VSIR_OP_DCL_TGSM_RAW, "", "",
shader_sm5_read_dcl_tgsm_raw},
- {VKD3D_SM5_OP_DCL_TGSM_STRUCTURED, VKD3DSIH_DCL_TGSM_STRUCTURED, "", "",
+ {VKD3D_SM5_OP_DCL_TGSM_STRUCTURED, VSIR_OP_DCL_TGSM_STRUCTURED, "", "",
shader_sm5_read_dcl_tgsm_structured},
- {VKD3D_SM5_OP_DCL_RESOURCE_RAW, VKD3DSIH_DCL_RESOURCE_RAW, "", "",
+ {VKD3D_SM5_OP_DCL_RESOURCE_RAW, VSIR_OP_DCL_RESOURCE_RAW, "", "",
shader_sm5_read_dcl_resource_raw},
- {VKD3D_SM5_OP_DCL_RESOURCE_STRUCTURED, VKD3DSIH_DCL_RESOURCE_STRUCTURED, "", "",
+ {VKD3D_SM5_OP_DCL_RESOURCE_STRUCTURED, VSIR_OP_DCL_RESOURCE_STRUCTURED, "", "",
shader_sm5_read_dcl_resource_structured},
- {VKD3D_SM5_OP_LD_UAV_TYPED, VKD3DSIH_LD_UAV_TYPED, "u", "i*"},
- {VKD3D_SM5_OP_STORE_UAV_TYPED, VKD3DSIH_STORE_UAV_TYPED, "*", "iu"},
- {VKD3D_SM5_OP_LD_RAW, VKD3DSIH_LD_RAW, "u", "i*"},
- {VKD3D_SM5_OP_STORE_RAW, VKD3DSIH_STORE_RAW, "*", "uu"},
- {VKD3D_SM5_OP_LD_STRUCTURED, VKD3DSIH_LD_STRUCTURED, "u", "ii*"},
- {VKD3D_SM5_OP_STORE_STRUCTURED, VKD3DSIH_STORE_STRUCTURED, "*", "iiu"},
- {VKD3D_SM5_OP_ATOMIC_AND, VKD3DSIH_ATOMIC_AND, "*", "iu"},
- {VKD3D_SM5_OP_ATOMIC_OR, VKD3DSIH_ATOMIC_OR, "*", "iu"},
- {VKD3D_SM5_OP_ATOMIC_XOR, VKD3DSIH_ATOMIC_XOR, "*", "iu"},
- {VKD3D_SM5_OP_ATOMIC_CMP_STORE, VKD3DSIH_ATOMIC_CMP_STORE, "*", "iuu"},
- {VKD3D_SM5_OP_ATOMIC_IADD, VKD3DSIH_ATOMIC_IADD, "*", "ii"},
- {VKD3D_SM5_OP_ATOMIC_IMAX, VKD3DSIH_ATOMIC_IMAX, "*", "ii"},
- {VKD3D_SM5_OP_ATOMIC_IMIN, VKD3DSIH_ATOMIC_IMIN, "*", "ii"},
- {VKD3D_SM5_OP_ATOMIC_UMAX, VKD3DSIH_ATOMIC_UMAX, "*", "iu"},
- {VKD3D_SM5_OP_ATOMIC_UMIN, VKD3DSIH_ATOMIC_UMIN, "*", "iu"},
- {VKD3D_SM5_OP_IMM_ATOMIC_ALLOC, VKD3DSIH_IMM_ATOMIC_ALLOC, "u", "*"},
- {VKD3D_SM5_OP_IMM_ATOMIC_CONSUME, VKD3DSIH_IMM_ATOMIC_CONSUME, "u", "*"},
- {VKD3D_SM5_OP_IMM_ATOMIC_IADD, VKD3DSIH_IMM_ATOMIC_IADD, "u*", "ii"},
- {VKD3D_SM5_OP_IMM_ATOMIC_AND, VKD3DSIH_IMM_ATOMIC_AND, "u*", "iu"},
- {VKD3D_SM5_OP_IMM_ATOMIC_OR, VKD3DSIH_IMM_ATOMIC_OR, "u*", "iu"},
- {VKD3D_SM5_OP_IMM_ATOMIC_XOR, VKD3DSIH_IMM_ATOMIC_XOR, "u*", "iu"},
- {VKD3D_SM5_OP_IMM_ATOMIC_EXCH, VKD3DSIH_IMM_ATOMIC_EXCH, "u*", "iu"},
- {VKD3D_SM5_OP_IMM_ATOMIC_CMP_EXCH, VKD3DSIH_IMM_ATOMIC_CMP_EXCH, "u*", "iuu"},
- {VKD3D_SM5_OP_IMM_ATOMIC_IMAX, VKD3DSIH_IMM_ATOMIC_IMAX, "i*", "ii"},
- {VKD3D_SM5_OP_IMM_ATOMIC_IMIN, VKD3DSIH_IMM_ATOMIC_IMIN, "i*", "ii"},
- {VKD3D_SM5_OP_IMM_ATOMIC_UMAX, VKD3DSIH_IMM_ATOMIC_UMAX, "u*", "iu"},
- {VKD3D_SM5_OP_IMM_ATOMIC_UMIN, VKD3DSIH_IMM_ATOMIC_UMIN, "u*", "iu"},
- {VKD3D_SM5_OP_SYNC, VKD3DSIH_SYNC, "", "",
+ {VKD3D_SM5_OP_LD_UAV_TYPED, VSIR_OP_LD_UAV_TYPED, "u", "i*"},
+ {VKD3D_SM5_OP_STORE_UAV_TYPED, VSIR_OP_STORE_UAV_TYPED, "*", "iu"},
+ {VKD3D_SM5_OP_LD_RAW, VSIR_OP_LD_RAW, "u", "i*"},
+ {VKD3D_SM5_OP_STORE_RAW, VSIR_OP_STORE_RAW, "*", "uu"},
+ {VKD3D_SM5_OP_LD_STRUCTURED, VSIR_OP_LD_STRUCTURED, "u", "ii*"},
+ {VKD3D_SM5_OP_STORE_STRUCTURED, VSIR_OP_STORE_STRUCTURED, "*", "iiu"},
+ {VKD3D_SM5_OP_ATOMIC_AND, VSIR_OP_ATOMIC_AND, "*", "iu"},
+ {VKD3D_SM5_OP_ATOMIC_OR, VSIR_OP_ATOMIC_OR, "*", "iu"},
+ {VKD3D_SM5_OP_ATOMIC_XOR, VSIR_OP_ATOMIC_XOR, "*", "iu"},
+ {VKD3D_SM5_OP_ATOMIC_CMP_STORE, VSIR_OP_ATOMIC_CMP_STORE, "*", "iuu"},
+ {VKD3D_SM5_OP_ATOMIC_IADD, VSIR_OP_ATOMIC_IADD, "*", "ii"},
+ {VKD3D_SM5_OP_ATOMIC_IMAX, VSIR_OP_ATOMIC_IMAX, "*", "ii"},
+ {VKD3D_SM5_OP_ATOMIC_IMIN, VSIR_OP_ATOMIC_IMIN, "*", "ii"},
+ {VKD3D_SM5_OP_ATOMIC_UMAX, VSIR_OP_ATOMIC_UMAX, "*", "iu"},
+ {VKD3D_SM5_OP_ATOMIC_UMIN, VSIR_OP_ATOMIC_UMIN, "*", "iu"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_ALLOC, VSIR_OP_IMM_ATOMIC_ALLOC, "u", "*"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_CONSUME, VSIR_OP_IMM_ATOMIC_CONSUME, "u", "*"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_IADD, VSIR_OP_IMM_ATOMIC_IADD, "u*", "ii"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_AND, VSIR_OP_IMM_ATOMIC_AND, "u*", "iu"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_OR, VSIR_OP_IMM_ATOMIC_OR, "u*", "iu"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_XOR, VSIR_OP_IMM_ATOMIC_XOR, "u*", "iu"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_EXCH, VSIR_OP_IMM_ATOMIC_EXCH, "u*", "iu"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_CMP_EXCH, VSIR_OP_IMM_ATOMIC_CMP_EXCH, "u*", "iuu"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_IMAX, VSIR_OP_IMM_ATOMIC_IMAX, "i*", "ii"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_IMIN, VSIR_OP_IMM_ATOMIC_IMIN, "i*", "ii"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_UMAX, VSIR_OP_IMM_ATOMIC_UMAX, "u*", "iu"},
+ {VKD3D_SM5_OP_IMM_ATOMIC_UMIN, VSIR_OP_IMM_ATOMIC_UMIN, "u*", "iu"},
+ {VKD3D_SM5_OP_SYNC, VSIR_OP_SYNC, "", "",
shader_sm5_read_sync},
- {VKD3D_SM5_OP_DADD, VKD3DSIH_DADD, "d", "dd"},
- {VKD3D_SM5_OP_DMAX, VKD3DSIH_DMAX, "d", "dd"},
- {VKD3D_SM5_OP_DMIN, VKD3DSIH_DMIN, "d", "dd"},
- {VKD3D_SM5_OP_DMUL, VKD3DSIH_DMUL, "d", "dd"},
- {VKD3D_SM5_OP_DEQ, VKD3DSIH_DEQO, "u", "dd"},
- {VKD3D_SM5_OP_DGE, VKD3DSIH_DGEO, "u", "dd"},
- {VKD3D_SM5_OP_DLT, VKD3DSIH_DLT, "u", "dd"},
- {VKD3D_SM5_OP_DNE, VKD3DSIH_DNE, "u", "dd"},
- {VKD3D_SM5_OP_DMOV, VKD3DSIH_DMOV, "d", "d"},
- {VKD3D_SM5_OP_DMOVC, VKD3DSIH_DMOVC, "d", "udd"},
- {VKD3D_SM5_OP_DTOF, VKD3DSIH_DTOF, "f", "d"},
- {VKD3D_SM5_OP_FTOD, VKD3DSIH_FTOD, "d", "f"},
- {VKD3D_SM5_OP_EVAL_SAMPLE_INDEX, VKD3DSIH_EVAL_SAMPLE_INDEX, "f", "fi"},
- {VKD3D_SM5_OP_EVAL_CENTROID, VKD3DSIH_EVAL_CENTROID, "f", "f"},
- {VKD3D_SM5_OP_DCL_GS_INSTANCES, VKD3DSIH_DCL_GS_INSTANCES, "", "",
+ {VKD3D_SM5_OP_DADD, VSIR_OP_DADD, "d", "dd"},
+ {VKD3D_SM5_OP_DMAX, VSIR_OP_DMAX, "d", "dd"},
+ {VKD3D_SM5_OP_DMIN, VSIR_OP_DMIN, "d", "dd"},
+ {VKD3D_SM5_OP_DMUL, VSIR_OP_DMUL, "d", "dd"},
+ {VKD3D_SM5_OP_DEQ, VSIR_OP_DEQO, "u", "dd"},
+ {VKD3D_SM5_OP_DGE, VSIR_OP_DGEO, "u", "dd"},
+ {VKD3D_SM5_OP_DLT, VSIR_OP_DLT, "u", "dd"},
+ {VKD3D_SM5_OP_DNE, VSIR_OP_DNE, "u", "dd"},
+ {VKD3D_SM5_OP_DMOV, VSIR_OP_DMOV, "d", "d"},
+ {VKD3D_SM5_OP_DMOVC, VSIR_OP_DMOVC, "d", "udd"},
+ {VKD3D_SM5_OP_DTOF, VSIR_OP_DTOF, "f", "d"},
+ {VKD3D_SM5_OP_FTOD, VSIR_OP_FTOD, "d", "f"},
+ {VKD3D_SM5_OP_EVAL_SAMPLE_INDEX, VSIR_OP_EVAL_SAMPLE_INDEX, "f", "fi"},
+ {VKD3D_SM5_OP_EVAL_CENTROID, VSIR_OP_EVAL_CENTROID, "f", "f"},
+ {VKD3D_SM5_OP_DCL_GS_INSTANCES, VSIR_OP_DCL_GS_INSTANCES, "", "",
shader_sm4_read_declaration_count},
- {VKD3D_SM5_OP_DDIV, VKD3DSIH_DDIV, "d", "dd"},
- {VKD3D_SM5_OP_DFMA, VKD3DSIH_DFMA, "d", "ddd"},
- {VKD3D_SM5_OP_DRCP, VKD3DSIH_DRCP, "d", "d"},
- {VKD3D_SM5_OP_MSAD, VKD3DSIH_MSAD, "u", "uuu"},
- {VKD3D_SM5_OP_DTOI, VKD3DSIH_DTOI, "i", "d"},
- {VKD3D_SM5_OP_DTOU, VKD3DSIH_DTOU, "u", "d"},
- {VKD3D_SM5_OP_ITOD, VKD3DSIH_ITOD, "d", "i"},
- {VKD3D_SM5_OP_UTOD, VKD3DSIH_UTOD, "d", "u"},
- {VKD3D_SM5_OP_GATHER4_S, VKD3DSIH_GATHER4_S, "uu", "f**"},
- {VKD3D_SM5_OP_GATHER4_C_S, VKD3DSIH_GATHER4_C_S, "fu", "f**f"},
- {VKD3D_SM5_OP_GATHER4_PO_S, VKD3DSIH_GATHER4_PO_S, "fu", "fi**"},
- {VKD3D_SM5_OP_GATHER4_PO_C_S, VKD3DSIH_GATHER4_PO_C_S, "fu", "fi**f"},
- {VKD3D_SM5_OP_LD_S, VKD3DSIH_LD_S, "uu", "i*"},
- {VKD3D_SM5_OP_LD2DMS_S, VKD3DSIH_LD2DMS_S, "uu", "i*i"},
- {VKD3D_SM5_OP_LD_UAV_TYPED_S, VKD3DSIH_LD_UAV_TYPED_S, "uu", "iU"},
- {VKD3D_SM5_OP_LD_RAW_S, VKD3DSIH_LD_RAW_S, "uu", "iU"},
- {VKD3D_SM5_OP_LD_STRUCTURED_S, VKD3DSIH_LD_STRUCTURED_S, "uu", "ii*"},
- {VKD3D_SM5_OP_SAMPLE_LOD_S, VKD3DSIH_SAMPLE_LOD_S, "uu", "f**f"},
- {VKD3D_SM5_OP_SAMPLE_C_LZ_S, VKD3DSIH_SAMPLE_C_LZ_S, "fu", "f**f"},
- {VKD3D_SM5_OP_SAMPLE_CL_S, VKD3DSIH_SAMPLE_CL_S, "uu", "f**f"},
- {VKD3D_SM5_OP_SAMPLE_B_CL_S, VKD3DSIH_SAMPLE_B_CL_S, "uu", "f**ff"},
- {VKD3D_SM5_OP_SAMPLE_GRAD_CL_S, VKD3DSIH_SAMPLE_GRAD_CL_S, "uu", "f**fff"},
- {VKD3D_SM5_OP_SAMPLE_C_CL_S, VKD3DSIH_SAMPLE_C_CL_S, "fu", "f**ff"},
- {VKD3D_SM5_OP_CHECK_ACCESS_FULLY_MAPPED, VKD3DSIH_CHECK_ACCESS_FULLY_MAPPED, "u", "u"},
+ {VKD3D_SM5_OP_DDIV, VSIR_OP_DDIV, "d", "dd"},
+ {VKD3D_SM5_OP_DFMA, VSIR_OP_DFMA, "d", "ddd"},
+ {VKD3D_SM5_OP_DRCP, VSIR_OP_DRCP, "d", "d"},
+ {VKD3D_SM5_OP_MSAD, VSIR_OP_MSAD, "u", "uuu"},
+ {VKD3D_SM5_OP_DTOI, VSIR_OP_DTOI, "i", "d"},
+ {VKD3D_SM5_OP_DTOU, VSIR_OP_DTOU, "u", "d"},
+ {VKD3D_SM5_OP_ITOD, VSIR_OP_ITOD, "d", "i"},
+ {VKD3D_SM5_OP_UTOD, VSIR_OP_UTOD, "d", "u"},
+ {VKD3D_SM5_OP_GATHER4_S, VSIR_OP_GATHER4_S, "uu", "f**"},
+ {VKD3D_SM5_OP_GATHER4_C_S, VSIR_OP_GATHER4_C_S, "fu", "f**f"},
+ {VKD3D_SM5_OP_GATHER4_PO_S, VSIR_OP_GATHER4_PO_S, "fu", "fi**"},
+ {VKD3D_SM5_OP_GATHER4_PO_C_S, VSIR_OP_GATHER4_PO_C_S, "fu", "fi**f"},
+ {VKD3D_SM5_OP_LD_S, VSIR_OP_LD_S, "uu", "i*"},
+ {VKD3D_SM5_OP_LD2DMS_S, VSIR_OP_LD2DMS_S, "uu", "i*i"},
+ {VKD3D_SM5_OP_LD_UAV_TYPED_S, VSIR_OP_LD_UAV_TYPED_S, "uu", "iU"},
+ {VKD3D_SM5_OP_LD_RAW_S, VSIR_OP_LD_RAW_S, "uu", "iU"},
+ {VKD3D_SM5_OP_LD_STRUCTURED_S, VSIR_OP_LD_STRUCTURED_S, "uu", "ii*"},
+ {VKD3D_SM5_OP_SAMPLE_LOD_S, VSIR_OP_SAMPLE_LOD_S, "uu", "f**f"},
+ {VKD3D_SM5_OP_SAMPLE_C_LZ_S, VSIR_OP_SAMPLE_C_LZ_S, "fu", "f**f"},
+ {VKD3D_SM5_OP_SAMPLE_CL_S, VSIR_OP_SAMPLE_CL_S, "uu", "f**f"},
+ {VKD3D_SM5_OP_SAMPLE_B_CL_S, VSIR_OP_SAMPLE_B_CL_S, "uu", "f**ff"},
+ {VKD3D_SM5_OP_SAMPLE_GRAD_CL_S, VSIR_OP_SAMPLE_GRAD_CL_S, "uu", "f**fff"},
+ {VKD3D_SM5_OP_SAMPLE_C_CL_S, VSIR_OP_SAMPLE_C_CL_S, "fu", "f**ff"},
+ {VKD3D_SM5_OP_CHECK_ACCESS_FULLY_MAPPED, VSIR_OP_CHECK_ACCESS_FULLY_MAPPED, "u", "u"},
};
static const struct vkd3d_sm4_register_type_info register_type_table[] =
@@ -1944,7 +1944,7 @@ static const struct vkd3d_sm4_opcode_info *get_info_from_sm4_opcode(
static const struct vkd3d_sm4_opcode_info *get_info_from_vsir_opcode(
const struct vkd3d_sm4_lookup_tables *lookup, enum vkd3d_shader_opcode vsir_opcode)
{
- if (vsir_opcode >= VKD3DSIH_COUNT)
+ if (vsir_opcode >= VSIR_OP_COUNT)
return NULL;
return lookup->opcode_info_from_vsir[vsir_opcode];
}
@@ -2289,7 +2289,7 @@ static bool register_is_control_point_input(const struct vkd3d_shader_register *
const struct vkd3d_shader_sm4_parser *priv)
{
return reg->type == VKD3DSPR_INCONTROLPOINT || reg->type == VKD3DSPR_OUTCONTROLPOINT
- || (reg->type == VKD3DSPR_INPUT && (priv->phase == VKD3DSIH_HS_CONTROL_POINT_PHASE
+ || (reg->type == VKD3DSPR_INPUT && (priv->phase == VSIR_OP_HS_CONTROL_POINT_PHASE
|| priv->p.program->shader_version.type == VKD3D_SHADER_TYPE_GEOMETRY));
}
@@ -2656,16 +2656,16 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
if (!(opcode_info = get_info_from_sm4_opcode(&sm4->lookup, opcode)))
{
FIXME("Unrecognized opcode %#x, opcode_token 0x%08x.\n", opcode, opcode_token);
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
*ptr += len;
return;
}
vsir_instruction_init(ins, &sm4->p.location, opcode_info->handler_idx);
- if (ins->opcode == VKD3DSIH_HS_CONTROL_POINT_PHASE || ins->opcode == VKD3DSIH_HS_FORK_PHASE
- || ins->opcode == VKD3DSIH_HS_JOIN_PHASE)
+ if (ins->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE || ins->opcode == VSIR_OP_HS_FORK_PHASE
+ || ins->opcode == VSIR_OP_HS_JOIN_PHASE)
sm4->phase = ins->opcode;
- sm4->has_control_point_phase |= ins->opcode == VKD3DSIH_HS_CONTROL_POINT_PHASE;
+ sm4->has_control_point_phase |= ins->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE;
ins->flags = 0;
ins->coissue = false;
ins->raw = false;
@@ -2678,7 +2678,7 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
{
ERR("Failed to allocate src parameters.\n");
vkd3d_shader_parser_error(&sm4->p, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
return;
}
ins->resource_type = VKD3D_SHADER_RESOURCE_NONE;
@@ -2720,7 +2720,7 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
{
ERR("Failed to allocate dst parameters.\n");
vkd3d_shader_parser_error(&sm4->p, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
return;
}
for (i = 0; i < ins->dst_count; ++i)
@@ -2728,7 +2728,7 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
if (!(shader_sm4_read_dst_param(sm4, &p, *ptr, map_data_type(opcode_info->dst_info[i]),
&dst_params[i])))
{
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
return;
}
dst_params[i].modifiers |= instruction_dst_modifier;
@@ -2739,7 +2739,7 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
if (!(shader_sm4_read_src_param(sm4, &p, *ptr, map_data_type(opcode_info->src_info[i]),
&src_params[i])))
{
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
return;
}
}
@@ -2749,7 +2749,7 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
fail:
*ptr = sm4->end;
- ins->opcode = VKD3DSIH_INVALID;
+ ins->opcode = VSIR_OP_INVALID;
return;
}
@@ -2971,7 +2971,7 @@ int tpf_parse(const struct vkd3d_shader_compile_info *compile_info, uint64_t con
ins = &instructions->elements[instructions->count];
shader_sm4_read_instruction(&sm4, ins);
- if (ins->opcode == VKD3DSIH_INVALID)
+ if (ins->opcode == VSIR_OP_INVALID)
{
WARN("Encountered unrecognized or invalid instruction.\n");
vsir_program_cleanup(program);
@@ -3868,20 +3868,20 @@ static void tpf_dcl_texture(const struct tpf_compiler *tpf, const struct vkd3d_s
info = get_info_from_vsir_opcode(&tpf->lookup, ins->opcode);
VKD3D_ASSERT(info);
- uav = ins->opcode == VKD3DSIH_DCL_UAV_TYPED
- || ins->opcode == VKD3DSIH_DCL_UAV_RAW
- || ins->opcode == VKD3DSIH_DCL_UAV_STRUCTURED;
+ uav = ins->opcode == VSIR_OP_DCL_UAV_TYPED
+ || ins->opcode == VSIR_OP_DCL_UAV_RAW
+ || ins->opcode == VSIR_OP_DCL_UAV_STRUCTURED;
instr.opcode = info->opcode;
- if (ins->opcode == VKD3DSIH_DCL || ins->opcode == VKD3DSIH_DCL_UAV_TYPED)
+ if (ins->opcode == VSIR_OP_DCL || ins->opcode == VSIR_OP_DCL_UAV_TYPED)
{
instr.idx[0] = pack_resource_data_type(ins->declaration.semantic.resource_data_type);
instr.idx_count = 1;
instr.extra_bits |= ins->declaration.semantic.sample_count << VKD3D_SM4_RESOURCE_SAMPLE_COUNT_SHIFT;
resource = &ins->declaration.semantic.resource;
}
- else if (ins->opcode == VKD3DSIH_DCL_RESOURCE_RAW || ins->opcode == VKD3DSIH_DCL_UAV_RAW)
+ else if (ins->opcode == VSIR_OP_DCL_RESOURCE_RAW || ins->opcode == VSIR_OP_DCL_UAV_RAW)
{
resource = &ins->declaration.raw_resource.resource;
}
@@ -4046,6 +4046,39 @@ static void tpf_write_dcl_vertices_out(const struct tpf_compiler *tpf, unsigned
write_sm4_instruction(tpf, &instr);
}
+/* Descriptor registers are stored in shader model 5.1 format regardless
+ * of the program's version. Convert them to the 4.0 format if necessary. */
+static void rewrite_descriptor_register(const struct tpf_compiler *tpf, struct vkd3d_shader_register *reg)
+{
+ if (vkd3d_shader_ver_ge(&tpf->program->shader_version, 5, 1))
+ return;
+
+ switch (reg->type)
+ {
+ case VKD3DSPR_CONSTBUFFER:
+ reg->idx[0] = reg->idx[1];
+ reg->idx[1] = reg->idx[2];
+ reg->idx_count = 2;
+ break;
+
+ case VKD3DSPR_RESOURCE:
+ case VKD3DSPR_SAMPLER:
+ case VKD3DSPR_UAV:
+ reg->idx[0] = reg->idx[1];
+ reg->idx_count = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ for (unsigned int i = 0; i < reg->idx_count; ++i)
+ {
+ if (reg->idx[i].rel_addr)
+ rewrite_descriptor_register(tpf, &reg->idx[i].rel_addr->reg);
+ }
+}
+
static void tpf_simple_instruction(struct tpf_compiler *tpf, const struct vkd3d_shader_instruction *ins)
{
struct sm4_instruction_modifier *modifier;
@@ -4082,6 +4115,7 @@ static void tpf_simple_instruction(struct tpf_compiler *tpf, const struct vkd3d_
for (unsigned int i = 0; i < ins->dst_count; ++i)
{
instr.dsts[i] = ins->dst[i];
+ rewrite_descriptor_register(tpf, &instr.dsts[i].reg);
if (instr.dsts[i].modifiers & VKD3DSPDM_SATURATE)
{
@@ -4092,7 +4126,10 @@ static void tpf_simple_instruction(struct tpf_compiler *tpf, const struct vkd3d_
}
}
for (unsigned int i = 0; i < ins->src_count; ++i)
+ {
instr.srcs[i] = ins->src[i];
+ rewrite_descriptor_register(tpf, &instr.srcs[i].reg);
+ }
if (ins->texel_offset.u || ins->texel_offset.v || ins->texel_offset.w)
{
@@ -4117,184 +4154,184 @@ static void tpf_handle_instruction(struct tpf_compiler *tpf, const struct vkd3d_
{
switch (ins->opcode)
{
- case VKD3DSIH_DCL_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_CONSTANT_BUFFER:
tpf_dcl_constant_buffer(tpf, ins);
break;
- case VKD3DSIH_DCL_TEMPS:
+ case VSIR_OP_DCL_TEMPS:
tpf_dcl_temps(tpf, ins->declaration.count);
break;
- case VKD3DSIH_DCL_INDEXABLE_TEMP:
+ case VSIR_OP_DCL_INDEXABLE_TEMP:
tpf_dcl_indexable_temp(tpf, &ins->declaration.indexable_temp);
break;
- case VKD3DSIH_DCL_INPUT:
+ case VSIR_OP_DCL_INPUT:
tpf_dcl_semantic(tpf, VKD3D_SM4_OP_DCL_INPUT, &ins->declaration.dst, 0);
break;
- case VKD3DSIH_DCL_INPUT_PS:
+ case VSIR_OP_DCL_INPUT_PS:
tpf_dcl_semantic(tpf, VKD3D_SM4_OP_DCL_INPUT_PS, &ins->declaration.dst, ins->flags);
break;
- case VKD3DSIH_DCL_INPUT_PS_SGV:
+ case VSIR_OP_DCL_INPUT_PS_SGV:
tpf_dcl_siv_semantic(tpf, VKD3D_SM4_OP_DCL_INPUT_PS_SGV, &ins->declaration.register_semantic, 0);
break;
- case VKD3DSIH_DCL_INPUT_PS_SIV:
+ case VSIR_OP_DCL_INPUT_PS_SIV:
tpf_dcl_siv_semantic(tpf, VKD3D_SM4_OP_DCL_INPUT_PS_SIV, &ins->declaration.register_semantic, ins->flags);
break;
- case VKD3DSIH_DCL_INPUT_SGV:
+ case VSIR_OP_DCL_INPUT_SGV:
tpf_dcl_siv_semantic(tpf, VKD3D_SM4_OP_DCL_INPUT_SGV, &ins->declaration.register_semantic, 0);
break;
- case VKD3DSIH_DCL_INPUT_SIV:
+ case VSIR_OP_DCL_INPUT_SIV:
tpf_dcl_siv_semantic(tpf, VKD3D_SM4_OP_DCL_INPUT_SIV, &ins->declaration.register_semantic, 0);
break;
- case VKD3DSIH_DCL_OUTPUT:
+ case VSIR_OP_DCL_OUTPUT:
tpf_dcl_semantic(tpf, VKD3D_SM4_OP_DCL_OUTPUT, &ins->declaration.dst, 0);
break;
- case VKD3DSIH_DCL_OUTPUT_SGV:
+ case VSIR_OP_DCL_OUTPUT_SGV:
tpf_dcl_siv_semantic(tpf, VKD3D_SM4_OP_DCL_OUTPUT_SGV, &ins->declaration.register_semantic, 0);
break;
- case VKD3DSIH_DCL_OUTPUT_SIV:
+ case VSIR_OP_DCL_OUTPUT_SIV:
tpf_dcl_siv_semantic(tpf, VKD3D_SM4_OP_DCL_OUTPUT_SIV, &ins->declaration.register_semantic, 0);
break;
- case VKD3DSIH_DCL_SAMPLER:
+ case VSIR_OP_DCL_SAMPLER:
tpf_dcl_sampler(tpf, ins);
break;
- case VKD3DSIH_DCL:
- case VKD3DSIH_DCL_RESOURCE_RAW:
- case VKD3DSIH_DCL_UAV_RAW:
- case VKD3DSIH_DCL_UAV_STRUCTURED:
- case VKD3DSIH_DCL_UAV_TYPED:
+ case VSIR_OP_DCL:
+ case VSIR_OP_DCL_RESOURCE_RAW:
+ case VSIR_OP_DCL_UAV_RAW:
+ case VSIR_OP_DCL_UAV_STRUCTURED:
+ case VSIR_OP_DCL_UAV_TYPED:
tpf_dcl_texture(tpf, ins);
break;
- case VKD3DSIH_ADD:
- case VKD3DSIH_ATOMIC_AND:
- case VKD3DSIH_ATOMIC_CMP_STORE:
- case VKD3DSIH_ATOMIC_IADD:
- case VKD3DSIH_ATOMIC_IMAX:
- case VKD3DSIH_ATOMIC_IMIN:
- case VKD3DSIH_ATOMIC_UMAX:
- case VKD3DSIH_ATOMIC_UMIN:
- case VKD3DSIH_ATOMIC_OR:
- case VKD3DSIH_ATOMIC_XOR:
- case VKD3DSIH_AND:
- case VKD3DSIH_BREAK:
- case VKD3DSIH_CASE:
- case VKD3DSIH_CONTINUE:
- case VKD3DSIH_CUT:
- case VKD3DSIH_CUT_STREAM:
- case VKD3DSIH_DCL_STREAM:
- case VKD3DSIH_DEFAULT:
- case VKD3DSIH_DISCARD:
- case VKD3DSIH_DIV:
- case VKD3DSIH_DP2:
- case VKD3DSIH_DP3:
- case VKD3DSIH_DP4:
- case VKD3DSIH_DSX:
- case VKD3DSIH_DSX_COARSE:
- case VKD3DSIH_DSX_FINE:
- case VKD3DSIH_DSY:
- case VKD3DSIH_DSY_COARSE:
- case VKD3DSIH_DSY_FINE:
- case VKD3DSIH_ELSE:
- case VKD3DSIH_EMIT:
- case VKD3DSIH_EMIT_STREAM:
- case VKD3DSIH_ENDIF:
- case VKD3DSIH_ENDLOOP:
- case VKD3DSIH_ENDSWITCH:
- case VKD3DSIH_EQO:
- case VKD3DSIH_EXP:
- case VKD3DSIH_F16TOF32:
- case VKD3DSIH_F32TOF16:
- case VKD3DSIH_FRC:
- case VKD3DSIH_FTOI:
- case VKD3DSIH_FTOU:
- case VKD3DSIH_GATHER4:
- case VKD3DSIH_GATHER4_PO:
- case VKD3DSIH_GATHER4_C:
- case VKD3DSIH_GATHER4_PO_C:
- case VKD3DSIH_GEO:
- case VKD3DSIH_HS_CONTROL_POINT_PHASE:
- case VKD3DSIH_HS_FORK_PHASE:
- case VKD3DSIH_IADD:
- case VKD3DSIH_IEQ:
- case VKD3DSIH_IF:
- case VKD3DSIH_IGE:
- case VKD3DSIH_ILT:
- case VKD3DSIH_IMAD:
- case VKD3DSIH_IMAX:
- case VKD3DSIH_IMIN:
- case VKD3DSIH_IMM_ATOMIC_AND:
- case VKD3DSIH_IMM_ATOMIC_CMP_EXCH:
- case VKD3DSIH_IMM_ATOMIC_EXCH:
- case VKD3DSIH_IMM_ATOMIC_IADD:
- case VKD3DSIH_IMM_ATOMIC_IMAX:
- case VKD3DSIH_IMM_ATOMIC_IMIN:
- case VKD3DSIH_IMM_ATOMIC_UMAX:
- case VKD3DSIH_IMM_ATOMIC_UMIN:
- case VKD3DSIH_IMM_ATOMIC_OR:
- case VKD3DSIH_IMM_ATOMIC_XOR:
- case VKD3DSIH_SYNC:
- case VKD3DSIH_IMUL:
- case VKD3DSIH_INE:
- case VKD3DSIH_INEG:
- case VKD3DSIH_ISHL:
- case VKD3DSIH_ISHR:
- case VKD3DSIH_ITOF:
- case VKD3DSIH_LD:
- case VKD3DSIH_LD2DMS:
- case VKD3DSIH_LD_RAW:
- case VKD3DSIH_LD_UAV_TYPED:
- case VKD3DSIH_LOG:
- case VKD3DSIH_LOOP:
- case VKD3DSIH_LTO:
- case VKD3DSIH_MAD:
- case VKD3DSIH_MAX:
- case VKD3DSIH_MIN:
- case VKD3DSIH_MOV:
- case VKD3DSIH_MOVC:
- case VKD3DSIH_MUL:
- case VKD3DSIH_NEU:
- case VKD3DSIH_NOT:
- case VKD3DSIH_OR:
- case VKD3DSIH_RCP:
- case VKD3DSIH_RESINFO:
- case VKD3DSIH_RET:
- case VKD3DSIH_ROUND_NE:
- case VKD3DSIH_ROUND_NI:
- case VKD3DSIH_ROUND_PI:
- case VKD3DSIH_ROUND_Z:
- case VKD3DSIH_RSQ:
- case VKD3DSIH_SAMPLE:
- case VKD3DSIH_SAMPLE_B:
- case VKD3DSIH_SAMPLE_C:
- case VKD3DSIH_SAMPLE_C_LZ:
- case VKD3DSIH_SAMPLE_GRAD:
- case VKD3DSIH_SAMPLE_INFO:
- case VKD3DSIH_SAMPLE_LOD:
- case VKD3DSIH_SINCOS:
- case VKD3DSIH_SQRT:
- case VKD3DSIH_STORE_RAW:
- case VKD3DSIH_STORE_UAV_TYPED:
- case VKD3DSIH_SWITCH:
- case VKD3DSIH_UDIV:
- case VKD3DSIH_UGE:
- case VKD3DSIH_ULT:
- case VKD3DSIH_UMAX:
- case VKD3DSIH_UMIN:
- case VKD3DSIH_USHR:
- case VKD3DSIH_UTOF:
- case VKD3DSIH_XOR:
+ case VSIR_OP_ADD:
+ case VSIR_OP_ATOMIC_AND:
+ case VSIR_OP_ATOMIC_CMP_STORE:
+ case VSIR_OP_ATOMIC_IADD:
+ case VSIR_OP_ATOMIC_IMAX:
+ case VSIR_OP_ATOMIC_IMIN:
+ case VSIR_OP_ATOMIC_UMAX:
+ case VSIR_OP_ATOMIC_UMIN:
+ case VSIR_OP_ATOMIC_OR:
+ case VSIR_OP_ATOMIC_XOR:
+ case VSIR_OP_AND:
+ case VSIR_OP_BREAK:
+ case VSIR_OP_CASE:
+ case VSIR_OP_CONTINUE:
+ case VSIR_OP_CUT:
+ case VSIR_OP_CUT_STREAM:
+ case VSIR_OP_DCL_STREAM:
+ case VSIR_OP_DEFAULT:
+ case VSIR_OP_DISCARD:
+ case VSIR_OP_DIV:
+ case VSIR_OP_DP2:
+ case VSIR_OP_DP3:
+ case VSIR_OP_DP4:
+ case VSIR_OP_DSX:
+ case VSIR_OP_DSX_COARSE:
+ case VSIR_OP_DSX_FINE:
+ case VSIR_OP_DSY:
+ case VSIR_OP_DSY_COARSE:
+ case VSIR_OP_DSY_FINE:
+ case VSIR_OP_ELSE:
+ case VSIR_OP_EMIT:
+ case VSIR_OP_EMIT_STREAM:
+ case VSIR_OP_ENDIF:
+ case VSIR_OP_ENDLOOP:
+ case VSIR_OP_ENDSWITCH:
+ case VSIR_OP_EQO:
+ case VSIR_OP_EXP:
+ case VSIR_OP_F16TOF32:
+ case VSIR_OP_F32TOF16:
+ case VSIR_OP_FRC:
+ case VSIR_OP_FTOI:
+ case VSIR_OP_FTOU:
+ case VSIR_OP_GATHER4:
+ case VSIR_OP_GATHER4_PO:
+ case VSIR_OP_GATHER4_C:
+ case VSIR_OP_GATHER4_PO_C:
+ case VSIR_OP_GEO:
+ case VSIR_OP_HS_CONTROL_POINT_PHASE:
+ case VSIR_OP_HS_FORK_PHASE:
+ case VSIR_OP_IADD:
+ case VSIR_OP_IEQ:
+ case VSIR_OP_IF:
+ case VSIR_OP_IGE:
+ case VSIR_OP_ILT:
+ case VSIR_OP_IMAD:
+ case VSIR_OP_IMAX:
+ case VSIR_OP_IMIN:
+ case VSIR_OP_IMM_ATOMIC_AND:
+ case VSIR_OP_IMM_ATOMIC_CMP_EXCH:
+ case VSIR_OP_IMM_ATOMIC_EXCH:
+ case VSIR_OP_IMM_ATOMIC_IADD:
+ case VSIR_OP_IMM_ATOMIC_IMAX:
+ case VSIR_OP_IMM_ATOMIC_IMIN:
+ case VSIR_OP_IMM_ATOMIC_UMAX:
+ case VSIR_OP_IMM_ATOMIC_UMIN:
+ case VSIR_OP_IMM_ATOMIC_OR:
+ case VSIR_OP_IMM_ATOMIC_XOR:
+ case VSIR_OP_SYNC:
+ case VSIR_OP_IMUL:
+ case VSIR_OP_INE:
+ case VSIR_OP_INEG:
+ case VSIR_OP_ISHL:
+ case VSIR_OP_ISHR:
+ case VSIR_OP_ITOF:
+ case VSIR_OP_LD:
+ case VSIR_OP_LD2DMS:
+ case VSIR_OP_LD_RAW:
+ case VSIR_OP_LD_UAV_TYPED:
+ case VSIR_OP_LOG:
+ case VSIR_OP_LOOP:
+ case VSIR_OP_LTO:
+ case VSIR_OP_MAD:
+ case VSIR_OP_MAX:
+ case VSIR_OP_MIN:
+ case VSIR_OP_MOV:
+ case VSIR_OP_MOVC:
+ case VSIR_OP_MUL:
+ case VSIR_OP_NEU:
+ case VSIR_OP_NOT:
+ case VSIR_OP_OR:
+ case VSIR_OP_RCP:
+ case VSIR_OP_RESINFO:
+ case VSIR_OP_RET:
+ case VSIR_OP_ROUND_NE:
+ case VSIR_OP_ROUND_NI:
+ case VSIR_OP_ROUND_PI:
+ case VSIR_OP_ROUND_Z:
+ case VSIR_OP_RSQ:
+ case VSIR_OP_SAMPLE:
+ case VSIR_OP_SAMPLE_B:
+ case VSIR_OP_SAMPLE_C:
+ case VSIR_OP_SAMPLE_C_LZ:
+ case VSIR_OP_SAMPLE_GRAD:
+ case VSIR_OP_SAMPLE_INFO:
+ case VSIR_OP_SAMPLE_LOD:
+ case VSIR_OP_SINCOS:
+ case VSIR_OP_SQRT:
+ case VSIR_OP_STORE_RAW:
+ case VSIR_OP_STORE_UAV_TYPED:
+ case VSIR_OP_SWITCH:
+ case VSIR_OP_UDIV:
+ case VSIR_OP_UGE:
+ case VSIR_OP_ULT:
+ case VSIR_OP_UMAX:
+ case VSIR_OP_UMIN:
+ case VSIR_OP_USHR:
+ case VSIR_OP_UTOF:
+ case VSIR_OP_XOR:
tpf_simple_instruction(tpf, ins);
break;
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
index 2cd23cba1f5..4cda8493696 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
@@ -1041,11 +1041,11 @@ static bool vkd3d_shader_instruction_is_uav_read(const struct vkd3d_shader_instr
{
enum vkd3d_shader_opcode opcode = instruction->opcode;
- return (VKD3DSIH_ATOMIC_AND <= opcode && opcode <= VKD3DSIH_ATOMIC_XOR)
- || (VKD3DSIH_IMM_ATOMIC_ALLOC <= opcode && opcode <= VKD3DSIH_IMM_ATOMIC_XOR)
- || opcode == VKD3DSIH_LD_UAV_TYPED
- || (opcode == VKD3DSIH_LD_RAW && instruction->src[1].reg.type == VKD3DSPR_UAV)
- || (opcode == VKD3DSIH_LD_STRUCTURED && instruction->src[2].reg.type == VKD3DSPR_UAV);
+ return (VSIR_OP_ATOMIC_AND <= opcode && opcode <= VSIR_OP_ATOMIC_XOR)
+ || (VSIR_OP_IMM_ATOMIC_ALLOC <= opcode && opcode <= VSIR_OP_IMM_ATOMIC_XOR)
+ || opcode == VSIR_OP_LD_UAV_TYPED
+ || (opcode == VSIR_OP_LD_RAW && instruction->src[1].reg.type == VKD3DSPR_UAV)
+ || (opcode == VSIR_OP_LD_STRUCTURED && instruction->src[2].reg.type == VKD3DSPR_UAV);
}
static void vkd3d_shader_scan_record_uav_read(struct vkd3d_shader_scan_context *context,
@@ -1058,7 +1058,7 @@ static bool vkd3d_shader_instruction_is_uav_counter(const struct vkd3d_shader_in
{
enum vkd3d_shader_opcode opcode = instruction->opcode;
- return opcode == VKD3DSIH_IMM_ATOMIC_ALLOC || opcode == VKD3DSIH_IMM_ATOMIC_CONSUME;
+ return opcode == VSIR_OP_IMM_ATOMIC_ALLOC || opcode == VSIR_OP_IMM_ATOMIC_CONSUME;
}
static void vkd3d_shader_scan_record_uav_counter(struct vkd3d_shader_scan_context *context,
@@ -1071,8 +1071,8 @@ static bool vkd3d_shader_instruction_is_uav_atomic_op(const struct vkd3d_shader_
{
enum vkd3d_shader_opcode opcode = instruction->opcode;
- return (VKD3DSIH_ATOMIC_AND <= opcode && opcode <= VKD3DSIH_ATOMIC_XOR)
- || (VKD3DSIH_IMM_ATOMIC_ALLOC <= opcode && opcode <= VKD3DSIH_IMM_ATOMIC_XOR);
+ return (VSIR_OP_ATOMIC_AND <= opcode && opcode <= VSIR_OP_ATOMIC_XOR)
+ || (VSIR_OP_IMM_ATOMIC_ALLOC <= opcode && opcode <= VSIR_OP_IMM_ATOMIC_XOR);
}
static void vkd3d_shader_scan_record_uav_atomic_op(struct vkd3d_shader_scan_context *context,
@@ -1283,13 +1283,13 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
switch (instruction->opcode)
{
- case VKD3DSIH_DCL_CONSTANT_BUFFER:
+ case VSIR_OP_DCL_CONSTANT_BUFFER:
vkd3d_shader_scan_constant_buffer_declaration(context, instruction);
break;
- case VKD3DSIH_DCL_SAMPLER:
+ case VSIR_OP_DCL_SAMPLER:
vkd3d_shader_scan_sampler_declaration(context, instruction);
break;
- case VKD3DSIH_DCL:
+ case VSIR_OP_DCL:
if (instruction->declaration.semantic.resource_type == VKD3D_SHADER_RESOURCE_NONE)
break;
@@ -1299,33 +1299,33 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
break;
}
/* fall through */
- case VKD3DSIH_DCL_UAV_TYPED:
+ case VSIR_OP_DCL_UAV_TYPED:
vkd3d_shader_scan_typed_resource_declaration(context, instruction);
break;
- case VKD3DSIH_DCL_RESOURCE_RAW:
- case VKD3DSIH_DCL_UAV_RAW:
+ case VSIR_OP_DCL_RESOURCE_RAW:
+ case VSIR_OP_DCL_UAV_RAW:
vkd3d_shader_scan_resource_declaration(context, &instruction->declaration.raw_resource.resource,
VKD3D_SHADER_RESOURCE_BUFFER, VKD3D_DATA_UINT, 0, 0, true, instruction->flags);
break;
- case VKD3DSIH_DCL_RESOURCE_STRUCTURED:
- case VKD3DSIH_DCL_UAV_STRUCTURED:
+ case VSIR_OP_DCL_RESOURCE_STRUCTURED:
+ case VSIR_OP_DCL_UAV_STRUCTURED:
vkd3d_shader_scan_resource_declaration(context, &instruction->declaration.structured_resource.resource,
VKD3D_SHADER_RESOURCE_BUFFER, VKD3D_DATA_UINT, 0,
instruction->declaration.structured_resource.byte_stride, false, instruction->flags);
break;
- case VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
+ case VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE:
context->output_primitive = instruction->declaration.tessellator_output_primitive;
break;
- case VKD3DSIH_DCL_TESSELLATOR_PARTITIONING:
+ case VSIR_OP_DCL_TESSELLATOR_PARTITIONING:
context->partitioning = instruction->declaration.tessellator_partitioning;
break;
- case VKD3DSIH_IF:
- case VKD3DSIH_IFC:
+ case VSIR_OP_IF:
+ case VSIR_OP_IFC:
cf_info = vkd3d_shader_scan_push_cf_info(context);
cf_info->type = VKD3D_SHADER_BLOCK_IF;
cf_info->inside_block = true;
break;
- case VKD3DSIH_ELSE:
+ case VSIR_OP_ELSE:
if (!(cf_info = vkd3d_shader_scan_get_current_cf_info(context)) || cf_info->type != VKD3D_SHADER_BLOCK_IF)
{
vkd3d_shader_scan_error(context, VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF,
@@ -1334,7 +1334,7 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
}
cf_info->inside_block = true;
break;
- case VKD3DSIH_ENDIF:
+ case VSIR_OP_ENDIF:
if (!(cf_info = vkd3d_shader_scan_get_current_cf_info(context)) || cf_info->type != VKD3D_SHADER_BLOCK_IF)
{
vkd3d_shader_scan_error(context, VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF,
@@ -1343,12 +1343,12 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
}
vkd3d_shader_scan_pop_cf_info(context);
break;
- case VKD3DSIH_LOOP:
+ case VSIR_OP_LOOP:
cf_info = vkd3d_shader_scan_push_cf_info(context);
cf_info->type = VKD3D_SHADER_BLOCK_LOOP;
cf_info->inside_block = true;
break;
- case VKD3DSIH_ENDLOOP:
+ case VSIR_OP_ENDLOOP:
if (!(cf_info = vkd3d_shader_scan_get_current_cf_info(context)) || cf_info->type != VKD3D_SHADER_BLOCK_LOOP)
{
vkd3d_shader_scan_error(context, VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF,
@@ -1357,11 +1357,11 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
}
vkd3d_shader_scan_pop_cf_info(context);
break;
- case VKD3DSIH_SWITCH:
+ case VSIR_OP_SWITCH:
cf_info = vkd3d_shader_scan_push_cf_info(context);
cf_info->type = VKD3D_SHADER_BLOCK_SWITCH;
break;
- case VKD3DSIH_ENDSWITCH:
+ case VSIR_OP_ENDSWITCH:
if (!(cf_info = vkd3d_shader_scan_get_current_cf_info(context))
|| cf_info->type != VKD3D_SHADER_BLOCK_SWITCH || cf_info->inside_block)
{
@@ -1371,7 +1371,7 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
}
vkd3d_shader_scan_pop_cf_info(context);
break;
- case VKD3DSIH_CASE:
+ case VSIR_OP_CASE:
if (!(cf_info = vkd3d_shader_scan_get_current_cf_info(context))
|| cf_info->type != VKD3D_SHADER_BLOCK_SWITCH)
{
@@ -1381,7 +1381,7 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
}
cf_info->inside_block = true;
break;
- case VKD3DSIH_DEFAULT:
+ case VSIR_OP_DEFAULT:
if (!(cf_info = vkd3d_shader_scan_get_current_cf_info(context))
|| cf_info->type != VKD3D_SHADER_BLOCK_SWITCH)
{
@@ -1398,7 +1398,7 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
cf_info->inside_block = true;
cf_info->has_default = true;
break;
- case VKD3DSIH_BREAK:
+ case VSIR_OP_BREAK:
if (!(cf_info = vkd3d_shader_scan_find_innermost_breakable_cf_info(context)))
{
vkd3d_shader_scan_error(context, VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF,
@@ -1407,7 +1407,7 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
}
cf_info->inside_block = false;
break;
- case VKD3DSIH_BREAKP:
+ case VSIR_OP_BREAKP:
if (!(cf_info = vkd3d_shader_scan_find_innermost_loop_cf_info(context)))
{
vkd3d_shader_scan_error(context, VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF,
@@ -1415,7 +1415,7 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
return VKD3D_ERROR_INVALID_SHADER;
}
break;
- case VKD3DSIH_CONTINUE:
+ case VSIR_OP_CONTINUE:
if (!(cf_info = vkd3d_shader_scan_find_innermost_loop_cf_info(context)))
{
vkd3d_shader_scan_error(context, VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF,
@@ -1424,7 +1424,7 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
}
cf_info->inside_block = false;
break;
- case VKD3DSIH_CONTINUEP:
+ case VSIR_OP_CONTINUEP:
if (!(cf_info = vkd3d_shader_scan_find_innermost_loop_cf_info(context)))
{
vkd3d_shader_scan_error(context, VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF,
@@ -1432,60 +1432,60 @@ static int vkd3d_shader_scan_instruction(struct vkd3d_shader_scan_context *conte
return VKD3D_ERROR_INVALID_SHADER;
}
break;
- case VKD3DSIH_RET:
+ case VSIR_OP_RET:
if (context->cf_info_count)
context->cf_info[context->cf_info_count - 1].inside_block = false;
break;
- case VKD3DSIH_TEXLD:
+ case VSIR_OP_TEXLD:
if (context->version->major == 1)
sampler_reg = &instruction->dst[0].reg;
else
sampler_reg = &instruction->src[1].reg;
vkd3d_shader_scan_combined_sampler_usage(context, sampler_reg, sampler_reg);
break;
- case VKD3DSIH_TEX:
- case VKD3DSIH_TEXBEM:
- case VKD3DSIH_TEXBEML:
- case VKD3DSIH_TEXDP3TEX:
- case VKD3DSIH_TEXM3x2TEX:
- case VKD3DSIH_TEXM3x3SPEC:
- case VKD3DSIH_TEXM3x3TEX:
- case VKD3DSIH_TEXM3x3VSPEC:
- case VKD3DSIH_TEXREG2AR:
- case VKD3DSIH_TEXREG2GB:
- case VKD3DSIH_TEXREG2RGB:
+ case VSIR_OP_TEX:
+ case VSIR_OP_TEXBEM:
+ case VSIR_OP_TEXBEML:
+ case VSIR_OP_TEXDP3TEX:
+ case VSIR_OP_TEXM3x2TEX:
+ case VSIR_OP_TEXM3x3SPEC:
+ case VSIR_OP_TEXM3x3TEX:
+ case VSIR_OP_TEXM3x3VSPEC:
+ case VSIR_OP_TEXREG2AR:
+ case VSIR_OP_TEXREG2GB:
+ case VSIR_OP_TEXREG2RGB:
sampler_reg = &instruction->dst[0].reg;
vkd3d_shader_scan_combined_sampler_usage(context, sampler_reg, sampler_reg);
break;
- case VKD3DSIH_GATHER4:
- case VKD3DSIH_GATHER4_C:
- case VKD3DSIH_SAMPLE:
- case VKD3DSIH_SAMPLE_B:
- case VKD3DSIH_SAMPLE_C:
- case VKD3DSIH_SAMPLE_C_LZ:
- case VKD3DSIH_SAMPLE_GRAD:
- case VKD3DSIH_SAMPLE_LOD:
+ case VSIR_OP_GATHER4:
+ case VSIR_OP_GATHER4_C:
+ case VSIR_OP_SAMPLE:
+ case VSIR_OP_SAMPLE_B:
+ case VSIR_OP_SAMPLE_C:
+ case VSIR_OP_SAMPLE_C_LZ:
+ case VSIR_OP_SAMPLE_GRAD:
+ case VSIR_OP_SAMPLE_LOD:
vkd3d_shader_scan_combined_sampler_usage(context, &instruction->src[1].reg, &instruction->src[2].reg);
break;
- case VKD3DSIH_GATHER4_PO:
- case VKD3DSIH_GATHER4_PO_C:
+ case VSIR_OP_GATHER4_PO:
+ case VSIR_OP_GATHER4_PO_C:
vkd3d_shader_scan_combined_sampler_usage(context, &instruction->src[2].reg, &instruction->src[3].reg);
break;
- case VKD3DSIH_LD:
- case VKD3DSIH_LD2DMS:
+ case VSIR_OP_LD:
+ case VSIR_OP_LD2DMS:
vkd3d_shader_scan_combined_sampler_usage(context, &instruction->src[1].reg, NULL);
break;
- case VKD3DSIH_BUFINFO:
- case VKD3DSIH_SAMPLE_INFO:
+ case VSIR_OP_BUFINFO:
+ case VSIR_OP_SAMPLE_INFO:
if (instruction->src[0].reg.type == VKD3DSPR_RESOURCE)
vkd3d_shader_scan_combined_sampler_usage(context, &instruction->src[0].reg, NULL);
break;
- case VKD3DSIH_LD_RAW:
- case VKD3DSIH_RESINFO:
+ case VSIR_OP_LD_RAW:
+ case VSIR_OP_RESINFO:
if (instruction->src[1].reg.type == VKD3DSPR_RESOURCE)
vkd3d_shader_scan_combined_sampler_usage(context, &instruction->src[1].reg, NULL);
break;
- case VKD3DSIH_LD_STRUCTURED:
+ case VSIR_OP_LD_STRUCTURED:
if (instruction->src[2].reg.type == VKD3DSPR_RESOURCE)
vkd3d_shader_scan_combined_sampler_usage(context, &instruction->src[2].reg, NULL);
break;
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
index 254303f1bbb..5d7fc6e1209 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
@@ -281,339 +281,342 @@ enum vkd3d_shader_error
enum vkd3d_shader_opcode
{
- VKD3DSIH_ABS,
- VKD3DSIH_ACOS,
- VKD3DSIH_ADD,
- VKD3DSIH_AND,
- VKD3DSIH_ASIN,
- VKD3DSIH_ATAN,
- VKD3DSIH_ATOMIC_AND,
- VKD3DSIH_ATOMIC_CMP_STORE,
- VKD3DSIH_ATOMIC_IADD,
- VKD3DSIH_ATOMIC_IMAX,
- VKD3DSIH_ATOMIC_IMIN,
- VKD3DSIH_ATOMIC_OR,
- VKD3DSIH_ATOMIC_UMAX,
- VKD3DSIH_ATOMIC_UMIN,
- VKD3DSIH_ATOMIC_XOR,
- VKD3DSIH_BEM,
- VKD3DSIH_BFI,
- VKD3DSIH_BFREV,
- VKD3DSIH_BRANCH,
- VKD3DSIH_BREAK,
- VKD3DSIH_BREAKC,
- VKD3DSIH_BREAKP,
- VKD3DSIH_BUFINFO,
- VKD3DSIH_CALL,
- VKD3DSIH_CALLNZ,
- VKD3DSIH_CASE,
- VKD3DSIH_CHECK_ACCESS_FULLY_MAPPED,
- VKD3DSIH_CMP,
- VKD3DSIH_CND,
- VKD3DSIH_CONTINUE,
- VKD3DSIH_CONTINUEP,
- VKD3DSIH_COS,
- VKD3DSIH_COUNTBITS,
- VKD3DSIH_CRS,
- VKD3DSIH_CUT,
- VKD3DSIH_CUT_STREAM,
- VKD3DSIH_DADD,
- VKD3DSIH_DCL,
- VKD3DSIH_DCL_CONSTANT_BUFFER,
- VKD3DSIH_DCL_FUNCTION_BODY,
- VKD3DSIH_DCL_FUNCTION_TABLE,
- VKD3DSIH_DCL_GLOBAL_FLAGS,
- VKD3DSIH_DCL_GS_INSTANCES,
- VKD3DSIH_DCL_HS_FORK_PHASE_INSTANCE_COUNT,
- VKD3DSIH_DCL_HS_JOIN_PHASE_INSTANCE_COUNT,
- VKD3DSIH_DCL_HS_MAX_TESSFACTOR,
- VKD3DSIH_DCL_IMMEDIATE_CONSTANT_BUFFER,
- VKD3DSIH_DCL_INDEX_RANGE,
- VKD3DSIH_DCL_INDEXABLE_TEMP,
- VKD3DSIH_DCL_INPUT,
- VKD3DSIH_DCL_INPUT_CONTROL_POINT_COUNT,
- VKD3DSIH_DCL_INPUT_PRIMITIVE,
- VKD3DSIH_DCL_INPUT_PS,
- VKD3DSIH_DCL_INPUT_PS_SGV,
- VKD3DSIH_DCL_INPUT_PS_SIV,
- VKD3DSIH_DCL_INPUT_SGV,
- VKD3DSIH_DCL_INPUT_SIV,
- VKD3DSIH_DCL_INTERFACE,
- VKD3DSIH_DCL_OUTPUT,
- VKD3DSIH_DCL_OUTPUT_CONTROL_POINT_COUNT,
- VKD3DSIH_DCL_OUTPUT_SGV,
- VKD3DSIH_DCL_OUTPUT_SIV,
- VKD3DSIH_DCL_OUTPUT_TOPOLOGY,
- VKD3DSIH_DCL_RESOURCE_RAW,
- VKD3DSIH_DCL_RESOURCE_STRUCTURED,
- VKD3DSIH_DCL_SAMPLER,
- VKD3DSIH_DCL_STREAM,
- VKD3DSIH_DCL_TEMPS,
- VKD3DSIH_DCL_TESSELLATOR_DOMAIN,
- VKD3DSIH_DCL_TESSELLATOR_OUTPUT_PRIMITIVE,
- VKD3DSIH_DCL_TESSELLATOR_PARTITIONING,
- VKD3DSIH_DCL_TGSM_RAW,
- VKD3DSIH_DCL_TGSM_STRUCTURED,
- VKD3DSIH_DCL_THREAD_GROUP,
- VKD3DSIH_DCL_UAV_RAW,
- VKD3DSIH_DCL_UAV_STRUCTURED,
- VKD3DSIH_DCL_UAV_TYPED,
- VKD3DSIH_DCL_VERTICES_OUT,
- VKD3DSIH_DDIV,
- VKD3DSIH_DEF,
- VKD3DSIH_DEFAULT,
- VKD3DSIH_DEFB,
- VKD3DSIH_DEFI,
- VKD3DSIH_DEQO,
- VKD3DSIH_DFMA,
- VKD3DSIH_DGEO,
- VKD3DSIH_DISCARD,
- VKD3DSIH_DIV,
- VKD3DSIH_DLT,
- VKD3DSIH_DMAX,
- VKD3DSIH_DMIN,
- VKD3DSIH_DMOV,
- VKD3DSIH_DMOVC,
- VKD3DSIH_DMUL,
- VKD3DSIH_DNE,
- VKD3DSIH_DP2,
- VKD3DSIH_DP2ADD,
- VKD3DSIH_DP3,
- VKD3DSIH_DP4,
- VKD3DSIH_DRCP,
- VKD3DSIH_DST,
- VKD3DSIH_DSX,
- VKD3DSIH_DSX_COARSE,
- VKD3DSIH_DSX_FINE,
- VKD3DSIH_DSY,
- VKD3DSIH_DSY_COARSE,
- VKD3DSIH_DSY_FINE,
- VKD3DSIH_DTOF,
- VKD3DSIH_DTOI,
- VKD3DSIH_DTOU,
- VKD3DSIH_ELSE,
- VKD3DSIH_EMIT,
- VKD3DSIH_EMIT_STREAM,
- VKD3DSIH_ENDIF,
- VKD3DSIH_ENDLOOP,
- VKD3DSIH_ENDREP,
- VKD3DSIH_ENDSWITCH,
- VKD3DSIH_EQO,
- VKD3DSIH_EQU,
- VKD3DSIH_EVAL_CENTROID,
- VKD3DSIH_EVAL_SAMPLE_INDEX,
- VKD3DSIH_EXP,
- VKD3DSIH_EXPP,
- VKD3DSIH_F16TOF32,
- VKD3DSIH_F32TOF16,
- VKD3DSIH_FCALL,
- VKD3DSIH_FIRSTBIT_HI,
- VKD3DSIH_FIRSTBIT_LO,
- VKD3DSIH_FIRSTBIT_SHI,
- VKD3DSIH_FRC,
- VKD3DSIH_FREM,
- VKD3DSIH_FTOD,
- VKD3DSIH_FTOI,
- VKD3DSIH_FTOU,
- VKD3DSIH_GATHER4,
- VKD3DSIH_GATHER4_C,
- VKD3DSIH_GATHER4_C_S,
- VKD3DSIH_GATHER4_PO,
- VKD3DSIH_GATHER4_PO_C,
- VKD3DSIH_GATHER4_PO_C_S,
- VKD3DSIH_GATHER4_PO_S,
- VKD3DSIH_GATHER4_S,
- VKD3DSIH_GEO,
- VKD3DSIH_GEU,
- VKD3DSIH_HCOS,
- VKD3DSIH_HS_CONTROL_POINT_PHASE,
- VKD3DSIH_HS_DECLS,
- VKD3DSIH_HS_FORK_PHASE,
- VKD3DSIH_HS_JOIN_PHASE,
- VKD3DSIH_HSIN,
- VKD3DSIH_HTAN,
- VKD3DSIH_IADD,
- VKD3DSIH_IBFE,
- VKD3DSIH_IDIV,
- VKD3DSIH_IEQ,
- VKD3DSIH_IF,
- VKD3DSIH_IFC,
- VKD3DSIH_IGE,
- VKD3DSIH_ILT,
- VKD3DSIH_IMAD,
- VKD3DSIH_IMAX,
- VKD3DSIH_IMIN,
- VKD3DSIH_IMM_ATOMIC_ALLOC,
- VKD3DSIH_IMM_ATOMIC_AND,
- VKD3DSIH_IMM_ATOMIC_CMP_EXCH,
- VKD3DSIH_IMM_ATOMIC_CONSUME,
- VKD3DSIH_IMM_ATOMIC_EXCH,
- VKD3DSIH_IMM_ATOMIC_IADD,
- VKD3DSIH_IMM_ATOMIC_IMAX,
- VKD3DSIH_IMM_ATOMIC_IMIN,
- VKD3DSIH_IMM_ATOMIC_OR,
- VKD3DSIH_IMM_ATOMIC_UMAX,
- VKD3DSIH_IMM_ATOMIC_UMIN,
- VKD3DSIH_IMM_ATOMIC_XOR,
- VKD3DSIH_IMUL,
- VKD3DSIH_IMUL_LOW,
- VKD3DSIH_INE,
- VKD3DSIH_INEG,
- VKD3DSIH_ISFINITE,
- VKD3DSIH_ISHL,
- VKD3DSIH_ISHR,
- VKD3DSIH_ISINF,
- VKD3DSIH_ISNAN,
- VKD3DSIH_ITOD,
- VKD3DSIH_ITOF,
- VKD3DSIH_ITOI,
- VKD3DSIH_LABEL,
- VKD3DSIH_LD,
- VKD3DSIH_LD2DMS,
- VKD3DSIH_LD2DMS_S,
- VKD3DSIH_LD_RAW,
- VKD3DSIH_LD_RAW_S,
- VKD3DSIH_LD_S,
- VKD3DSIH_LD_STRUCTURED,
- VKD3DSIH_LD_STRUCTURED_S,
- VKD3DSIH_LD_UAV_TYPED,
- VKD3DSIH_LD_UAV_TYPED_S,
- VKD3DSIH_LIT,
- VKD3DSIH_LOD,
- VKD3DSIH_LOG,
- VKD3DSIH_LOGP,
- VKD3DSIH_LOOP,
- VKD3DSIH_LRP,
- VKD3DSIH_LTO,
- VKD3DSIH_LTU,
- VKD3DSIH_M3x2,
- VKD3DSIH_M3x3,
- VKD3DSIH_M3x4,
- VKD3DSIH_M4x3,
- VKD3DSIH_M4x4,
- VKD3DSIH_MAD,
- VKD3DSIH_MAX,
- VKD3DSIH_MIN,
- VKD3DSIH_MOV,
- VKD3DSIH_MOVA,
- VKD3DSIH_MOVC,
- VKD3DSIH_MSAD,
- VKD3DSIH_MUL,
- VKD3DSIH_NEO,
- VKD3DSIH_NEU,
- VKD3DSIH_NOP,
- VKD3DSIH_NOT,
- VKD3DSIH_NRM,
- VKD3DSIH_OR,
- VKD3DSIH_ORD,
- VKD3DSIH_PHASE,
- VKD3DSIH_PHI,
- VKD3DSIH_POW,
- VKD3DSIH_QUAD_READ_ACROSS_D,
- VKD3DSIH_QUAD_READ_ACROSS_X,
- VKD3DSIH_QUAD_READ_ACROSS_Y,
- VKD3DSIH_QUAD_READ_LANE_AT,
- VKD3DSIH_RCP,
- VKD3DSIH_REP,
- VKD3DSIH_RESINFO,
- VKD3DSIH_RET,
- VKD3DSIH_RETP,
- VKD3DSIH_ROUND_NE,
- VKD3DSIH_ROUND_NI,
- VKD3DSIH_ROUND_PI,
- VKD3DSIH_ROUND_Z,
- VKD3DSIH_RSQ,
- VKD3DSIH_SAMPLE,
- VKD3DSIH_SAMPLE_B,
- VKD3DSIH_SAMPLE_B_CL_S,
- VKD3DSIH_SAMPLE_C,
- VKD3DSIH_SAMPLE_C_CL_S,
- VKD3DSIH_SAMPLE_C_LZ,
- VKD3DSIH_SAMPLE_C_LZ_S,
- VKD3DSIH_SAMPLE_CL_S,
- VKD3DSIH_SAMPLE_GRAD,
- VKD3DSIH_SAMPLE_GRAD_CL_S,
- VKD3DSIH_SAMPLE_INFO,
- VKD3DSIH_SAMPLE_LOD,
- VKD3DSIH_SAMPLE_LOD_S,
- VKD3DSIH_SAMPLE_POS,
- VKD3DSIH_SETP,
- VKD3DSIH_SGE,
- VKD3DSIH_SGN,
- VKD3DSIH_SIN,
- VKD3DSIH_SINCOS,
- VKD3DSIH_SLT,
- VKD3DSIH_SQRT,
- VKD3DSIH_STORE_RAW,
- VKD3DSIH_STORE_STRUCTURED,
- VKD3DSIH_STORE_UAV_TYPED,
- VKD3DSIH_SUB,
- VKD3DSIH_SWAPC,
- VKD3DSIH_SWITCH,
- VKD3DSIH_SWITCH_MONOLITHIC,
- VKD3DSIH_SYNC,
- VKD3DSIH_TAN,
- VKD3DSIH_TEX,
- VKD3DSIH_TEXBEM,
- VKD3DSIH_TEXBEML,
- VKD3DSIH_TEXCOORD,
- VKD3DSIH_TEXCRD,
- VKD3DSIH_TEXDEPTH,
- VKD3DSIH_TEXDP3,
- VKD3DSIH_TEXDP3TEX,
- VKD3DSIH_TEXKILL,
- VKD3DSIH_TEXLD,
- VKD3DSIH_TEXLDD,
- VKD3DSIH_TEXLDL,
- VKD3DSIH_TEXM3x2DEPTH,
- VKD3DSIH_TEXM3x2PAD,
- VKD3DSIH_TEXM3x2TEX,
- VKD3DSIH_TEXM3x3,
- VKD3DSIH_TEXM3x3DIFF,
- VKD3DSIH_TEXM3x3PAD,
- VKD3DSIH_TEXM3x3SPEC,
- VKD3DSIH_TEXM3x3TEX,
- VKD3DSIH_TEXM3x3VSPEC,
- VKD3DSIH_TEXREG2AR,
- VKD3DSIH_TEXREG2GB,
- VKD3DSIH_TEXREG2RGB,
- VKD3DSIH_UBFE,
- VKD3DSIH_UDIV,
- VKD3DSIH_UGE,
- VKD3DSIH_ULT,
- VKD3DSIH_UMAX,
- VKD3DSIH_UMIN,
- VKD3DSIH_UMUL,
- VKD3DSIH_UNO,
- VKD3DSIH_USHR,
- VKD3DSIH_UTOD,
- VKD3DSIH_UTOF,
- VKD3DSIH_UTOU,
- VKD3DSIH_WAVE_ACTIVE_ALL_EQUAL,
- VKD3DSIH_WAVE_ACTIVE_BALLOT,
- VKD3DSIH_WAVE_ACTIVE_BIT_AND,
- VKD3DSIH_WAVE_ACTIVE_BIT_OR,
- VKD3DSIH_WAVE_ACTIVE_BIT_XOR,
- VKD3DSIH_WAVE_ALL_BIT_COUNT,
- VKD3DSIH_WAVE_ALL_TRUE,
- VKD3DSIH_WAVE_ANY_TRUE,
- VKD3DSIH_WAVE_IS_FIRST_LANE,
- VKD3DSIH_WAVE_OP_ADD,
- VKD3DSIH_WAVE_OP_IMAX,
- VKD3DSIH_WAVE_OP_IMIN,
- VKD3DSIH_WAVE_OP_MAX,
- VKD3DSIH_WAVE_OP_MIN,
- VKD3DSIH_WAVE_OP_MUL,
- VKD3DSIH_WAVE_OP_UMAX,
- VKD3DSIH_WAVE_OP_UMIN,
- VKD3DSIH_WAVE_PREFIX_BIT_COUNT,
- VKD3DSIH_WAVE_READ_LANE_AT,
- VKD3DSIH_WAVE_READ_LANE_FIRST,
- VKD3DSIH_XOR,
-
- VKD3DSIH_INVALID,
-
- VKD3DSIH_COUNT,
+ VSIR_OP_ABS,
+ VSIR_OP_ACOS,
+ VSIR_OP_ADD,
+ VSIR_OP_AND,
+ VSIR_OP_ASIN,
+ VSIR_OP_ATAN,
+ VSIR_OP_ATOMIC_AND,
+ VSIR_OP_ATOMIC_CMP_STORE,
+ VSIR_OP_ATOMIC_IADD,
+ VSIR_OP_ATOMIC_IMAX,
+ VSIR_OP_ATOMIC_IMIN,
+ VSIR_OP_ATOMIC_OR,
+ VSIR_OP_ATOMIC_UMAX,
+ VSIR_OP_ATOMIC_UMIN,
+ VSIR_OP_ATOMIC_XOR,
+ VSIR_OP_BEM,
+ VSIR_OP_BFI,
+ VSIR_OP_BFREV,
+ VSIR_OP_BRANCH,
+ VSIR_OP_BREAK,
+ VSIR_OP_BREAKC,
+ VSIR_OP_BREAKP,
+ VSIR_OP_BUFINFO,
+ VSIR_OP_CALL,
+ VSIR_OP_CALLNZ,
+ VSIR_OP_CASE,
+ VSIR_OP_CHECK_ACCESS_FULLY_MAPPED,
+ VSIR_OP_CMP,
+ VSIR_OP_CND,
+ VSIR_OP_CONTINUE,
+ VSIR_OP_CONTINUEP,
+ VSIR_OP_COS,
+ VSIR_OP_COUNTBITS,
+ VSIR_OP_CRS,
+ VSIR_OP_CUT,
+ VSIR_OP_CUT_STREAM,
+ VSIR_OP_DADD,
+ VSIR_OP_DCL,
+ VSIR_OP_DCL_CONSTANT_BUFFER,
+ VSIR_OP_DCL_FUNCTION_BODY,
+ VSIR_OP_DCL_FUNCTION_TABLE,
+ VSIR_OP_DCL_GLOBAL_FLAGS,
+ VSIR_OP_DCL_GS_INSTANCES,
+ VSIR_OP_DCL_HS_FORK_PHASE_INSTANCE_COUNT,
+ VSIR_OP_DCL_HS_JOIN_PHASE_INSTANCE_COUNT,
+ VSIR_OP_DCL_HS_MAX_TESSFACTOR,
+ VSIR_OP_DCL_IMMEDIATE_CONSTANT_BUFFER,
+ VSIR_OP_DCL_INDEX_RANGE,
+ VSIR_OP_DCL_INDEXABLE_TEMP,
+ VSIR_OP_DCL_INPUT,
+ VSIR_OP_DCL_INPUT_CONTROL_POINT_COUNT,
+ VSIR_OP_DCL_INPUT_PRIMITIVE,
+ VSIR_OP_DCL_INPUT_PS,
+ VSIR_OP_DCL_INPUT_PS_SGV,
+ VSIR_OP_DCL_INPUT_PS_SIV,
+ VSIR_OP_DCL_INPUT_SGV,
+ VSIR_OP_DCL_INPUT_SIV,
+ VSIR_OP_DCL_INTERFACE,
+ VSIR_OP_DCL_OUTPUT,
+ VSIR_OP_DCL_OUTPUT_CONTROL_POINT_COUNT,
+ VSIR_OP_DCL_OUTPUT_SGV,
+ VSIR_OP_DCL_OUTPUT_SIV,
+ VSIR_OP_DCL_OUTPUT_TOPOLOGY,
+ VSIR_OP_DCL_RESOURCE_RAW,
+ VSIR_OP_DCL_RESOURCE_STRUCTURED,
+ VSIR_OP_DCL_SAMPLER,
+ VSIR_OP_DCL_STREAM,
+ VSIR_OP_DCL_TEMPS,
+ VSIR_OP_DCL_TESSELLATOR_DOMAIN,
+ VSIR_OP_DCL_TESSELLATOR_OUTPUT_PRIMITIVE,
+ VSIR_OP_DCL_TESSELLATOR_PARTITIONING,
+ VSIR_OP_DCL_TGSM_RAW,
+ VSIR_OP_DCL_TGSM_STRUCTURED,
+ VSIR_OP_DCL_THREAD_GROUP,
+ VSIR_OP_DCL_UAV_RAW,
+ VSIR_OP_DCL_UAV_STRUCTURED,
+ VSIR_OP_DCL_UAV_TYPED,
+ VSIR_OP_DCL_VERTICES_OUT,
+ VSIR_OP_DDIV,
+ VSIR_OP_DEF,
+ VSIR_OP_DEFAULT,
+ VSIR_OP_DEFB,
+ VSIR_OP_DEFI,
+ VSIR_OP_DEQO,
+ VSIR_OP_DFMA,
+ VSIR_OP_DGEO,
+ VSIR_OP_DISCARD,
+ VSIR_OP_DIV,
+ VSIR_OP_DLT,
+ VSIR_OP_DMAX,
+ VSIR_OP_DMIN,
+ VSIR_OP_DMOV,
+ VSIR_OP_DMOVC,
+ VSIR_OP_DMUL,
+ VSIR_OP_DNE,
+ VSIR_OP_DP2,
+ VSIR_OP_DP2ADD,
+ VSIR_OP_DP3,
+ VSIR_OP_DP4,
+ VSIR_OP_DRCP,
+ VSIR_OP_DST,
+ VSIR_OP_DSX,
+ VSIR_OP_DSX_COARSE,
+ VSIR_OP_DSX_FINE,
+ VSIR_OP_DSY,
+ VSIR_OP_DSY_COARSE,
+ VSIR_OP_DSY_FINE,
+ VSIR_OP_DTOF,
+ VSIR_OP_DTOI,
+ VSIR_OP_DTOU,
+ VSIR_OP_ELSE,
+ VSIR_OP_EMIT,
+ VSIR_OP_EMIT_STREAM,
+ VSIR_OP_ENDIF,
+ VSIR_OP_ENDLOOP,
+ VSIR_OP_ENDREP,
+ VSIR_OP_ENDSWITCH,
+ VSIR_OP_EQO,
+ VSIR_OP_EQU,
+ VSIR_OP_EVAL_CENTROID,
+ VSIR_OP_EVAL_SAMPLE_INDEX,
+ VSIR_OP_EXP,
+ VSIR_OP_EXPP,
+ VSIR_OP_F16TOF32,
+ VSIR_OP_F32TOF16,
+ VSIR_OP_FCALL,
+ VSIR_OP_FIRSTBIT_HI,
+ VSIR_OP_FIRSTBIT_LO,
+ VSIR_OP_FIRSTBIT_SHI,
+ VSIR_OP_FRC,
+ VSIR_OP_FREM,
+ VSIR_OP_FTOD,
+ VSIR_OP_FTOI,
+ VSIR_OP_FTOU,
+ VSIR_OP_GATHER4,
+ VSIR_OP_GATHER4_C,
+ VSIR_OP_GATHER4_C_S,
+ VSIR_OP_GATHER4_PO,
+ VSIR_OP_GATHER4_PO_C,
+ VSIR_OP_GATHER4_PO_C_S,
+ VSIR_OP_GATHER4_PO_S,
+ VSIR_OP_GATHER4_S,
+ VSIR_OP_GEO,
+ VSIR_OP_GEU,
+ VSIR_OP_HCOS,
+ VSIR_OP_HS_CONTROL_POINT_PHASE,
+ VSIR_OP_HS_DECLS,
+ VSIR_OP_HS_FORK_PHASE,
+ VSIR_OP_HS_JOIN_PHASE,
+ VSIR_OP_HSIN,
+ VSIR_OP_HTAN,
+ VSIR_OP_IADD,
+ VSIR_OP_IBFE,
+ VSIR_OP_IDIV,
+ VSIR_OP_IEQ,
+ VSIR_OP_IF,
+ VSIR_OP_IFC,
+ VSIR_OP_IGE,
+ VSIR_OP_ILT,
+ VSIR_OP_IMAD,
+ VSIR_OP_IMAX,
+ VSIR_OP_IMIN,
+ VSIR_OP_IMM_ATOMIC_ALLOC,
+ VSIR_OP_IMM_ATOMIC_AND,
+ VSIR_OP_IMM_ATOMIC_CMP_EXCH,
+ VSIR_OP_IMM_ATOMIC_CONSUME,
+ VSIR_OP_IMM_ATOMIC_EXCH,
+ VSIR_OP_IMM_ATOMIC_IADD,
+ VSIR_OP_IMM_ATOMIC_IMAX,
+ VSIR_OP_IMM_ATOMIC_IMIN,
+ VSIR_OP_IMM_ATOMIC_OR,
+ VSIR_OP_IMM_ATOMIC_UMAX,
+ VSIR_OP_IMM_ATOMIC_UMIN,
+ VSIR_OP_IMM_ATOMIC_XOR,
+ VSIR_OP_IMUL,
+ VSIR_OP_IMUL_LOW,
+ VSIR_OP_INE,
+ VSIR_OP_INEG,
+ VSIR_OP_IREM,
+ VSIR_OP_ISFINITE,
+ VSIR_OP_ISHL,
+ VSIR_OP_ISHR,
+ VSIR_OP_ISINF,
+ VSIR_OP_ISNAN,
+ VSIR_OP_ITOD,
+ VSIR_OP_ITOF,
+ VSIR_OP_ITOI,
+ VSIR_OP_LABEL,
+ VSIR_OP_LD,
+ VSIR_OP_LD2DMS,
+ VSIR_OP_LD2DMS_S,
+ VSIR_OP_LD_RAW,
+ VSIR_OP_LD_RAW_S,
+ VSIR_OP_LD_S,
+ VSIR_OP_LD_STRUCTURED,
+ VSIR_OP_LD_STRUCTURED_S,
+ VSIR_OP_LD_UAV_TYPED,
+ VSIR_OP_LD_UAV_TYPED_S,
+ VSIR_OP_LIT,
+ VSIR_OP_LOD,
+ VSIR_OP_LOG,
+ VSIR_OP_LOGP,
+ VSIR_OP_LOOP,
+ VSIR_OP_LRP,
+ VSIR_OP_LTO,
+ VSIR_OP_LTU,
+ VSIR_OP_M3x2,
+ VSIR_OP_M3x3,
+ VSIR_OP_M3x4,
+ VSIR_OP_M4x3,
+ VSIR_OP_M4x4,
+ VSIR_OP_MAD,
+ VSIR_OP_MAX,
+ VSIR_OP_MIN,
+ VSIR_OP_MOV,
+ VSIR_OP_MOVA,
+ VSIR_OP_MOVC,
+ VSIR_OP_MSAD,
+ VSIR_OP_MUL,
+ VSIR_OP_NEO,
+ VSIR_OP_NEU,
+ VSIR_OP_NOP,
+ VSIR_OP_NOT,
+ VSIR_OP_NRM,
+ VSIR_OP_OR,
+ VSIR_OP_ORD,
+ VSIR_OP_PHASE,
+ VSIR_OP_PHI,
+ VSIR_OP_POW,
+ VSIR_OP_QUAD_READ_ACROSS_D,
+ VSIR_OP_QUAD_READ_ACROSS_X,
+ VSIR_OP_QUAD_READ_ACROSS_Y,
+ VSIR_OP_QUAD_READ_LANE_AT,
+ VSIR_OP_RCP,
+ VSIR_OP_REP,
+ VSIR_OP_RESINFO,
+ VSIR_OP_RET,
+ VSIR_OP_RETP,
+ VSIR_OP_ROUND_NE,
+ VSIR_OP_ROUND_NI,
+ VSIR_OP_ROUND_PI,
+ VSIR_OP_ROUND_Z,
+ VSIR_OP_RSQ,
+ VSIR_OP_SAMPLE,
+ VSIR_OP_SAMPLE_B,
+ VSIR_OP_SAMPLE_B_CL_S,
+ VSIR_OP_SAMPLE_C,
+ VSIR_OP_SAMPLE_C_CL_S,
+ VSIR_OP_SAMPLE_C_LZ,
+ VSIR_OP_SAMPLE_C_LZ_S,
+ VSIR_OP_SAMPLE_CL_S,
+ VSIR_OP_SAMPLE_GRAD,
+ VSIR_OP_SAMPLE_GRAD_CL_S,
+ VSIR_OP_SAMPLE_INFO,
+ VSIR_OP_SAMPLE_LOD,
+ VSIR_OP_SAMPLE_LOD_S,
+ VSIR_OP_SAMPLE_POS,
+ VSIR_OP_SETP,
+ VSIR_OP_SGE,
+ VSIR_OP_SGN,
+ VSIR_OP_SIN,
+ VSIR_OP_SINCOS,
+ VSIR_OP_SLT,
+ VSIR_OP_SQRT,
+ VSIR_OP_STORE_RAW,
+ VSIR_OP_STORE_STRUCTURED,
+ VSIR_OP_STORE_UAV_TYPED,
+ VSIR_OP_SUB,
+ VSIR_OP_SWAPC,
+ VSIR_OP_SWITCH,
+ VSIR_OP_SWITCH_MONOLITHIC,
+ VSIR_OP_SYNC,
+ VSIR_OP_TAN,
+ VSIR_OP_TEX,
+ VSIR_OP_TEXBEM,
+ VSIR_OP_TEXBEML,
+ VSIR_OP_TEXCOORD,
+ VSIR_OP_TEXCRD,
+ VSIR_OP_TEXDEPTH,
+ VSIR_OP_TEXDP3,
+ VSIR_OP_TEXDP3TEX,
+ VSIR_OP_TEXKILL,
+ VSIR_OP_TEXLD,
+ VSIR_OP_TEXLDD,
+ VSIR_OP_TEXLDL,
+ VSIR_OP_TEXM3x2DEPTH,
+ VSIR_OP_TEXM3x2PAD,
+ VSIR_OP_TEXM3x2TEX,
+ VSIR_OP_TEXM3x3,
+ VSIR_OP_TEXM3x3DIFF,
+ VSIR_OP_TEXM3x3PAD,
+ VSIR_OP_TEXM3x3SPEC,
+ VSIR_OP_TEXM3x3TEX,
+ VSIR_OP_TEXM3x3VSPEC,
+ VSIR_OP_TEXREG2AR,
+ VSIR_OP_TEXREG2GB,
+ VSIR_OP_TEXREG2RGB,
+ VSIR_OP_UBFE,
+ VSIR_OP_UDIV,
+ VSIR_OP_UDIV_SIMPLE,
+ VSIR_OP_UGE,
+ VSIR_OP_ULT,
+ VSIR_OP_UMAX,
+ VSIR_OP_UMIN,
+ VSIR_OP_UMUL,
+ VSIR_OP_UNO,
+ VSIR_OP_UREM,
+ VSIR_OP_USHR,
+ VSIR_OP_UTOD,
+ VSIR_OP_UTOF,
+ VSIR_OP_UTOU,
+ VSIR_OP_WAVE_ACTIVE_ALL_EQUAL,
+ VSIR_OP_WAVE_ACTIVE_BALLOT,
+ VSIR_OP_WAVE_ACTIVE_BIT_AND,
+ VSIR_OP_WAVE_ACTIVE_BIT_OR,
+ VSIR_OP_WAVE_ACTIVE_BIT_XOR,
+ VSIR_OP_WAVE_ALL_BIT_COUNT,
+ VSIR_OP_WAVE_ALL_TRUE,
+ VSIR_OP_WAVE_ANY_TRUE,
+ VSIR_OP_WAVE_IS_FIRST_LANE,
+ VSIR_OP_WAVE_OP_ADD,
+ VSIR_OP_WAVE_OP_IMAX,
+ VSIR_OP_WAVE_OP_IMIN,
+ VSIR_OP_WAVE_OP_MAX,
+ VSIR_OP_WAVE_OP_MIN,
+ VSIR_OP_WAVE_OP_MUL,
+ VSIR_OP_WAVE_OP_UMAX,
+ VSIR_OP_WAVE_OP_UMIN,
+ VSIR_OP_WAVE_PREFIX_BIT_COUNT,
+ VSIR_OP_WAVE_READ_LANE_AT,
+ VSIR_OP_WAVE_READ_LANE_FIRST,
+ VSIR_OP_XOR,
+
+ VSIR_OP_INVALID,
+
+ VSIR_OP_COUNT,
};
const char *vsir_opcode_get_name(enum vkd3d_shader_opcode op, const char *error);
diff --git a/libs/vkd3d/libs/vkd3d/resource.c b/libs/vkd3d/libs/vkd3d/resource.c
index cb184986f2a..14382103acc 100644
--- a/libs/vkd3d/libs/vkd3d/resource.c
+++ b/libs/vkd3d/libs/vkd3d/resource.c
@@ -310,6 +310,9 @@ static ULONG STDMETHODCALLTYPE d3d12_heap_AddRef(ID3D12Heap *iface)
struct d3d12_heap *heap = impl_from_ID3D12Heap(iface);
unsigned int refcount = vkd3d_atomic_increment_u32(&heap->refcount);
+ if (refcount == 1)
+ vkd3d_atomic_increment_u32(&heap->internal_refcount);
+
TRACE("%p increasing refcount to %u.\n", heap, refcount);
VKD3D_ASSERT(!heap->is_private);
@@ -342,6 +345,12 @@ static void d3d12_heap_destroy(struct d3d12_heap *heap)
d3d12_device_release(device);
}
+static void d3d12_heap_decref(struct d3d12_heap *heap)
+{
+ if (!vkd3d_atomic_decrement_u32(&heap->internal_refcount))
+ d3d12_heap_destroy(heap);
+}
+
static ULONG STDMETHODCALLTYPE d3d12_heap_Release(ID3D12Heap *iface)
{
struct d3d12_heap *heap = impl_from_ID3D12Heap(iface);
@@ -350,18 +359,12 @@ static ULONG STDMETHODCALLTYPE d3d12_heap_Release(ID3D12Heap *iface)
TRACE("%p decreasing refcount to %u.\n", heap, refcount);
/* A heap must not be destroyed until all contained resources are destroyed. */
- if (!refcount && !heap->resource_count)
- d3d12_heap_destroy(heap);
+ if (!refcount)
+ d3d12_heap_decref(heap);
return refcount;
}
-static void d3d12_heap_resource_destroyed(struct d3d12_heap *heap)
-{
- if (!vkd3d_atomic_decrement_u32(&heap->resource_count) && (!heap->refcount || heap->is_private))
- d3d12_heap_destroy(heap);
-}
-
static HRESULT STDMETHODCALLTYPE d3d12_heap_GetPrivateData(ID3D12Heap *iface,
REFGUID guid, UINT *data_size, void *data)
{
@@ -487,7 +490,7 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
heap->ID3D12Heap_iface.lpVtbl = &d3d12_heap_vtbl;
heap->refcount = 1;
- heap->resource_count = 0;
+ heap->internal_refcount = 1;
heap->is_private = !!resource;
@@ -555,8 +558,6 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
heap->device = device;
if (!heap->is_private)
d3d12_device_add_ref(heap->device);
- else
- heap->resource_count = 1;
if (d3d12_heap_get_memory_property_flags(heap) & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
{
@@ -998,7 +999,7 @@ static void d3d12_resource_destroy(struct d3d12_resource *resource, struct d3d12
d3d12_resource_tile_info_cleanup(resource);
if (resource->heap)
- d3d12_heap_resource_destroyed(resource->heap);
+ d3d12_heap_decref(resource->heap);
}
static ULONG d3d12_resource_incref(struct d3d12_resource *resource)
@@ -2200,7 +2201,7 @@ static HRESULT vkd3d_bind_heap_memory(struct d3d12_device *device,
{
resource->heap = heap;
resource->heap_offset = heap_offset;
- vkd3d_atomic_increment_u32(&heap->resource_count);
+ vkd3d_atomic_increment_u32(&heap->internal_refcount);
}
else
{
diff --git a/libs/vkd3d/libs/vkd3d/vkd3d_private.h b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
index 7e54738b19e..3559d0b0944 100644
--- a/libs/vkd3d/libs/vkd3d/vkd3d_private.h
+++ b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
@@ -436,7 +436,7 @@ struct d3d12_heap
{
ID3D12Heap ID3D12Heap_iface;
unsigned int refcount;
- unsigned int resource_count;
+ unsigned int internal_refcount;
bool is_private;
D3D12_HEAP_DESC desc;
--
2.47.2