Files
wine-staging/patches/vkd3d-latest/0001-Updated-vkd3d-to-f576ecc9929dd98c900bb8bc0335b91a1a0.patch
Alistair Leslie-Hughes 36020b4a0e Updated vkd3d-latest patchset
Squash and update.
2025-04-10 08:23:17 +10:00

26067 lines
1006 KiB
Diff

From 8e84e1a5750fac027178bb160050030e2786680b Mon Sep 17 00:00:00 2001
From: Alistair Leslie-Hughes <leslie_alistair@hotmail.com>
Date: Fri, 21 Feb 2025 09:15:01 +1100
Subject: [PATCH 1/2] Updated vkd3d to
f576ecc9929dd98c900bb8bc0335b91a1a0d3bff.
---
libs/vkd3d/include/private/spirv_grammar.h | 10103 ++++++++++++++++
libs/vkd3d/include/private/vkd3d_common.h | 5 +-
.../include/private/vkd3d_shader_utils.h | 4 -
libs/vkd3d/include/private/vkd3d_version.h | 2 +-
libs/vkd3d/include/vkd3d_shader.h | 15 +-
libs/vkd3d/libs/vkd3d-common/blob.c | 1 +
libs/vkd3d/libs/vkd3d-shader/d3d_asm.c | 24 +-
libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 32 +-
libs/vkd3d/libs/vkd3d-shader/dxbc.c | 34 +-
libs/vkd3d/libs/vkd3d-shader/dxil.c | 99 +-
libs/vkd3d/libs/vkd3d-shader/fx.c | 1862 ++-
libs/vkd3d/libs/vkd3d-shader/glsl.c | 27 +-
libs/vkd3d/libs/vkd3d-shader/hlsl.c | 377 +-
libs/vkd3d/libs/vkd3d-shader/hlsl.h | 104 +-
libs/vkd3d/libs/vkd3d-shader/hlsl.l | 5 +
libs/vkd3d/libs/vkd3d-shader/hlsl.y | 1309 +-
libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c | 3568 +++---
.../libs/vkd3d-shader/hlsl_constant_ops.c | 81 +-
libs/vkd3d/libs/vkd3d-shader/ir.c | 433 +-
libs/vkd3d/libs/vkd3d-shader/msl.c | 15 +-
libs/vkd3d/libs/vkd3d-shader/preproc.l | 1 +
libs/vkd3d/libs/vkd3d-shader/preproc.y | 10 +
libs/vkd3d/libs/vkd3d-shader/spirv.c | 464 +-
libs/vkd3d/libs/vkd3d-shader/tpf.c | 128 +-
.../libs/vkd3d-shader/vkd3d_shader_main.c | 198 +-
.../libs/vkd3d-shader/vkd3d_shader_private.h | 71 +-
.../vkd3d/libs/vkd3d-utils/vkd3d_utils_main.c | 2 +
libs/vkd3d/libs/vkd3d/command.c | 50 +-
libs/vkd3d/libs/vkd3d/device.c | 37 +-
libs/vkd3d/libs/vkd3d/resource.c | 14 +-
libs/vkd3d/libs/vkd3d/state.c | 118 +-
libs/vkd3d/libs/vkd3d/vkd3d_private.h | 2 +-
32 files changed, 16168 insertions(+), 3027 deletions(-)
create mode 100644 libs/vkd3d/include/private/spirv_grammar.h
diff --git a/libs/vkd3d/include/private/spirv_grammar.h b/libs/vkd3d/include/private/spirv_grammar.h
new file mode 100644
index 00000000000..2aac5a6558c
--- /dev/null
+++ b/libs/vkd3d/include/private/spirv_grammar.h
@@ -0,0 +1,10103 @@
+/* This file is automatically generated from version 1.6.4 of the
+ * machine-readable SPIR-V grammar.
+ *
+ * The original source is covered by the following license:
+ *
+ * Copyright (c) 2014-2024 The Khronos Group Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and/or associated documentation files (the "Materials"),
+ * to deal in the Materials without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Materials, and to permit persons to whom the
+ * Materials are furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Materials.
+ *
+ * MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
+ * STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
+ * HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
+ * IN THE MATERIALS.
+ */
+
+enum spirv_parser_operand_category
+{
+ SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM,
+ SPIRV_PARSER_OPERAND_CATEGORY_COMPOSITE,
+ SPIRV_PARSER_OPERAND_CATEGORY_ID,
+ SPIRV_PARSER_OPERAND_CATEGORY_LITERAL,
+ SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM,
+};
+
+enum spirv_parser_operand_type
+{
+ SPIRV_PARSER_OPERAND_TYPE_ACCESS_QUALIFIER,
+ SPIRV_PARSER_OPERAND_TYPE_ADDRESSING_MODEL,
+ SPIRV_PARSER_OPERAND_TYPE_BUILT_IN,
+ SPIRV_PARSER_OPERAND_TYPE_CAPABILITY,
+ SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_LAYOUT,
+ SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS,
+ SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_REDUCE,
+ SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_USE,
+ SPIRV_PARSER_OPERAND_TYPE_DECORATION,
+ SPIRV_PARSER_OPERAND_TYPE_DIM,
+ SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODE,
+ SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODEL,
+ SPIRV_PARSER_OPERAND_TYPE_FPDENORM_MODE,
+ SPIRV_PARSER_OPERAND_TYPE_FPENCODING,
+ SPIRV_PARSER_OPERAND_TYPE_FPFAST_MATH_MODE,
+ SPIRV_PARSER_OPERAND_TYPE_FPOPERATION_MODE,
+ SPIRV_PARSER_OPERAND_TYPE_FPROUNDING_MODE,
+ SPIRV_PARSER_OPERAND_TYPE_FRAGMENT_SHADING_RATE,
+ SPIRV_PARSER_OPERAND_TYPE_FUNCTION_CONTROL,
+ SPIRV_PARSER_OPERAND_TYPE_FUNCTION_PARAMETER_ATTRIBUTE,
+ SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION,
+ SPIRV_PARSER_OPERAND_TYPE_HOST_ACCESS_QUALIFIER,
+ SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_RESULT,
+ SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE,
+ SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE,
+ SPIRV_PARSER_OPERAND_TYPE_IMAGE_CHANNEL_DATA_TYPE,
+ SPIRV_PARSER_OPERAND_TYPE_IMAGE_CHANNEL_ORDER,
+ SPIRV_PARSER_OPERAND_TYPE_IMAGE_FORMAT,
+ SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS,
+ SPIRV_PARSER_OPERAND_TYPE_INITIALIZATION_MODE_QUALIFIER,
+ SPIRV_PARSER_OPERAND_TYPE_KERNEL_ENQUEUE_FLAGS,
+ SPIRV_PARSER_OPERAND_TYPE_KERNEL_PROFILING_INFO,
+ SPIRV_PARSER_OPERAND_TYPE_LINKAGE_TYPE,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_CONTEXT_DEPENDENT_NUMBER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_EXT_INST_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_FLOAT,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_SPEC_CONSTANT_OP_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ SPIRV_PARSER_OPERAND_TYPE_LOAD_CACHE_CONTROL,
+ SPIRV_PARSER_OPERAND_TYPE_LOOP_CONTROL,
+ SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS,
+ SPIRV_PARSER_OPERAND_TYPE_MEMORY_MODEL,
+ SPIRV_PARSER_OPERAND_TYPE_MEMORY_SEMANTICS,
+ SPIRV_PARSER_OPERAND_TYPE_NAMED_MAXIMUM_NUMBER_OF_REGISTERS,
+ SPIRV_PARSER_OPERAND_TYPE_OVERFLOW_MODES,
+ SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT,
+ SPIRV_PARSER_OPERAND_TYPE_PAIR_ID_REF_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_PAIR_ID_REF_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_PAIR_LITERAL_INTEGER_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_QUANTIZATION_MODES,
+ SPIRV_PARSER_OPERAND_TYPE_RAW_ACCESS_CHAIN_OPERANDS,
+ SPIRV_PARSER_OPERAND_TYPE_RAY_FLAGS,
+ SPIRV_PARSER_OPERAND_TYPE_RAY_QUERY_CANDIDATE_INTERSECTION_TYPE,
+ SPIRV_PARSER_OPERAND_TYPE_RAY_QUERY_COMMITTED_INTERSECTION_TYPE,
+ SPIRV_PARSER_OPERAND_TYPE_RAY_QUERY_INTERSECTION,
+ SPIRV_PARSER_OPERAND_TYPE_SAMPLER_ADDRESSING_MODE,
+ SPIRV_PARSER_OPERAND_TYPE_SAMPLER_FILTER_MODE,
+ SPIRV_PARSER_OPERAND_TYPE_SCOPE,
+ SPIRV_PARSER_OPERAND_TYPE_SELECTION_CONTROL,
+ SPIRV_PARSER_OPERAND_TYPE_SOURCE_LANGUAGE,
+ SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS,
+ SPIRV_PARSER_OPERAND_TYPE_STORE_CACHE_CONTROL,
+ SPIRV_PARSER_OPERAND_TYPE_TENSOR_ADDRESSING_OPERANDS,
+ SPIRV_PARSER_OPERAND_TYPE_TENSOR_CLAMP_MODE,
+};
+
+static const struct spirv_parser_operand_type_info
+{
+ const char *name;
+ enum spirv_parser_operand_category category;
+ size_t enumerant_count;
+ const struct spirv_parser_enumerant
+ {
+ uint32_t value;
+ const char *name;
+ size_t parameter_count;
+ enum spirv_parser_operand_type *parameters;
+ } *enumerants;
+}
+spirv_parser_operand_type_info[] =
+{
+ [SPIRV_PARSER_OPERAND_TYPE_ACCESS_QUALIFIER] =
+ {
+ "AccessQualifier", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "ReadOnly"},
+ {0x1, "WriteOnly"},
+ {0x2, "ReadWrite"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_ADDRESSING_MODEL] =
+ {
+ "AddressingModel", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Logical"},
+ {0x1, "Physical32"},
+ {0x2, "Physical64"},
+ {0x14e4, "PhysicalStorageBuffer64"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_BUILT_IN] =
+ {
+ "BuiltIn", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 116,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Position"},
+ {0x1, "PointSize"},
+ {0x3, "ClipDistance"},
+ {0x4, "CullDistance"},
+ {0x5, "VertexId"},
+ {0x6, "InstanceId"},
+ {0x7, "PrimitiveId"},
+ {0x8, "InvocationId"},
+ {0x9, "Layer"},
+ {0xa, "ViewportIndex"},
+ {0xb, "TessLevelOuter"},
+ {0xc, "TessLevelInner"},
+ {0xd, "TessCoord"},
+ {0xe, "PatchVertices"},
+ {0xf, "FragCoord"},
+ {0x10, "PointCoord"},
+ {0x11, "FrontFacing"},
+ {0x12, "SampleId"},
+ {0x13, "SamplePosition"},
+ {0x14, "SampleMask"},
+ {0x16, "FragDepth"},
+ {0x17, "HelperInvocation"},
+ {0x18, "NumWorkgroups"},
+ {0x19, "WorkgroupSize"},
+ {0x1a, "WorkgroupId"},
+ {0x1b, "LocalInvocationId"},
+ {0x1c, "GlobalInvocationId"},
+ {0x1d, "LocalInvocationIndex"},
+ {0x1e, "WorkDim"},
+ {0x1f, "GlobalSize"},
+ {0x20, "EnqueuedWorkgroupSize"},
+ {0x21, "GlobalOffset"},
+ {0x22, "GlobalLinearId"},
+ {0x24, "SubgroupSize"},
+ {0x25, "SubgroupMaxSize"},
+ {0x26, "NumSubgroups"},
+ {0x27, "NumEnqueuedSubgroups"},
+ {0x28, "SubgroupId"},
+ {0x29, "SubgroupLocalInvocationId"},
+ {0x2a, "VertexIndex"},
+ {0x2b, "InstanceIndex"},
+ {0x1040, "CoreIDARM"},
+ {0x1041, "CoreCountARM"},
+ {0x1042, "CoreMaxIDARM"},
+ {0x1043, "WarpIDARM"},
+ {0x1044, "WarpMaxIDARM"},
+ {0x1140, "SubgroupEqMask"},
+ {0x1141, "SubgroupGeMask"},
+ {0x1142, "SubgroupGtMask"},
+ {0x1143, "SubgroupLeMask"},
+ {0x1144, "SubgroupLtMask"},
+ {0x1148, "BaseVertex"},
+ {0x1149, "BaseInstance"},
+ {0x114a, "DrawIndex"},
+ {0x1150, "PrimitiveShadingRateKHR"},
+ {0x1156, "DeviceIndex"},
+ {0x1158, "ViewIndex"},
+ {0x115c, "ShadingRateKHR"},
+ {0x1380, "BaryCoordNoPerspAMD"},
+ {0x1381, "BaryCoordNoPerspCentroidAMD"},
+ {0x1382, "BaryCoordNoPerspSampleAMD"},
+ {0x1383, "BaryCoordSmoothAMD"},
+ {0x1384, "BaryCoordSmoothCentroidAMD"},
+ {0x1385, "BaryCoordSmoothSampleAMD"},
+ {0x1386, "BaryCoordPullModelAMD"},
+ {0x1396, "FragStencilRefEXT"},
+ {0x139d, "RemainingRecursionLevelsAMDX"},
+ {0x13d1, "ShaderIndexAMDX"},
+ {0x1485, "ViewportMaskNV"},
+ {0x1489, "SecondaryPositionNV"},
+ {0x148a, "SecondaryViewportMaskNV"},
+ {0x148d, "PositionPerViewNV"},
+ {0x148e, "ViewportMaskPerViewNV"},
+ {0x1490, "FullyCoveredEXT"},
+ {0x149a, "TaskCountNV"},
+ {0x149b, "PrimitiveCountNV"},
+ {0x149c, "PrimitiveIndicesNV"},
+ {0x149d, "ClipDistancePerViewNV"},
+ {0x149e, "CullDistancePerViewNV"},
+ {0x149f, "LayerPerViewNV"},
+ {0x14a0, "MeshViewCountNV"},
+ {0x14a1, "MeshViewIndicesNV"},
+ {0x14a6, "BaryCoordKHR"},
+ {0x14a7, "BaryCoordNoPerspKHR"},
+ {0x14ac, "FragSizeEXT"},
+ {0x14ad, "FragInvocationCountEXT"},
+ {0x14ae, "PrimitivePointIndicesEXT"},
+ {0x14af, "PrimitiveLineIndicesEXT"},
+ {0x14b0, "PrimitiveTriangleIndicesEXT"},
+ {0x14b3, "CullPrimitiveEXT"},
+ {0x14c7, "LaunchIdKHR"},
+ {0x14c8, "LaunchSizeKHR"},
+ {0x14c9, "WorldRayOriginKHR"},
+ {0x14ca, "WorldRayDirectionKHR"},
+ {0x14cb, "ObjectRayOriginKHR"},
+ {0x14cc, "ObjectRayDirectionKHR"},
+ {0x14cd, "RayTminKHR"},
+ {0x14ce, "RayTmaxKHR"},
+ {0x14cf, "InstanceCustomIndexKHR"},
+ {0x14d2, "ObjectToWorldKHR"},
+ {0x14d3, "WorldToObjectKHR"},
+ {0x14d4, "HitTNV"},
+ {0x14d5, "HitKindKHR"},
+ {0x14d6, "CurrentRayTimeNV"},
+ {0x14d7, "HitTriangleVertexPositionsKHR"},
+ {0x14d9, "HitMicroTriangleVertexPositionsNV"},
+ {0x14e0, "HitMicroTriangleVertexBarycentricsNV"},
+ {0x14e7, "IncomingRayFlagsKHR"},
+ {0x14e8, "RayGeometryIndexKHR"},
+ {0x14fe, "WarpsPerSMNV"},
+ {0x14ff, "SMCountNV"},
+ {0x1500, "WarpIDNV"},
+ {0x1501, "SMIDNV"},
+ {0x151d, "HitKindFrontFacingMicroTriangleNV"},
+ {0x151e, "HitKindBackFacingMicroTriangleNV"},
+ {0x1785, "CullMaskKHR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_CAPABILITY] =
+ {
+ "Capability", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 245,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Matrix"},
+ {0x1, "Shader"},
+ {0x2, "Geometry"},
+ {0x3, "Tessellation"},
+ {0x4, "Addresses"},
+ {0x5, "Linkage"},
+ {0x6, "Kernel"},
+ {0x7, "Vector16"},
+ {0x8, "Float16Buffer"},
+ {0x9, "Float16"},
+ {0xa, "Float64"},
+ {0xb, "Int64"},
+ {0xc, "Int64Atomics"},
+ {0xd, "ImageBasic"},
+ {0xe, "ImageReadWrite"},
+ {0xf, "ImageMipmap"},
+ {0x11, "Pipes"},
+ {0x12, "Groups"},
+ {0x13, "DeviceEnqueue"},
+ {0x14, "LiteralSampler"},
+ {0x15, "AtomicStorage"},
+ {0x16, "Int16"},
+ {0x17, "TessellationPointSize"},
+ {0x18, "GeometryPointSize"},
+ {0x19, "ImageGatherExtended"},
+ {0x1b, "StorageImageMultisample"},
+ {0x1c, "UniformBufferArrayDynamicIndexing"},
+ {0x1d, "SampledImageArrayDynamicIndexing"},
+ {0x1e, "StorageBufferArrayDynamicIndexing"},
+ {0x1f, "StorageImageArrayDynamicIndexing"},
+ {0x20, "ClipDistance"},
+ {0x21, "CullDistance"},
+ {0x22, "ImageCubeArray"},
+ {0x23, "SampleRateShading"},
+ {0x24, "ImageRect"},
+ {0x25, "SampledRect"},
+ {0x26, "GenericPointer"},
+ {0x27, "Int8"},
+ {0x28, "InputAttachment"},
+ {0x29, "SparseResidency"},
+ {0x2a, "MinLod"},
+ {0x2b, "Sampled1D"},
+ {0x2c, "Image1D"},
+ {0x2d, "SampledCubeArray"},
+ {0x2e, "SampledBuffer"},
+ {0x2f, "ImageBuffer"},
+ {0x30, "ImageMSArray"},
+ {0x31, "StorageImageExtendedFormats"},
+ {0x32, "ImageQuery"},
+ {0x33, "DerivativeControl"},
+ {0x34, "InterpolationFunction"},
+ {0x35, "TransformFeedback"},
+ {0x36, "GeometryStreams"},
+ {0x37, "StorageImageReadWithoutFormat"},
+ {0x38, "StorageImageWriteWithoutFormat"},
+ {0x39, "MultiViewport"},
+ {0x3a, "SubgroupDispatch"},
+ {0x3b, "NamedBarrier"},
+ {0x3c, "PipeStorage"},
+ {0x3d, "GroupNonUniform"},
+ {0x3e, "GroupNonUniformVote"},
+ {0x3f, "GroupNonUniformArithmetic"},
+ {0x40, "GroupNonUniformBallot"},
+ {0x41, "GroupNonUniformShuffle"},
+ {0x42, "GroupNonUniformShuffleRelative"},
+ {0x43, "GroupNonUniformClustered"},
+ {0x44, "GroupNonUniformQuad"},
+ {0x45, "ShaderLayer"},
+ {0x46, "ShaderViewportIndex"},
+ {0x47, "UniformDecoration"},
+ {0x1045, "CoreBuiltinsARM"},
+ {0x1046, "TileImageColorReadAccessEXT"},
+ {0x1047, "TileImageDepthReadAccessEXT"},
+ {0x1048, "TileImageStencilReadAccessEXT"},
+ {0x1069, "CooperativeMatrixLayoutsARM"},
+ {0x1146, "FragmentShadingRateKHR"},
+ {0x1147, "SubgroupBallotKHR"},
+ {0x114b, "DrawParameters"},
+ {0x114c, "WorkgroupMemoryExplicitLayoutKHR"},
+ {0x114d, "WorkgroupMemoryExplicitLayout8BitAccessKHR"},
+ {0x114e, "WorkgroupMemoryExplicitLayout16BitAccessKHR"},
+ {0x114f, "SubgroupVoteKHR"},
+ {0x1151, "StorageBuffer16BitAccess"},
+ {0x1152, "UniformAndStorageBuffer16BitAccess"},
+ {0x1153, "StoragePushConstant16"},
+ {0x1154, "StorageInputOutput16"},
+ {0x1155, "DeviceGroup"},
+ {0x1157, "MultiView"},
+ {0x1159, "VariablePointersStorageBuffer"},
+ {0x115a, "VariablePointers"},
+ {0x115d, "AtomicStorageOps"},
+ {0x115f, "SampleMaskPostDepthCoverage"},
+ {0x1160, "StorageBuffer8BitAccess"},
+ {0x1161, "UniformAndStorageBuffer8BitAccess"},
+ {0x1162, "StoragePushConstant8"},
+ {0x1170, "DenormPreserve"},
+ {0x1171, "DenormFlushToZero"},
+ {0x1172, "SignedZeroInfNanPreserve"},
+ {0x1173, "RoundingModeRTE"},
+ {0x1174, "RoundingModeRTZ"},
+ {0x1177, "RayQueryProvisionalKHR"},
+ {0x1178, "RayQueryKHR"},
+ {0x1179, "UntypedPointersKHR"},
+ {0x117e, "RayTraversalPrimitiveCullingKHR"},
+ {0x117f, "RayTracingKHR"},
+ {0x1184, "TextureSampleWeightedQCOM"},
+ {0x1185, "TextureBoxFilterQCOM"},
+ {0x1186, "TextureBlockMatchQCOM"},
+ {0x1192, "TextureBlockMatch2QCOM"},
+ {0x1390, "Float16ImageAMD"},
+ {0x1391, "ImageGatherBiasLodAMD"},
+ {0x1392, "FragmentMaskAMD"},
+ {0x1395, "StencilExportEXT"},
+ {0x1397, "ImageReadWriteLodAMD"},
+ {0x1398, "Int64ImageEXT"},
+ {0x13bf, "ShaderClockKHR"},
+ {0x13cb, "ShaderEnqueueAMDX"},
+ {0x13df, "QuadControlKHR"},
+ {0x1481, "SampleMaskOverrideCoverageNV"},
+ {0x1483, "GeometryShaderPassthroughNV"},
+ {0x1486, "ShaderViewportIndexLayerEXT"},
+ {0x1487, "ShaderViewportMaskNV"},
+ {0x148b, "ShaderStereoViewNV"},
+ {0x148c, "PerViewAttributesNV"},
+ {0x1491, "FragmentFullyCoveredEXT"},
+ {0x1492, "MeshShadingNV"},
+ {0x14a2, "ImageFootprintNV"},
+ {0x14a3, "MeshShadingEXT"},
+ {0x14a4, "FragmentBarycentricKHR"},
+ {0x14a8, "ComputeDerivativeGroupQuadsKHR"},
+ {0x14ab, "FragmentDensityEXT"},
+ {0x14b1, "GroupNonUniformPartitionedNV"},
+ {0x14b5, "ShaderNonUniform"},
+ {0x14b6, "RuntimeDescriptorArray"},
+ {0x14b7, "InputAttachmentArrayDynamicIndexing"},
+ {0x14b8, "UniformTexelBufferArrayDynamicIndexing"},
+ {0x14b9, "StorageTexelBufferArrayDynamicIndexing"},
+ {0x14ba, "UniformBufferArrayNonUniformIndexing"},
+ {0x14bb, "SampledImageArrayNonUniformIndexing"},
+ {0x14bc, "StorageBufferArrayNonUniformIndexing"},
+ {0x14bd, "StorageImageArrayNonUniformIndexing"},
+ {0x14be, "InputAttachmentArrayNonUniformIndexing"},
+ {0x14bf, "UniformTexelBufferArrayNonUniformIndexing"},
+ {0x14c0, "StorageTexelBufferArrayNonUniformIndexing"},
+ {0x14d8, "RayTracingPositionFetchKHR"},
+ {0x14dc, "RayTracingNV"},
+ {0x14dd, "RayTracingMotionBlurNV"},
+ {0x14e1, "VulkanMemoryModel"},
+ {0x14e2, "VulkanMemoryModelDeviceScope"},
+ {0x14e3, "PhysicalStorageBufferAddresses"},
+ {0x14e6, "ComputeDerivativeGroupLinearKHR"},
+ {0x14e9, "RayTracingProvisionalKHR"},
+ {0x14ed, "CooperativeMatrixNV"},
+ {0x14f3, "FragmentShaderSampleInterlockEXT"},
+ {0x14fc, "FragmentShaderShadingRateInterlockEXT"},
+ {0x14fd, "ShaderSMBuiltinsNV"},
+ {0x1502, "FragmentShaderPixelInterlockEXT"},
+ {0x1503, "DemoteToHelperInvocation"},
+ {0x1504, "DisplacementMicromapNV"},
+ {0x1505, "RayTracingOpacityMicromapEXT"},
+ {0x1507, "ShaderInvocationReorderNV"},
+ {0x150e, "BindlessTextureNV"},
+ {0x150f, "RayQueryPositionFetchKHR"},
+ {0x151c, "AtomicFloat16VectorNV"},
+ {0x1521, "RayTracingDisplacementMicromapNV"},
+ {0x1526, "RawAccessChainsNV"},
+ {0x1536, "CooperativeMatrixReductionsNV"},
+ {0x1537, "CooperativeMatrixConversionsNV"},
+ {0x1538, "CooperativeMatrixPerElementOperationsNV"},
+ {0x1539, "CooperativeMatrixTensorAddressingNV"},
+ {0x153a, "CooperativeMatrixBlockLoadsNV"},
+ {0x153f, "TensorAddressingNV"},
+ {0x15c0, "SubgroupShuffleINTEL"},
+ {0x15c1, "SubgroupBufferBlockIOINTEL"},
+ {0x15c2, "SubgroupImageBlockIOINTEL"},
+ {0x15cb, "SubgroupImageMediaBlockIOINTEL"},
+ {0x15ce, "RoundToInfinityINTEL"},
+ {0x15cf, "FloatingPointModeINTEL"},
+ {0x15d0, "IntegerFunctions2INTEL"},
+ {0x15e3, "FunctionPointersINTEL"},
+ {0x15e4, "IndirectReferencesINTEL"},
+ {0x15e6, "AsmINTEL"},
+ {0x15ec, "AtomicFloat32MinMaxEXT"},
+ {0x15ed, "AtomicFloat64MinMaxEXT"},
+ {0x15f0, "AtomicFloat16MinMaxEXT"},
+ {0x15f1, "VectorComputeINTEL"},
+ {0x15f3, "VectorAnyINTEL"},
+ {0x15fd, "ExpectAssumeKHR"},
+ {0x1640, "SubgroupAvcMotionEstimationINTEL"},
+ {0x1641, "SubgroupAvcMotionEstimationIntraINTEL"},
+ {0x1642, "SubgroupAvcMotionEstimationChromaINTEL"},
+ {0x16b9, "VariableLengthArrayINTEL"},
+ {0x16bd, "FunctionFloatControlINTEL"},
+ {0x16c0, "FPGAMemoryAttributesINTEL"},
+ {0x16cd, "FPFastMathModeINTEL"},
+ {0x16d4, "ArbitraryPrecisionIntegersINTEL"},
+ {0x16d5, "ArbitraryPrecisionFloatingPointINTEL"},
+ {0x16fe, "UnstructuredLoopControlsINTEL"},
+ {0x1700, "FPGALoopControlsINTEL"},
+ {0x1704, "KernelAttributesINTEL"},
+ {0x1709, "FPGAKernelAttributesINTEL"},
+ {0x170a, "FPGAMemoryAccessesINTEL"},
+ {0x1710, "FPGAClusterAttributesINTEL"},
+ {0x1712, "LoopFuseINTEL"},
+ {0x1714, "FPGADSPControlINTEL"},
+ {0x1716, "MemoryAccessAliasingINTEL"},
+ {0x171c, "FPGAInvocationPipeliningAttributesINTEL"},
+ {0x1720, "FPGABufferLocationINTEL"},
+ {0x1722, "ArbitraryPrecisionFixedPointINTEL"},
+ {0x172f, "USMStorageClassesINTEL"},
+ {0x1733, "RuntimeAlignedAttributeINTEL"},
+ {0x1737, "IOPipesINTEL"},
+ {0x1739, "BlockingPipesINTEL"},
+ {0x173c, "FPGARegINTEL"},
+ {0x1780, "DotProductInputAll"},
+ {0x1781, "DotProductInput4x8Bit"},
+ {0x1782, "DotProductInput4x8BitPacked"},
+ {0x1783, "DotProduct"},
+ {0x1784, "RayCullMaskKHR"},
+ {0x1786, "CooperativeMatrixKHR"},
+ {0x1788, "ReplicatedCompositesEXT"},
+ {0x1789, "BitInstructions"},
+ {0x178a, "GroupNonUniformRotateKHR"},
+ {0x178d, "FloatControls2"},
+ {0x1791, "AtomicFloat32AddEXT"},
+ {0x1792, "AtomicFloat64AddEXT"},
+ {0x17c9, "LongCompositesINTEL"},
+ {0x17ce, "OptNoneEXT"},
+ {0x17cf, "AtomicFloat16AddEXT"},
+ {0x17e2, "DebugInfoModuleINTEL"},
+ {0x17e3, "BFloat16ConversionINTEL"},
+ {0x17fd, "SplitBarrierINTEL"},
+ {0x1800, "ArithmeticFenceEXT"},
+ {0x1806, "FPGAClusterAttributesV2INTEL"},
+ {0x1811, "FPGAKernelAttributesv2INTEL"},
+ {0x1819, "FPMaxErrorINTEL"},
+ {0x181b, "FPGALatencyControlINTEL"},
+ {0x181e, "FPGAArgumentInterfacesINTEL"},
+ {0x182b, "GlobalVariableHostAccessINTEL"},
+ {0x182d, "GlobalVariableFPGADecorationsINTEL"},
+ {0x184c, "SubgroupBufferPrefetchINTEL"},
+ {0x1900, "GroupUniformArithmeticKHR"},
+ {0x191b, "MaskedGatherScatterINTEL"},
+ {0x1929, "CacheControlsINTEL"},
+ {0x193c, "RegisterLimitsINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_LAYOUT] =
+ {
+ "CooperativeMatrixLayout", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "RowMajorKHR"},
+ {0x1, "ColumnMajorKHR"},
+ {0x106a, "RowBlockedInterleavedARM"},
+ {0x106b, "ColumnBlockedInterleavedARM"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS] =
+ {
+ "CooperativeMatrixOperands", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 6,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "NoneKHR"},
+ {0x1, "MatrixASignedComponentsKHR"},
+ {0x2, "MatrixBSignedComponentsKHR"},
+ {0x4, "MatrixCSignedComponentsKHR"},
+ {0x8, "MatrixResultSignedComponentsKHR"},
+ {0x10, "SaturatingAccumulationKHR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_REDUCE] =
+ {
+ "CooperativeMatrixReduce", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0x1, "Row"},
+ {0x2, "Column"},
+ {0x4, "2x2"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_USE] =
+ {
+ "CooperativeMatrixUse", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "MatrixAKHR"},
+ {0x1, "MatrixBKHR"},
+ {0x2, "MatrixAccumulatorKHR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_DECORATION] =
+ {
+ "Decoration", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 142,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "RelaxedPrecision"},
+ {
+ 0x1, "SpecId", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x2, "Block"},
+ {0x3, "BufferBlock"},
+ {0x4, "RowMajor"},
+ {0x5, "ColMajor"},
+ {
+ 0x6, "ArrayStride", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x7, "MatrixStride", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x8, "GLSLShared"},
+ {0x9, "GLSLPacked"},
+ {0xa, "CPacked"},
+ {
+ 0xb, "BuiltIn", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_BUILT_IN,
+ }
+ },
+ {0xd, "NoPerspective"},
+ {0xe, "Flat"},
+ {0xf, "Patch"},
+ {0x10, "Centroid"},
+ {0x11, "Sample"},
+ {0x12, "Invariant"},
+ {0x13, "Restrict"},
+ {0x14, "Aliased"},
+ {0x15, "Volatile"},
+ {0x16, "Constant"},
+ {0x17, "Coherent"},
+ {0x18, "NonWritable"},
+ {0x19, "NonReadable"},
+ {0x1a, "Uniform"},
+ {
+ 0x1b, "UniformId", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE,
+ }
+ },
+ {0x1c, "SaturatedConversion"},
+ {
+ 0x1d, "Stream", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1e, "Location", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1f, "Component", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x20, "Index", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x21, "Binding", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x22, "DescriptorSet", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x23, "Offset", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x24, "XfbBuffer", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x25, "XfbStride", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x26, "FuncParamAttr", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_FUNCTION_PARAMETER_ATTRIBUTE,
+ }
+ },
+ {
+ 0x27, "FPRoundingMode", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_FPROUNDING_MODE,
+ }
+ },
+ {
+ 0x28, "FPFastMathMode", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_FPFAST_MATH_MODE,
+ }
+ },
+ {
+ 0x29, "LinkageAttributes", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ SPIRV_PARSER_OPERAND_TYPE_LINKAGE_TYPE,
+ }
+ },
+ {0x2a, "NoContraction"},
+ {
+ 0x2b, "InputAttachmentIndex", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x2c, "Alignment", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x2d, "MaxByteOffset", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x2e, "AlignmentId", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x2f, "MaxByteOffsetId", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {0x1175, "NoSignedWrap"},
+ {0x1176, "NoUnsignedWrap"},
+ {0x1187, "WeightTextureQCOM"},
+ {0x1188, "BlockMatchTextureQCOM"},
+ {0x1193, "BlockMatchSamplerQCOM"},
+ {0x1387, "ExplicitInterpAMD"},
+ {
+ 0x139b, "NodeSharesPayloadLimitsWithAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x139c, "NodeMaxPayloadsAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {0x13d6, "TrackFinishWritingAMDX"},
+ {
+ 0x13e3, "PayloadNodeNameAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x13ea, "PayloadNodeBaseIndexAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {0x13eb, "PayloadNodeSparseArrayAMDX"},
+ {
+ 0x13ec, "PayloadNodeArraySizeAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {0x13f1, "PayloadDispatchIndirectAMDX"},
+ {0x1480, "OverrideCoverageNV"},
+ {0x1482, "PassthroughNV"},
+ {0x1484, "ViewportRelativeNV"},
+ {
+ 0x1488, "SecondaryViewportRelativeNV", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1497, "PerPrimitiveEXT"},
+ {0x1498, "PerViewNV"},
+ {0x1499, "PerTaskNV"},
+ {0x14a5, "PerVertexKHR"},
+ {0x14b4, "NonUniform"},
+ {0x14eb, "RestrictPointer"},
+ {0x14ec, "AliasedPointer"},
+ {0x150a, "HitObjectShaderRecordBufferNV"},
+ {0x1516, "BindlessSamplerNV"},
+ {0x1517, "BindlessImageNV"},
+ {0x1518, "BoundSamplerNV"},
+ {0x1519, "BoundImageNV"},
+ {
+ 0x15df, "SIMTCallINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x15e2, "ReferencedIndirectlyINTEL"},
+ {
+ 0x15e7, "ClobberINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ }
+ },
+ {0x15e8, "SideEffectsINTEL"},
+ {0x15f8, "VectorComputeVariableINTEL"},
+ {
+ 0x15f9, "FuncParamIOKindINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x15fa, "VectorComputeFunctionINTEL"},
+ {0x15fb, "StackCallINTEL"},
+ {
+ 0x15fc, "GlobalVariableOffsetINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1602, "CounterBuffer", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x1603, "UserSemantic", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ }
+ },
+ {
+ 0x1604, "UserTypeGOOGLE", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ }
+ },
+ {
+ 0x16be, "FunctionRoundingModeINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_FPROUNDING_MODE,
+ }
+ },
+ {
+ 0x16bf, "FunctionDenormModeINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_FPDENORM_MODE,
+ }
+ },
+ {0x16c1, "RegisterINTEL"},
+ {
+ 0x16c2, "MemoryINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ }
+ },
+ {
+ 0x16c3, "NumbanksINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x16c4, "BankwidthINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x16c5, "MaxPrivateCopiesINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x16c6, "SinglepumpINTEL"},
+ {0x16c7, "DoublepumpINTEL"},
+ {
+ 0x16c8, "MaxReplicatesINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x16c9, "SimpleDualPortINTEL"},
+ {
+ 0x16ca, "MergeINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ }
+ },
+ {
+ 0x16cb, "BankBitsINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x16cc, "ForcePow2DepthINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x16fb, "StridesizeINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x16fc, "WordsizeINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x16fd, "TrueDualPortINTEL"},
+ {0x170b, "BurstCoalesceINTEL"},
+ {
+ 0x170c, "CacheSizeINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x170d, "DontStaticallyCoalesceINTEL"},
+ {
+ 0x170e, "PrefetchINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1711, "StallEnableINTEL"},
+ {0x1713, "FuseLoopsInFunctionINTEL"},
+ {
+ 0x1715, "MathOpDSPModeINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x171a, "AliasScopeINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x171b, "NoAliasINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x171d, "InitiationIntervalINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x171e, "MaxConcurrencyINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x171f, "PipelineEnableINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1721, "BufferLocationINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1738, "IOPipeStorageINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x17c0, "FunctionFloatingPointModeINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_FPOPERATION_MODE,
+ }
+ },
+ {0x17c5, "SingleElementVectorINTEL"},
+ {0x17c7, "VectorComputeCallableFunctionINTEL"},
+ {0x17fc, "MediaBlockIOINTEL"},
+ {0x1807, "StallFreeINTEL"},
+ {
+ 0x181a, "FPMaxErrorDecorationINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_FLOAT,
+ }
+ },
+ {
+ 0x181c, "LatencyControlLabelINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x181d, "LatencyControlConstraintINTEL", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x181f, "ConduitKernelArgumentINTEL"},
+ {0x1820, "RegisterMapKernelArgumentINTEL"},
+ {
+ 0x1821, "MMHostInterfaceAddressWidthINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1822, "MMHostInterfaceDataWidthINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1823, "MMHostInterfaceLatencyINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1824, "MMHostInterfaceReadWriteModeINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ACCESS_QUALIFIER,
+ }
+ },
+ {
+ 0x1825, "MMHostInterfaceMaxBurstINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1826, "MMHostInterfaceWaitRequestINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1827, "StableKernelArgumentINTEL"},
+ {
+ 0x182c, "HostAccessINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_HOST_ACCESS_QUALIFIER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
+ }
+ },
+ {
+ 0x182e, "InitModeINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_INITIALIZATION_MODE_QUALIFIER,
+ }
+ },
+ {
+ 0x182f, "ImplementInRegisterMapINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x192a, "CacheControlLoadINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LOAD_CACHE_CONTROL,
+ }
+ },
+ {
+ 0x192b, "CacheControlStoreINTEL", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_STORE_CACHE_CONTROL,
+ }
+ },
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_DIM] =
+ {
+ "Dim", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 8,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "1D"},
+ {0x1, "2D"},
+ {0x2, "3D"},
+ {0x3, "Cube"},
+ {0x4, "Rect"},
+ {0x5, "Buffer"},
+ {0x6, "SubpassData"},
+ {0x104d, "TileImageDataEXT"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODE] =
+ {
+ "ExecutionMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 94,
+ (struct spirv_parser_enumerant[])
+ {
+ {
+ 0, "Invocations", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1, "SpacingEqual"},
+ {0x2, "SpacingFractionalEven"},
+ {0x3, "SpacingFractionalOdd"},
+ {0x4, "VertexOrderCw"},
+ {0x5, "VertexOrderCcw"},
+ {0x6, "PixelCenterInteger"},
+ {0x7, "OriginUpperLeft"},
+ {0x8, "OriginLowerLeft"},
+ {0x9, "EarlyFragmentTests"},
+ {0xa, "PointMode"},
+ {0xb, "Xfb"},
+ {0xc, "DepthReplacing"},
+ {0xe, "DepthGreater"},
+ {0xf, "DepthLess"},
+ {0x10, "DepthUnchanged"},
+ {
+ 0x11, "LocalSize", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x12, "LocalSizeHint", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x13, "InputPoints"},
+ {0x14, "InputLines"},
+ {0x15, "InputLinesAdjacency"},
+ {0x16, "Triangles"},
+ {0x17, "InputTrianglesAdjacency"},
+ {0x18, "Quads"},
+ {0x19, "Isolines"},
+ {
+ 0x1a, "OutputVertices", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1b, "OutputPoints"},
+ {0x1c, "OutputLineStrip"},
+ {0x1d, "OutputTriangleStrip"},
+ {
+ 0x1e, "VecTypeHint", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1f, "ContractionOff"},
+ {0x21, "Initializer"},
+ {0x22, "Finalizer"},
+ {
+ 0x23, "SubgroupSize", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x24, "SubgroupsPerWorkgroup", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x25, "SubgroupsPerWorkgroupId", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x26, "LocalSizeId", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x27, "LocalSizeHintId", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {0x1049, "NonCoherentColorAttachmentReadEXT"},
+ {0x104a, "NonCoherentDepthAttachmentReadEXT"},
+ {0x104b, "NonCoherentStencilAttachmentReadEXT"},
+ {0x1145, "SubgroupUniformControlFlowKHR"},
+ {0x115e, "PostDepthCoverage"},
+ {
+ 0x116b, "DenormPreserve", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x116c, "DenormFlushToZero", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x116d, "SignedZeroInfNanPreserve", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x116e, "RoundingModeRTE", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x116f, "RoundingModeRTZ", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1399, "EarlyAndLateFragmentTestsAMD"},
+ {0x13a3, "StencilRefReplacingEXT"},
+ {0x13cd, "CoalescingAMDX"},
+ {
+ 0x13ce, "IsApiEntryAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x13cf, "MaxNodeRecursionAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x13d0, "StaticNumWorkgroupsAMDX", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x13d1, "ShaderIndexAMDX", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x13d5, "MaxNumWorkgroupsAMDX", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {0x13d7, "StencilRefUnchangedFrontAMD"},
+ {0x13d8, "StencilRefGreaterFrontAMD"},
+ {0x13d9, "StencilRefLessFrontAMD"},
+ {0x13da, "StencilRefUnchangedBackAMD"},
+ {0x13db, "StencilRefGreaterBackAMD"},
+ {0x13dc, "StencilRefLessBackAMD"},
+ {0x13e0, "QuadDerivativesKHR"},
+ {0x13e1, "RequireFullQuadsKHR"},
+ {
+ 0x13ee, "SharesInputWithAMDX", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {0x1495, "OutputLinesEXT"},
+ {
+ 0x1496, "OutputPrimitivesEXT", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x14a9, "DerivativeGroupQuadsKHR"},
+ {0x14aa, "DerivativeGroupLinearKHR"},
+ {0x14b2, "OutputTrianglesEXT"},
+ {0x14f6, "PixelInterlockOrderedEXT"},
+ {0x14f7, "PixelInterlockUnorderedEXT"},
+ {0x14f8, "SampleInterlockOrderedEXT"},
+ {0x14f9, "SampleInterlockUnorderedEXT"},
+ {0x14fa, "ShadingRateInterlockOrderedEXT"},
+ {0x14fb, "ShadingRateInterlockUnorderedEXT"},
+ {
+ 0x15f2, "SharedLocalMemorySizeINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x15f4, "RoundingModeRTPINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x15f5, "RoundingModeRTNINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x15f6, "FloatingPointModeALTINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x15f7, "FloatingPointModeIEEEINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1705, "MaxWorkgroupSizeINTEL", 3,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1706, "MaxWorkDimINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1707, "NoGlobalOffsetINTEL"},
+ {
+ 0x1708, "NumSIMDWorkitemsINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x170f, "SchedulerTargetFmaxMhzINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x1787, "MaximallyReconvergesKHR"},
+ {
+ 0x178c, "FPFastMathDefault", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x180a, "StreamingInterfaceINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1810, "RegisterMapInterfaceINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x1911, "NamedBarrierCountINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x193d, "MaximumRegistersINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x193e, "MaximumRegistersIdINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x193f, "NamedMaximumRegistersINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_NAMED_MAXIMUM_NUMBER_OF_REGISTERS,
+ }
+ },
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODEL] =
+ {
+ "ExecutionModel", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 17,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Vertex"},
+ {0x1, "TessellationControl"},
+ {0x2, "TessellationEvaluation"},
+ {0x3, "Geometry"},
+ {0x4, "Fragment"},
+ {0x5, "GLCompute"},
+ {0x6, "Kernel"},
+ {0x1493, "TaskNV"},
+ {0x1494, "MeshNV"},
+ {0x14c1, "RayGenerationKHR"},
+ {0x14c2, "IntersectionKHR"},
+ {0x14c3, "AnyHitKHR"},
+ {0x14c4, "ClosestHitKHR"},
+ {0x14c5, "MissKHR"},
+ {0x14c6, "CallableKHR"},
+ {0x14f4, "TaskEXT"},
+ {0x14f5, "MeshEXT"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FPDENORM_MODE] =
+ {
+ "FPDenormMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 2,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Preserve"},
+ {0x1, "FlushToZero"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FPENCODING] =
+ {
+ "FPEncoding", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FPFAST_MATH_MODE] =
+ {
+ "FPFastMathMode", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 9,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "NotNaN"},
+ {0x2, "NotInf"},
+ {0x4, "NSZ"},
+ {0x8, "AllowRecip"},
+ {0x10, "Fast"},
+ {0x10000, "AllowContract"},
+ {0x20000, "AllowReassoc"},
+ {0x40000, "AllowTransform"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FPOPERATION_MODE] =
+ {
+ "FPOperationMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 2,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "IEEE"},
+ {0x1, "ALT"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FPROUNDING_MODE] =
+ {
+ "FPRoundingMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "RTE"},
+ {0x1, "RTZ"},
+ {0x2, "RTP"},
+ {0x3, "RTN"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FRAGMENT_SHADING_RATE] =
+ {
+ "FragmentShadingRate", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0x1, "Vertical2Pixels"},
+ {0x2, "Vertical4Pixels"},
+ {0x4, "Horizontal2Pixels"},
+ {0x8, "Horizontal4Pixels"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FUNCTION_CONTROL] =
+ {
+ "FunctionControl", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 6,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "Inline"},
+ {0x2, "DontInline"},
+ {0x4, "Pure"},
+ {0x8, "Const"},
+ {0x10000, "OptNoneEXT"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_FUNCTION_PARAMETER_ATTRIBUTE] =
+ {
+ "FunctionParameterAttribute", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 9,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Zext"},
+ {0x1, "Sext"},
+ {0x2, "ByVal"},
+ {0x3, "Sret"},
+ {0x4, "NoAlias"},
+ {0x5, "NoCapture"},
+ {0x6, "NoWrite"},
+ {0x7, "NoReadWrite"},
+ {0x1734, "RuntimeAlignedINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION] =
+ {
+ "GroupOperation", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 7,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Reduce"},
+ {0x1, "InclusiveScan"},
+ {0x2, "ExclusiveScan"},
+ {0x3, "ClusteredReduce"},
+ {0x6, "PartitionedReduceNV"},
+ {0x7, "PartitionedInclusiveScanNV"},
+ {0x8, "PartitionedExclusiveScanNV"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_HOST_ACCESS_QUALIFIER] =
+ {
+ "HostAccessQualifier", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "NoneINTEL"},
+ {0x1, "ReadINTEL"},
+ {0x2, "WriteINTEL"},
+ {0x3, "ReadWriteINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS] =
+ {
+ "IdMemorySemantics", SPIRV_PARSER_OPERAND_CATEGORY_ID
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_ID_REF] =
+ {
+ "IdRef", SPIRV_PARSER_OPERAND_CATEGORY_ID
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_ID_RESULT] =
+ {
+ "IdResult", SPIRV_PARSER_OPERAND_CATEGORY_ID
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE] =
+ {
+ "IdResultType", SPIRV_PARSER_OPERAND_CATEGORY_ID
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE] =
+ {
+ "IdScope", SPIRV_PARSER_OPERAND_CATEGORY_ID
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_IMAGE_CHANNEL_DATA_TYPE] =
+ {
+ "ImageChannelDataType", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 20,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "SnormInt8"},
+ {0x1, "SnormInt16"},
+ {0x2, "UnormInt8"},
+ {0x3, "UnormInt16"},
+ {0x4, "UnormShort565"},
+ {0x5, "UnormShort555"},
+ {0x6, "UnormInt101010"},
+ {0x7, "SignedInt8"},
+ {0x8, "SignedInt16"},
+ {0x9, "SignedInt32"},
+ {0xa, "UnsignedInt8"},
+ {0xb, "UnsignedInt16"},
+ {0xc, "UnsignedInt32"},
+ {0xd, "HalfFloat"},
+ {0xe, "Float"},
+ {0xf, "UnormInt24"},
+ {0x10, "UnormInt101010_2"},
+ {0x13, "UnsignedIntRaw10EXT"},
+ {0x14, "UnsignedIntRaw12EXT"},
+ {0x15, "UnormInt2_101010EXT"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_IMAGE_CHANNEL_ORDER] =
+ {
+ "ImageChannelOrder", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 20,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "R"},
+ {0x1, "A"},
+ {0x2, "RG"},
+ {0x3, "RA"},
+ {0x4, "RGB"},
+ {0x5, "RGBA"},
+ {0x6, "BGRA"},
+ {0x7, "ARGB"},
+ {0x8, "Intensity"},
+ {0x9, "Luminance"},
+ {0xa, "Rx"},
+ {0xb, "RGx"},
+ {0xc, "RGBx"},
+ {0xd, "Depth"},
+ {0xe, "DepthStencil"},
+ {0xf, "sRGB"},
+ {0x10, "sRGBx"},
+ {0x11, "sRGBA"},
+ {0x12, "sBGRA"},
+ {0x13, "ABGR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_IMAGE_FORMAT] =
+ {
+ "ImageFormat", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 42,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Unknown"},
+ {0x1, "Rgba32f"},
+ {0x2, "Rgba16f"},
+ {0x3, "R32f"},
+ {0x4, "Rgba8"},
+ {0x5, "Rgba8Snorm"},
+ {0x6, "Rg32f"},
+ {0x7, "Rg16f"},
+ {0x8, "R11fG11fB10f"},
+ {0x9, "R16f"},
+ {0xa, "Rgba16"},
+ {0xb, "Rgb10A2"},
+ {0xc, "Rg16"},
+ {0xd, "Rg8"},
+ {0xe, "R16"},
+ {0xf, "R8"},
+ {0x10, "Rgba16Snorm"},
+ {0x11, "Rg16Snorm"},
+ {0x12, "Rg8Snorm"},
+ {0x13, "R16Snorm"},
+ {0x14, "R8Snorm"},
+ {0x15, "Rgba32i"},
+ {0x16, "Rgba16i"},
+ {0x17, "Rgba8i"},
+ {0x18, "R32i"},
+ {0x19, "Rg32i"},
+ {0x1a, "Rg16i"},
+ {0x1b, "Rg8i"},
+ {0x1c, "R16i"},
+ {0x1d, "R8i"},
+ {0x1e, "Rgba32ui"},
+ {0x1f, "Rgba16ui"},
+ {0x20, "Rgba8ui"},
+ {0x21, "R32ui"},
+ {0x22, "Rgb10a2ui"},
+ {0x23, "Rg32ui"},
+ {0x24, "Rg16ui"},
+ {0x25, "Rg8ui"},
+ {0x26, "R16ui"},
+ {0x27, "R8ui"},
+ {0x28, "R64ui"},
+ {0x29, "R64i"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS] =
+ {
+ "ImageOperands", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 17,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {
+ 0x1, "Bias", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x2, "Lod", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x4, "Grad", 2,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x8, "ConstOffset", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x10, "Offset", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x20, "ConstOffsets", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x40, "Sample", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x80, "MinLod", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x100, "MakeTexelAvailable", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE,
+ }
+ },
+ {
+ 0x200, "MakeTexelVisible", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE,
+ }
+ },
+ {0x400, "NonPrivateTexel"},
+ {0x800, "VolatileTexel"},
+ {0x1000, "SignExtend"},
+ {0x2000, "ZeroExtend"},
+ {0x4000, "Nontemporal"},
+ {
+ 0x10000, "Offsets", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_INITIALIZATION_MODE_QUALIFIER] =
+ {
+ "InitializationModeQualifier", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 2,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "InitOnDeviceReprogramINTEL"},
+ {0x1, "InitOnDeviceResetINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_KERNEL_ENQUEUE_FLAGS] =
+ {
+ "KernelEnqueueFlags", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "NoWait"},
+ {0x1, "WaitKernel"},
+ {0x2, "WaitWorkGroup"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_KERNEL_PROFILING_INFO] =
+ {
+ "KernelProfilingInfo", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 2,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "CmdExecTime"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LINKAGE_TYPE] =
+ {
+ "LinkageType", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Export"},
+ {0x1, "Import"},
+ {0x2, "LinkOnceODR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LITERAL_CONTEXT_DEPENDENT_NUMBER] =
+ {
+ "LiteralContextDependentNumber", SPIRV_PARSER_OPERAND_CATEGORY_LITERAL
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LITERAL_EXT_INST_INTEGER] =
+ {
+ "LiteralExtInstInteger", SPIRV_PARSER_OPERAND_CATEGORY_LITERAL
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LITERAL_FLOAT] =
+ {
+ "LiteralFloat", SPIRV_PARSER_OPERAND_CATEGORY_LITERAL
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER] =
+ {
+ "LiteralInteger", SPIRV_PARSER_OPERAND_CATEGORY_LITERAL
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LITERAL_SPEC_CONSTANT_OP_INTEGER] =
+ {
+ "LiteralSpecConstantOpInteger", SPIRV_PARSER_OPERAND_CATEGORY_LITERAL
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING] =
+ {
+ "LiteralString", SPIRV_PARSER_OPERAND_CATEGORY_LITERAL
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LOAD_CACHE_CONTROL] =
+ {
+ "LoadCacheControl", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 5,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "UncachedINTEL"},
+ {0x1, "CachedINTEL"},
+ {0x2, "StreamingINTEL"},
+ {0x3, "InvalidateAfterReadINTEL"},
+ {0x4, "ConstCachedINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_LOOP_CONTROL] =
+ {
+ "LoopControl", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 20,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "Unroll"},
+ {0x2, "DontUnroll"},
+ {0x4, "DependencyInfinite"},
+ {
+ 0x8, "DependencyLength", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x10, "MinIterations", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x20, "MaxIterations", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x40, "IterationMultiple", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x80, "PeelCount", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x100, "PartialCount", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x10000, "InitiationIntervalINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x20000, "MaxConcurrencyINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x40000, "DependencyArrayINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x80000, "PipelineEnableINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x100000, "LoopCoalesceINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x200000, "MaxInterleavingINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x400000, "SpeculatedIterationsINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x800000, "NoFusionINTEL"},
+ {
+ 0x1000000, "LoopCountINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {
+ 0x2000000, "MaxReinvocationDelayINTEL", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS] =
+ {
+ "MemoryAccess", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 9,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "Volatile"},
+ {
+ 0x2, "Aligned", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
+ }
+ },
+ {0x4, "Nontemporal"},
+ {
+ 0x8, "MakePointerAvailable", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE,
+ }
+ },
+ {
+ 0x10, "MakePointerVisible", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE,
+ }
+ },
+ {0x20, "NonPrivatePointer"},
+ {
+ 0x10000, "AliasScopeINTELMask", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x20000, "NoAliasINTELMask", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_MEMORY_MODEL] =
+ {
+ "MemoryModel", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Simple"},
+ {0x1, "GLSL450"},
+ {0x2, "OpenCL"},
+ {0x3, "Vulkan"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_MEMORY_SEMANTICS] =
+ {
+ "MemorySemantics", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 15,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Relaxed"},
+ {0x2, "Acquire"},
+ {0x4, "Release"},
+ {0x8, "AcquireRelease"},
+ {0x10, "SequentiallyConsistent"},
+ {0x40, "UniformMemory"},
+ {0x80, "SubgroupMemory"},
+ {0x100, "WorkgroupMemory"},
+ {0x200, "CrossWorkgroupMemory"},
+ {0x400, "AtomicCounterMemory"},
+ {0x800, "ImageMemory"},
+ {0x1000, "OutputMemory"},
+ {0x2000, "MakeAvailable"},
+ {0x4000, "MakeVisible"},
+ {0x8000, "Volatile"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_NAMED_MAXIMUM_NUMBER_OF_REGISTERS] =
+ {
+ "NamedMaximumNumberOfRegisters", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 1,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "AutoINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_OVERFLOW_MODES] =
+ {
+ "OverflowModes", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "WRAP"},
+ {0x1, "SAT"},
+ {0x2, "SAT_ZERO"},
+ {0x3, "SAT_SYM"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT] =
+ {
+ "PackedVectorFormat", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 1,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "PackedVectorFormat4x8Bit"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_PAIR_ID_REF_ID_REF] =
+ {
+ "PairIdRefIdRef", SPIRV_PARSER_OPERAND_CATEGORY_COMPOSITE
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_PAIR_ID_REF_LITERAL_INTEGER] =
+ {
+ "PairIdRefLiteralInteger", SPIRV_PARSER_OPERAND_CATEGORY_COMPOSITE
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_PAIR_LITERAL_INTEGER_ID_REF] =
+ {
+ "PairLiteralIntegerIdRef", SPIRV_PARSER_OPERAND_CATEGORY_COMPOSITE
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_QUANTIZATION_MODES] =
+ {
+ "QuantizationModes", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 8,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "TRN"},
+ {0x1, "TRN_ZERO"},
+ {0x2, "RND"},
+ {0x3, "RND_ZERO"},
+ {0x4, "RND_INF"},
+ {0x5, "RND_MIN_INF"},
+ {0x6, "RND_CONV"},
+ {0x7, "RND_CONV_ODD"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_RAW_ACCESS_CHAIN_OPERANDS] =
+ {
+ "RawAccessChainOperands", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "RobustnessPerComponentNV"},
+ {0x2, "RobustnessPerElementNV"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_RAY_FLAGS] =
+ {
+ "RayFlags", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 12,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "NoneKHR"},
+ {0x1, "OpaqueKHR"},
+ {0x2, "NoOpaqueKHR"},
+ {0x4, "TerminateOnFirstHitKHR"},
+ {0x8, "SkipClosestHitShaderKHR"},
+ {0x10, "CullBackFacingTrianglesKHR"},
+ {0x20, "CullFrontFacingTrianglesKHR"},
+ {0x40, "CullOpaqueKHR"},
+ {0x80, "CullNoOpaqueKHR"},
+ {0x100, "SkipTrianglesKHR"},
+ {0x200, "SkipAABBsKHR"},
+ {0x400, "ForceOpacityMicromap2StateEXT"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_RAY_QUERY_CANDIDATE_INTERSECTION_TYPE] =
+ {
+ "RayQueryCandidateIntersectionType", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 2,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "RayQueryCandidateIntersectionTriangleKHR"},
+ {0x1, "RayQueryCandidateIntersectionAABBKHR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_RAY_QUERY_COMMITTED_INTERSECTION_TYPE] =
+ {
+ "RayQueryCommittedIntersectionType", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "RayQueryCommittedIntersectionNoneKHR"},
+ {0x1, "RayQueryCommittedIntersectionTriangleKHR"},
+ {0x2, "RayQueryCommittedIntersectionGeneratedKHR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_RAY_QUERY_INTERSECTION] =
+ {
+ "RayQueryIntersection", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 2,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "RayQueryCandidateIntersectionKHR"},
+ {0x1, "RayQueryCommittedIntersectionKHR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_SAMPLER_ADDRESSING_MODE] =
+ {
+ "SamplerAddressingMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 5,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "ClampToEdge"},
+ {0x2, "Clamp"},
+ {0x3, "Repeat"},
+ {0x4, "RepeatMirrored"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_SAMPLER_FILTER_MODE] =
+ {
+ "SamplerFilterMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 2,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Nearest"},
+ {0x1, "Linear"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_SCOPE] =
+ {
+ "Scope", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 7,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "CrossDevice"},
+ {0x1, "Device"},
+ {0x2, "Workgroup"},
+ {0x3, "Subgroup"},
+ {0x4, "Invocation"},
+ {0x5, "QueueFamily"},
+ {0x6, "ShaderCallKHR"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_SELECTION_CONTROL] =
+ {
+ "SelectionControl", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {0x1, "Flatten"},
+ {0x2, "DontFlatten"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_SOURCE_LANGUAGE] =
+ {
+ "SourceLanguage", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 13,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Unknown"},
+ {0x1, "ESSL"},
+ {0x2, "GLSL"},
+ {0x3, "OpenCL_C"},
+ {0x4, "OpenCL_CPP"},
+ {0x5, "HLSL"},
+ {0x6, "CPP_for_OpenCL"},
+ {0x7, "SYCL"},
+ {0x8, "HERO_C"},
+ {0x9, "NZSL"},
+ {0xa, "WGSL"},
+ {0xb, "Slang"},
+ {0xc, "Zig"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS] =
+ {
+ "StorageClass", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 27,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "UniformConstant"},
+ {0x1, "Input"},
+ {0x2, "Uniform"},
+ {0x3, "Output"},
+ {0x4, "Workgroup"},
+ {0x5, "CrossWorkgroup"},
+ {0x6, "Private"},
+ {0x7, "Function"},
+ {0x8, "Generic"},
+ {0x9, "PushConstant"},
+ {0xa, "AtomicCounter"},
+ {0xb, "Image"},
+ {0xc, "StorageBuffer"},
+ {0x104c, "TileImageEXT"},
+ {0x13cc, "NodePayloadAMDX"},
+ {0x14d0, "CallableDataKHR"},
+ {0x14d1, "IncomingCallableDataKHR"},
+ {0x14da, "RayPayloadKHR"},
+ {0x14db, "HitAttributeKHR"},
+ {0x14de, "IncomingRayPayloadKHR"},
+ {0x14df, "ShaderRecordBufferKHR"},
+ {0x14e5, "PhysicalStorageBuffer"},
+ {0x1509, "HitObjectAttributeNV"},
+ {0x151a, "TaskPayloadWorkgroupEXT"},
+ {0x15e5, "CodeSectionINTEL"},
+ {0x1730, "DeviceOnlyINTEL"},
+ {0x1731, "HostOnlyINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_STORE_CACHE_CONTROL] =
+ {
+ "StoreCacheControl", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "UncachedINTEL"},
+ {0x1, "WriteThroughINTEL"},
+ {0x2, "WriteBackINTEL"},
+ {0x3, "StreamingINTEL"},
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_TENSOR_ADDRESSING_OPERANDS] =
+ {
+ "TensorAddressingOperands", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 3,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "None"},
+ {
+ 0x1, "TensorView", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ {
+ 0x2, "DecodeFunc", 1,
+ (enum spirv_parser_operand_type[])
+ {
+ SPIRV_PARSER_OPERAND_TYPE_ID_REF,
+ }
+ },
+ }
+ },
+ [SPIRV_PARSER_OPERAND_TYPE_TENSOR_CLAMP_MODE] =
+ {
+ "TensorClampMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 5,
+ (struct spirv_parser_enumerant[])
+ {
+ {0, "Undefined"},
+ {0x1, "Constant"},
+ {0x2, "ClampToEdge"},
+ {0x3, "Repeat"},
+ {0x4, "RepeatMirrored"},
+ }
+ },
+};
+
+static const struct spirv_parser_opcode_info
+{
+ uint16_t op;
+ const char *name;
+ size_t operand_count;
+ const struct spirv_parser_instruction_operand
+ {
+ enum spirv_parser_operand_type type;
+ char quantifier;
+ } *operands;
+}
+spirv_parser_opcode_info[] =
+{
+ {0x0000, "OpNop"},
+ {
+ 0x0001, "OpUndef", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0002, "OpSourceContinued", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x0003, "OpSource", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_SOURCE_LANGUAGE},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING, '?'},
+ }
+ },
+ {
+ 0x0004, "OpSourceExtension", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x0005, "OpName", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x0006, "OpMemberName", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x0007, "OpString", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x0008, "OpLine", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x000a, "OpExtension", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x000b, "OpExtInstImport", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x000c, "OpExtInst", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_EXT_INST_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x000e, "OpMemoryModel", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ADDRESSING_MODEL},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_MODEL},
+ }
+ },
+ {
+ 0x000f, "OpEntryPoint", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODEL},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0010, "OpExecutionMode", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODE},
+ }
+ },
+ {
+ 0x0011, "OpCapability", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_CAPABILITY},
+ }
+ },
+ {
+ 0x0013, "OpTypeVoid", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0014, "OpTypeBool", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0015, "OpTypeInt", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x0016, "OpTypeFloat", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_FPENCODING, '?'},
+ }
+ },
+ {
+ 0x0017, "OpTypeVector", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x0018, "OpTypeMatrix", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x0019, "OpTypeImage", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_DIM},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_FORMAT},
+ {SPIRV_PARSER_OPERAND_TYPE_ACCESS_QUALIFIER, '?'},
+ }
+ },
+ {
+ 0x001a, "OpTypeSampler", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x001b, "OpTypeSampledImage", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x001c, "OpTypeArray", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x001d, "OpTypeRuntimeArray", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x001e, "OpTypeStruct", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x001f, "OpTypeOpaque", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x0020, "OpTypePointer", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0021, "OpTypeFunction", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0022, "OpTypeEvent", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0023, "OpTypeDeviceEvent", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0024, "OpTypeReserveId", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0025, "OpTypeQueue", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0026, "OpTypePipe", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ACCESS_QUALIFIER},
+ }
+ },
+ {
+ 0x0027, "OpTypeForwardPointer", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS},
+ }
+ },
+ {
+ 0x0029, "OpConstantTrue", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x002a, "OpConstantFalse", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x002b, "OpConstant", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_CONTEXT_DEPENDENT_NUMBER},
+ }
+ },
+ {
+ 0x002c, "OpConstantComposite", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x002d, "OpConstantSampler", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_SAMPLER_ADDRESSING_MODE},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_SAMPLER_FILTER_MODE},
+ }
+ },
+ {
+ 0x002e, "OpConstantNull", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0030, "OpSpecConstantTrue", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0031, "OpSpecConstantFalse", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0032, "OpSpecConstant", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_CONTEXT_DEPENDENT_NUMBER},
+ }
+ },
+ {
+ 0x0033, "OpSpecConstantComposite", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0034, "OpSpecConstantOp", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_SPEC_CONSTANT_OP_INTEGER},
+ }
+ },
+ {
+ 0x0036, "OpFunction", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_FUNCTION_CONTROL},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0037, "OpFunctionParameter", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {0x0038, "OpFunctionEnd"},
+ {
+ 0x0039, "OpFunctionCall", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x003b, "OpVariable", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x003c, "OpImageTexelPointer", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x003d, "OpLoad", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x003e, "OpStore", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x003f, "OpCopyMemory", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x0040, "OpCopyMemorySized", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x0041, "OpAccessChain", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0042, "OpInBoundsAccessChain", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0043, "OpPtrAccessChain", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0044, "OpArrayLength", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x0045, "OpGenericPtrMemSemantics", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0046, "OpInBoundsPtrAccessChain", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0047, "OpDecorate", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_DECORATION},
+ }
+ },
+ {
+ 0x0048, "OpMemberDecorate", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_DECORATION},
+ }
+ },
+ {
+ 0x0049, "OpDecorationGroup", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x004a, "OpGroupDecorate", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x004b, "OpGroupMemberDecorate", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PAIR_ID_REF_LITERAL_INTEGER, '*'},
+ }
+ },
+ {
+ 0x004d, "OpVectorExtractDynamic", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x004e, "OpVectorInsertDynamic", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x004f, "OpVectorShuffle", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER, '*'},
+ }
+ },
+ {
+ 0x0050, "OpCompositeConstruct", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0051, "OpCompositeExtract", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER, '*'},
+ }
+ },
+ {
+ 0x0052, "OpCompositeInsert", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER, '*'},
+ }
+ },
+ {
+ 0x0053, "OpCopyObject", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0054, "OpTranspose", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0056, "OpSampledImage", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0057, "OpImageSampleImplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0058, "OpImageSampleExplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x0059, "OpImageSampleDrefImplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x005a, "OpImageSampleDrefExplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x005b, "OpImageSampleProjImplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x005c, "OpImageSampleProjExplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x005d, "OpImageSampleProjDrefImplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x005e, "OpImageSampleProjDrefExplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x005f, "OpImageFetch", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0060, "OpImageGather", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0061, "OpImageDrefGather", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0062, "OpImageRead", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0063, "OpImageWrite", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0064, "OpImage", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0065, "OpImageQueryFormat", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0066, "OpImageQueryOrder", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0067, "OpImageQuerySizeLod", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0068, "OpImageQuerySize", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0069, "OpImageQueryLod", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x006a, "OpImageQueryLevels", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x006b, "OpImageQuerySamples", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x006d, "OpConvertFToU", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x006e, "OpConvertFToS", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x006f, "OpConvertSToF", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0070, "OpConvertUToF", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0071, "OpUConvert", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0072, "OpSConvert", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0073, "OpFConvert", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0074, "OpQuantizeToF16", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0075, "OpConvertPtrToU", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0076, "OpSatConvertSToU", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0077, "OpSatConvertUToS", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0078, "OpConvertUToPtr", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0079, "OpPtrCastToGeneric", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x007a, "OpGenericCastToPtr", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x007b, "OpGenericCastToPtrExplicit", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS},
+ }
+ },
+ {
+ 0x007c, "OpBitcast", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x007e, "OpSNegate", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x007f, "OpFNegate", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0080, "OpIAdd", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0081, "OpFAdd", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0082, "OpISub", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0083, "OpFSub", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0084, "OpIMul", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0085, "OpFMul", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0086, "OpUDiv", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0087, "OpSDiv", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0088, "OpFDiv", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0089, "OpUMod", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x008a, "OpSRem", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x008b, "OpSMod", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x008c, "OpFRem", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x008d, "OpFMod", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x008e, "OpVectorTimesScalar", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x008f, "OpMatrixTimesScalar", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0090, "OpVectorTimesMatrix", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0091, "OpMatrixTimesVector", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0092, "OpMatrixTimesMatrix", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0093, "OpOuterProduct", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0094, "OpDot", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0095, "OpIAddCarry", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0096, "OpISubBorrow", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0097, "OpUMulExtended", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0098, "OpSMulExtended", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x009a, "OpAny", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x009b, "OpAll", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x009c, "OpIsNan", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x009d, "OpIsInf", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x009e, "OpIsFinite", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x009f, "OpIsNormal", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a0, "OpSignBitSet", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a1, "OpLessOrGreater", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a2, "OpOrdered", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a3, "OpUnordered", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a4, "OpLogicalEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a5, "OpLogicalNotEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a6, "OpLogicalOr", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a7, "OpLogicalAnd", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a8, "OpLogicalNot", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00a9, "OpSelect", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00aa, "OpIEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ab, "OpINotEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ac, "OpUGreaterThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ad, "OpSGreaterThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ae, "OpUGreaterThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00af, "OpSGreaterThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b0, "OpULessThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b1, "OpSLessThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b2, "OpULessThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b3, "OpSLessThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b4, "OpFOrdEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b5, "OpFUnordEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b6, "OpFOrdNotEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b7, "OpFUnordNotEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b8, "OpFOrdLessThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00b9, "OpFUnordLessThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ba, "OpFOrdGreaterThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00bb, "OpFUnordGreaterThan", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00bc, "OpFOrdLessThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00bd, "OpFUnordLessThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00be, "OpFOrdGreaterThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00bf, "OpFUnordGreaterThanEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c2, "OpShiftRightLogical", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c3, "OpShiftRightArithmetic", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c4, "OpShiftLeftLogical", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c5, "OpBitwiseOr", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c6, "OpBitwiseXor", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c7, "OpBitwiseAnd", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c8, "OpNot", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00c9, "OpBitFieldInsert", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ca, "OpBitFieldSExtract", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00cb, "OpBitFieldUExtract", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00cc, "OpBitReverse", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00cd, "OpBitCount", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00cf, "OpDPdx", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d0, "OpDPdy", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d1, "OpFwidth", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d2, "OpDPdxFine", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d3, "OpDPdyFine", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d4, "OpFwidthFine", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d5, "OpDPdxCoarse", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d6, "OpDPdyCoarse", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00d7, "OpFwidthCoarse", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {0x00da, "OpEmitVertex"},
+ {0x00db, "OpEndPrimitive"},
+ {
+ 0x00dc, "OpEmitStreamVertex", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00dd, "OpEndStreamPrimitive", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00e0, "OpControlBarrier", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x00e1, "OpMemoryBarrier", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x00e3, "OpAtomicLoad", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x00e4, "OpAtomicStore", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00e5, "OpAtomicExchange", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00e6, "OpAtomicCompareExchange", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00e7, "OpAtomicCompareExchangeWeak", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00e8, "OpAtomicIIncrement", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x00e9, "OpAtomicIDecrement", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x00ea, "OpAtomicIAdd", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00eb, "OpAtomicISub", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ec, "OpAtomicSMin", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ed, "OpAtomicUMin", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ee, "OpAtomicSMax", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00ef, "OpAtomicUMax", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00f0, "OpAtomicAnd", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00f1, "OpAtomicOr", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00f2, "OpAtomicXor", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00f5, "OpPhi", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_PAIR_ID_REF_ID_REF, '*'},
+ }
+ },
+ {
+ 0x00f6, "OpLoopMerge", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LOOP_CONTROL},
+ }
+ },
+ {
+ 0x00f7, "OpSelectionMerge", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_SELECTION_CONTROL},
+ }
+ },
+ {
+ 0x00f8, "OpLabel", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x00f9, "OpBranch", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x00fa, "OpBranchConditional", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER, '*'},
+ }
+ },
+ {
+ 0x00fb, "OpSwitch", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PAIR_LITERAL_INTEGER_ID_REF, '*'},
+ }
+ },
+ {0x00fc, "OpKill"},
+ {0x00fd, "OpReturn"},
+ {
+ 0x00fe, "OpReturnValue", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {0x00ff, "OpUnreachable"},
+ {
+ 0x0100, "OpLifetimeStart", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x0101, "OpLifetimeStop", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x0103, "OpGroupAsyncCopy", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0104, "OpGroupWaitEvents", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0105, "OpGroupAll", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0106, "OpGroupAny", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0107, "OpGroupBroadcast", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0108, "OpGroupIAdd", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0109, "OpGroupFAdd", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x010a, "OpGroupFMin", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x010b, "OpGroupUMin", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x010c, "OpGroupSMin", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x010d, "OpGroupFMax", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x010e, "OpGroupUMax", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x010f, "OpGroupSMax", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0112, "OpReadPipe", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0113, "OpWritePipe", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0114, "OpReservedReadPipe", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0115, "OpReservedWritePipe", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0116, "OpReserveReadPipePackets", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0117, "OpReserveWritePipePackets", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0118, "OpCommitReadPipe", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0119, "OpCommitWritePipe", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x011a, "OpIsValidReserveId", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x011b, "OpGetNumPipePackets", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x011c, "OpGetMaxPipePackets", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x011d, "OpGroupReserveReadPipePackets", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x011e, "OpGroupReserveWritePipePackets", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x011f, "OpGroupCommitReadPipe", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0120, "OpGroupCommitWritePipe", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0123, "OpEnqueueMarker", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0124, "OpEnqueueKernel", 13,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x0125, "OpGetKernelNDrangeSubGroupCount", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0126, "OpGetKernelNDrangeMaxSubGroupSize", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0127, "OpGetKernelWorkGroupSize", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0128, "OpGetKernelPreferredWorkGroupSizeMultiple", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0129, "OpRetainEvent", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x012a, "OpReleaseEvent", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x012b, "OpCreateUserEvent", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x012c, "OpIsValidEvent", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x012d, "OpSetUserEventStatus", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x012e, "OpCaptureEventProfilingInfo", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x012f, "OpGetDefaultQueue", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0130, "OpBuildNDRange", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0131, "OpImageSparseSampleImplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0132, "OpImageSparseSampleExplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x0133, "OpImageSparseSampleDrefImplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0134, "OpImageSparseSampleDrefExplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x0135, "OpImageSparseSampleProjImplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0136, "OpImageSparseSampleProjExplicitLod", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x0137, "OpImageSparseSampleProjDrefImplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0138, "OpImageSparseSampleProjDrefExplicitLod", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS},
+ }
+ },
+ {
+ 0x0139, "OpImageSparseFetch", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x013a, "OpImageSparseGather", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x013b, "OpImageSparseDrefGather", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x013c, "OpImageSparseTexelsResident", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {0x013d, "OpNoLine"},
+ {
+ 0x013e, "OpAtomicFlagTestAndSet", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x013f, "OpAtomicFlagClear", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x0140, "OpImageSparseRead", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x0141, "OpSizeOf", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0142, "OpTypePipeStorage", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0143, "OpConstantPipeStorage", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x0144, "OpCreatePipeFromPipeStorage", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0145, "OpGetKernelLocalSizeForSubgroupCount", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0146, "OpGetKernelMaxNumSubgroups", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0147, "OpTypeNamedBarrier", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x0148, "OpNamedBarrierInitialize", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0149, "OpMemoryNamedBarrier", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x014a, "OpModuleProcessed", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x014b, "OpExecutionModeId", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODE},
+ }
+ },
+ {
+ 0x014c, "OpDecorateId", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_DECORATION},
+ }
+ },
+ {
+ 0x014d, "OpGroupNonUniformElect", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ }
+ },
+ {
+ 0x014e, "OpGroupNonUniformAll", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x014f, "OpGroupNonUniformAny", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0150, "OpGroupNonUniformAllEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0151, "OpGroupNonUniformBroadcast", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0152, "OpGroupNonUniformBroadcastFirst", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0153, "OpGroupNonUniformBallot", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0154, "OpGroupNonUniformInverseBallot", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0155, "OpGroupNonUniformBallotBitExtract", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0156, "OpGroupNonUniformBallotBitCount", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0157, "OpGroupNonUniformBallotFindLSB", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0158, "OpGroupNonUniformBallotFindMSB", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0159, "OpGroupNonUniformShuffle", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x015a, "OpGroupNonUniformShuffleXor", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x015b, "OpGroupNonUniformShuffleUp", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x015c, "OpGroupNonUniformShuffleDown", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x015d, "OpGroupNonUniformIAdd", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x015e, "OpGroupNonUniformFAdd", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x015f, "OpGroupNonUniformIMul", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0160, "OpGroupNonUniformFMul", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0161, "OpGroupNonUniformSMin", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0162, "OpGroupNonUniformUMin", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0163, "OpGroupNonUniformFMin", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0164, "OpGroupNonUniformSMax", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0165, "OpGroupNonUniformUMax", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0166, "OpGroupNonUniformFMax", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0167, "OpGroupNonUniformBitwiseAnd", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0168, "OpGroupNonUniformBitwiseOr", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x0169, "OpGroupNonUniformBitwiseXor", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x016a, "OpGroupNonUniformLogicalAnd", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x016b, "OpGroupNonUniformLogicalOr", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x016c, "OpGroupNonUniformLogicalXor", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x016d, "OpGroupNonUniformQuadBroadcast", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x016e, "OpGroupNonUniformQuadSwap", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0190, "OpCopyLogical", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0191, "OpPtrEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0192, "OpPtrNotEqual", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x0193, "OpPtrDiff", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1040, "OpColorAttachmentReadEXT", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x1041, "OpDepthAttachmentReadEXT", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x1042, "OpStencilAttachmentReadEXT", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {0x1140, "OpTerminateInvocation"},
+ {
+ 0x1141, "OpTypeUntypedPointerKHR", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS},
+ }
+ },
+ {
+ 0x1142, "OpUntypedVariableKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x1143, "OpUntypedAccessChainKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x1144, "OpUntypedInBoundsAccessChainKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x1145, "OpSubgroupBallotKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1146, "OpSubgroupFirstInvocationKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1147, "OpUntypedPtrAccessChainKHR", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x1148, "OpUntypedInBoundsPtrAccessChainKHR", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x1149, "OpUntypedArrayLengthKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x114a, "OpUntypedPrefetchKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x114c, "OpSubgroupAllKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x114d, "OpSubgroupAnyKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x114e, "OpSubgroupAllEqualKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x114f, "OpGroupNonUniformRotateKHR", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x1150, "OpSubgroupReadInvocationKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1151, "OpExtInstWithForwardRefsKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_EXT_INST_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x115d, "OpTraceRayKHR", 11,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x115e, "OpExecuteCallableKHR", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x115f, "OpConvertUToAccelerationStructureKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {0x1160, "OpIgnoreIntersectionKHR"},
+ {0x1161, "OpTerminateRayKHR"},
+ {
+ 0x1162, "OpSDot", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT, '?'},
+ }
+ },
+ {
+ 0x1163, "OpUDot", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT, '?'},
+ }
+ },
+ {
+ 0x1164, "OpSUDot", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT, '?'},
+ }
+ },
+ {
+ 0x1165, "OpSDotAccSat", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT, '?'},
+ }
+ },
+ {
+ 0x1166, "OpUDotAccSat", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT, '?'},
+ }
+ },
+ {
+ 0x1167, "OpSUDotAccSat", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_PACKED_VECTOR_FORMAT, '?'},
+ }
+ },
+ {
+ 0x1168, "OpTypeCooperativeMatrixKHR", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1169, "OpCooperativeMatrixLoadKHR", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x116a, "OpCooperativeMatrixStoreKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x116b, "OpCooperativeMatrixMulAddKHR", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x116c, "OpCooperativeMatrixLengthKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x116d, "OpConstantCompositeReplicateEXT", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x116e, "OpSpecConstantCompositeReplicateEXT", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x116f, "OpCompositeConstructReplicateEXT", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1178, "OpTypeRayQueryKHR", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1179, "OpRayQueryInitializeKHR", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x117a, "OpRayQueryTerminateKHR", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x117b, "OpRayQueryGenerateIntersectionKHR", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x117c, "OpRayQueryConfirmIntersectionKHR", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x117d, "OpRayQueryProceedKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x117f, "OpRayQueryGetIntersectionTypeKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1180, "OpImageSampleWeightedQCOM", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1181, "OpImageBoxFilterQCOM", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1182, "OpImageBlockMatchSSDQCOM", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1183, "OpImageBlockMatchSADQCOM", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1194, "OpImageBlockMatchWindowSSDQCOM", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1195, "OpImageBlockMatchWindowSADQCOM", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1196, "OpImageBlockMatchGatherSSDQCOM", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1197, "OpImageBlockMatchGatherSADQCOM", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1388, "OpGroupIAddNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1389, "OpGroupFAddNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x138a, "OpGroupFMinNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x138b, "OpGroupUMinNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x138c, "OpGroupSMinNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x138d, "OpGroupFMaxNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x138e, "OpGroupUMaxNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x138f, "OpGroupSMaxNonUniformAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1393, "OpFragmentMaskFetchAMD", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1394, "OpFragmentFetchAMD", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13c0, "OpReadClockKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ }
+ },
+ {
+ 0x13d2, "OpAllocateNodePayloadsAMDX", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13d3, "OpEnqueueNodePayloadsAMDX", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13d4, "OpTypeNodePayloadArrayAMDX", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13d6, "OpFinishWritingNodePayloadAMDX", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13e2, "OpNodePayloadArrayLengthAMDX", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13ed, "OpIsNodePayloadValidAMDX", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13ef, "OpConstantStringAMDX", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x13f0, "OpSpecConstantStringAMDX", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x13f6, "OpGroupNonUniformQuadAllKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x13f7, "OpGroupNonUniformQuadAnyKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1481, "OpHitObjectRecordHitMotionNV", 14,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1482, "OpHitObjectRecordHitWithIndexMotionNV", 13,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1483, "OpHitObjectRecordMissMotionNV", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1484, "OpHitObjectGetWorldToObjectNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1485, "OpHitObjectGetObjectToWorldNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1486, "OpHitObjectGetObjectRayDirectionNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1487, "OpHitObjectGetObjectRayOriginNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1488, "OpHitObjectTraceRayMotionNV", 13,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1489, "OpHitObjectGetShaderRecordBufferHandleNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x148a, "OpHitObjectGetShaderBindingTableRecordIndexNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x148b, "OpHitObjectRecordEmptyNV", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x148c, "OpHitObjectTraceRayNV", 12,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x148d, "OpHitObjectRecordHitNV", 13,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x148e, "OpHitObjectRecordHitWithIndexNV", 12,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x148f, "OpHitObjectRecordMissNV", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1490, "OpHitObjectExecuteShaderNV", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1491, "OpHitObjectGetCurrentTimeNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1492, "OpHitObjectGetAttributesNV", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1493, "OpHitObjectGetHitKindNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1494, "OpHitObjectGetPrimitiveIndexNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1495, "OpHitObjectGetGeometryIndexNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1496, "OpHitObjectGetInstanceIdNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1497, "OpHitObjectGetInstanceCustomIndexNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1498, "OpHitObjectGetWorldRayDirectionNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1499, "OpHitObjectGetWorldRayOriginNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x149a, "OpHitObjectGetRayTMaxNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x149b, "OpHitObjectGetRayTMinNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x149c, "OpHitObjectIsEmptyNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x149d, "OpHitObjectIsHitNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x149e, "OpHitObjectIsMissNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x149f, "OpReorderThreadWithHitObjectNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x14a0, "OpReorderThreadWithHintNV", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14a1, "OpTypeHitObjectNV", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x14a3, "OpImageSampleFootprintNV", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x14ad, "OpCooperativeMatrixConvertNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14ae, "OpEmitMeshTasksEXT", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x14af, "OpSetMeshOutputsEXT", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14b0, "OpGroupNonUniformPartitionNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14b3, "OpWritePackedPrimitiveIndices4x8NV", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14b4, "OpFetchMicroTriangleVertexPositionNV", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14b5, "OpFetchMicroTriangleVertexBarycentricNV", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14d6, "OpReportIntersectionKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {0x14d7, "OpIgnoreIntersectionNV"},
+ {0x14d8, "OpTerminateRayNV"},
+ {
+ 0x14d9, "OpTraceNV", 11,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14da, "OpTraceMotionNV", 12,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14db, "OpTraceRayMotionNV", 12,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14dc, "OpRayQueryGetIntersectionTriangleVertexPositionsKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14dd, "OpTypeAccelerationStructureKHR", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x14e0, "OpExecuteCallableNV", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14ee, "OpTypeCooperativeMatrixNV", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14ef, "OpCooperativeMatrixLoadNV", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x14f0, "OpCooperativeMatrixStoreNV", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x14f1, "OpCooperativeMatrixMulAddNV", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14f2, "OpCooperativeMatrixLengthNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {0x14f4, "OpBeginInvocationInterlockEXT"},
+ {0x14f5, "OpEndInvocationInterlockEXT"},
+ {
+ 0x14f6, "OpCooperativeMatrixReduceNV", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_REDUCE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14f7, "OpCooperativeMatrixLoadTensorNV", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS},
+ {SPIRV_PARSER_OPERAND_TYPE_TENSOR_ADDRESSING_OPERANDS},
+ }
+ },
+ {
+ 0x14f8, "OpCooperativeMatrixStoreTensorNV", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS},
+ {SPIRV_PARSER_OPERAND_TYPE_TENSOR_ADDRESSING_OPERANDS},
+ }
+ },
+ {
+ 0x14f9, "OpCooperativeMatrixPerElementOpNV", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x14fa, "OpTypeTensorLayoutNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x14fb, "OpTypeTensorViewNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x14fc, "OpCreateTensorLayoutNV", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x14fd, "OpTensorLayoutSetDimensionNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x14fe, "OpTensorLayoutSetStrideNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x14ff, "OpTensorLayoutSliceNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x1500, "OpTensorLayoutSetClampValueNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1501, "OpCreateTensorViewNV", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1502, "OpTensorViewSetDimensionNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x1503, "OpTensorViewSetStrideNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {0x1504, "OpDemoteToHelperInvocation"},
+ {
+ 0x1505, "OpIsHelperInvocationEXT", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1506, "OpTensorViewSetClipNV", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1508, "OpTensorLayoutSetBlockSizeNV", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x150e, "OpCooperativeMatrixTransposeNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x150f, "OpConvertUToImageNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1510, "OpConvertUToSamplerNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1511, "OpConvertImageToUNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1512, "OpConvertSamplerToUNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1513, "OpConvertUToSampledImageNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1514, "OpConvertSampledImageToUNV", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1515, "OpSamplerImageAddressingModeNV", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x1516, "OpRawAccessChainNV", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_RAW_ACCESS_CHAIN_OPERANDS, '?'},
+ }
+ },
+ {
+ 0x15c3, "OpSubgroupShuffleINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15c4, "OpSubgroupShuffleDownINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15c5, "OpSubgroupShuffleUpINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15c6, "OpSubgroupShuffleXorINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15c7, "OpSubgroupBlockReadINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15c8, "OpSubgroupBlockWriteINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15c9, "OpSubgroupImageBlockReadINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15ca, "OpSubgroupImageBlockWriteINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15cc, "OpSubgroupImageMediaBlockReadINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15cd, "OpSubgroupImageMediaBlockWriteINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d1, "OpUCountLeadingZerosINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d2, "OpUCountTrailingZerosINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d3, "OpAbsISubINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d4, "OpAbsUSubINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d5, "OpIAddSatINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d6, "OpUAddSatINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d7, "OpIAverageINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d8, "OpUAverageINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15d9, "OpIAverageRoundedINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15da, "OpUAverageRoundedINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15db, "OpISubSatINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15dc, "OpUSubSatINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15dd, "OpIMul32x16INTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15de, "OpUMul32x16INTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15e0, "OpConstantFunctionPointerINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15e1, "OpFunctionPointerCallINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x15e9, "OpAsmTargetINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x15ea, "OpAsmINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
+ }
+ },
+ {
+ 0x15eb, "OpAsmCallINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x15ee, "OpAtomicFMinEXT", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15ef, "OpAtomicFMaxEXT", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15fe, "OpAssumeTrueKHR", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x15ff, "OpExpectKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1600, "OpDecorateString", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_DECORATION},
+ }
+ },
+ {
+ 0x1601, "OpMemberDecorateString", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_DECORATION},
+ }
+ },
+ {
+ 0x1643, "OpVmeImageINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1644, "OpTypeVmeImageINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1645, "OpTypeAvcImePayloadINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1646, "OpTypeAvcRefPayloadINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1647, "OpTypeAvcSicPayloadINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1648, "OpTypeAvcMcePayloadINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1649, "OpTypeAvcMceResultINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x164a, "OpTypeAvcImeResultINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x164b, "OpTypeAvcImeResultSingleReferenceStreamoutINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x164c, "OpTypeAvcImeResultDualReferenceStreamoutINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x164d, "OpTypeAvcImeSingleReferenceStreaminINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x164e, "OpTypeAvcImeDualReferenceStreaminINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x164f, "OpTypeAvcRefResultINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1650, "OpTypeAvcSicResultINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1651, "OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1652, "OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1653, "OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1654, "OpSubgroupAvcMceSetInterShapePenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1655, "OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1656, "OpSubgroupAvcMceSetInterDirectionPenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1657, "OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1658, "OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1659, "OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x165a, "OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x165b, "OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x165c, "OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x165d, "OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x165e, "OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x165f, "OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x1660, "OpSubgroupAvcMceSetAcOnlyHaarINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1661, "OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1662, "OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1663, "OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1664, "OpSubgroupAvcMceConvertToImePayloadINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1665, "OpSubgroupAvcMceConvertToImeResultINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1666, "OpSubgroupAvcMceConvertToRefPayloadINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1667, "OpSubgroupAvcMceConvertToRefResultINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1668, "OpSubgroupAvcMceConvertToSicPayloadINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1669, "OpSubgroupAvcMceConvertToSicResultINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x166a, "OpSubgroupAvcMceGetMotionVectorsINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x166b, "OpSubgroupAvcMceGetInterDistortionsINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x166c, "OpSubgroupAvcMceGetBestInterDistortionsINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x166d, "OpSubgroupAvcMceGetInterMajorShapeINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x166e, "OpSubgroupAvcMceGetInterMinorShapeINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x166f, "OpSubgroupAvcMceGetInterDirectionsINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1670, "OpSubgroupAvcMceGetInterMotionVectorCountINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1671, "OpSubgroupAvcMceGetInterReferenceIdsINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1672, "OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1673, "OpSubgroupAvcImeInitializeINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1674, "OpSubgroupAvcImeSetSingleReferenceINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1675, "OpSubgroupAvcImeSetDualReferenceINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1676, "OpSubgroupAvcImeRefWindowSizeINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1677, "OpSubgroupAvcImeAdjustRefOffsetINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1678, "OpSubgroupAvcImeConvertToMcePayloadINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1679, "OpSubgroupAvcImeSetMaxMotionVectorCountINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x167a, "OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x167b, "OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x167c, "OpSubgroupAvcImeSetWeightedSadINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x167d, "OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x167e, "OpSubgroupAvcImeEvaluateWithDualReferenceINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x167f, "OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1680, "OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1681, "OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1682, "OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1683, "OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1684, "OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1685, "OpSubgroupAvcImeConvertToMceResultINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1686, "OpSubgroupAvcImeGetSingleReferenceStreaminINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1687, "OpSubgroupAvcImeGetDualReferenceStreaminINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1688, "OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1689, "OpSubgroupAvcImeStripDualReferenceStreamoutINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x168a, "OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x168b, "OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x168c, "OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x168d, "OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x168e, "OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x168f, "OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1690, "OpSubgroupAvcImeGetBorderReachedINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1691, "OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1692, "OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1693, "OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1694, "OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1695, "OpSubgroupAvcFmeInitializeINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1696, "OpSubgroupAvcBmeInitializeINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1697, "OpSubgroupAvcRefConvertToMcePayloadINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1698, "OpSubgroupAvcRefSetBidirectionalMixDisableINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1699, "OpSubgroupAvcRefSetBilinearFilterEnableINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x169a, "OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x169b, "OpSubgroupAvcRefEvaluateWithDualReferenceINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x169c, "OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x169d, "OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x169e, "OpSubgroupAvcRefConvertToMceResultINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x169f, "OpSubgroupAvcSicInitializeINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a0, "OpSubgroupAvcSicConfigureSkcINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a1, "OpSubgroupAvcSicConfigureIpeLumaINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a2, "OpSubgroupAvcSicConfigureIpeLumaChromaINTEL", 13,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a3, "OpSubgroupAvcSicGetMotionVectorMaskINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a4, "OpSubgroupAvcSicConvertToMcePayloadINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a5, "OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a6, "OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a7, "OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a8, "OpSubgroupAvcSicSetBilinearFilterEnableINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16a9, "OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16aa, "OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16ab, "OpSubgroupAvcSicEvaluateIpeINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16ac, "OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16ad, "OpSubgroupAvcSicEvaluateWithDualReferenceINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16ae, "OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16af, "OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b0, "OpSubgroupAvcSicConvertToMceResultINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b1, "OpSubgroupAvcSicGetIpeLumaShapeINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b2, "OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b3, "OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b4, "OpSubgroupAvcSicGetPackedIpeLumaModesINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b5, "OpSubgroupAvcSicGetIpeChromaModeINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b6, "OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b7, "OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16b8, "OpSubgroupAvcSicGetInterRawSadsINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16ba, "OpVariableLengthArrayINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16bb, "OpSaveMemoryINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ }
+ },
+ {
+ 0x16bc, "OpRestoreMemoryINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x16d0, "OpArbitraryFloatSinCosPiINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16d1, "OpArbitraryFloatCastINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16d2, "OpArbitraryFloatCastFromIntINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16d3, "OpArbitraryFloatCastToIntINTEL", 7,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16d6, "OpArbitraryFloatAddINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16d7, "OpArbitraryFloatSubINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16d8, "OpArbitraryFloatMulINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16d9, "OpArbitraryFloatDivINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16da, "OpArbitraryFloatGTINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16db, "OpArbitraryFloatGEINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16dc, "OpArbitraryFloatLTINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16dd, "OpArbitraryFloatLEINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16de, "OpArbitraryFloatEQINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16df, "OpArbitraryFloatRecipINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e0, "OpArbitraryFloatRSqrtINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e1, "OpArbitraryFloatCbrtINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e2, "OpArbitraryFloatHypotINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e3, "OpArbitraryFloatSqrtINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e4, "OpArbitraryFloatLogINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e5, "OpArbitraryFloatLog2INTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e6, "OpArbitraryFloatLog10INTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e7, "OpArbitraryFloatLog1pINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e8, "OpArbitraryFloatExpINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16e9, "OpArbitraryFloatExp2INTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16ea, "OpArbitraryFloatExp10INTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16eb, "OpArbitraryFloatExpm1INTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16ec, "OpArbitraryFloatSinINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16ed, "OpArbitraryFloatCosINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16ee, "OpArbitraryFloatSinCosINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16ef, "OpArbitraryFloatSinPiINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f0, "OpArbitraryFloatCosPiINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f1, "OpArbitraryFloatASinINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f2, "OpArbitraryFloatASinPiINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f3, "OpArbitraryFloatACosINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f4, "OpArbitraryFloatACosPiINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f5, "OpArbitraryFloatATanINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f6, "OpArbitraryFloatATanPiINTEL", 8,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f7, "OpArbitraryFloatATan2INTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f8, "OpArbitraryFloatPowINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16f9, "OpArbitraryFloatPowRINTEL", 10,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16fa, "OpArbitraryFloatPowNINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x16ff, "OpLoopControlINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER, '*'},
+ }
+ },
+ {
+ 0x1717, "OpAliasDomainDeclINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x1718, "OpAliasScopeDeclINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
+ }
+ },
+ {
+ 0x1719, "OpAliasScopeListDeclINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x1723, "OpFixedSqrtINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x1724, "OpFixedRecipINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x1725, "OpFixedRsqrtINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x1726, "OpFixedSinINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x1727, "OpFixedCosINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x1728, "OpFixedSinCosINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x1729, "OpFixedSinPiINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x172a, "OpFixedCosPiINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x172b, "OpFixedSinCosPiINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x172c, "OpFixedLogINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x172d, "OpFixedExpINTEL", 9,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ }
+ },
+ {
+ 0x172e, "OpPtrCastToCrossWorkgroupINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1732, "OpCrossWorkgroupCastToPtrINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x173a, "OpReadPipeBlockingINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x173b, "OpWritePipeBlockingINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x173d, "OpFPGARegINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1780, "OpRayQueryGetRayTMinKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1781, "OpRayQueryGetRayFlagsKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1782, "OpRayQueryGetIntersectionTKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1783, "OpRayQueryGetIntersectionInstanceCustomIndexKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1784, "OpRayQueryGetIntersectionInstanceIdKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1785, "OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1786, "OpRayQueryGetIntersectionGeometryIndexKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1787, "OpRayQueryGetIntersectionPrimitiveIndexKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1788, "OpRayQueryGetIntersectionBarycentricsKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1789, "OpRayQueryGetIntersectionFrontFaceKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x178a, "OpRayQueryGetIntersectionCandidateAABBOpaqueKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x178b, "OpRayQueryGetIntersectionObjectRayDirectionKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x178c, "OpRayQueryGetIntersectionObjectRayOriginKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x178d, "OpRayQueryGetWorldRayDirectionKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x178e, "OpRayQueryGetWorldRayOriginKHR", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x178f, "OpRayQueryGetIntersectionObjectToWorldKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1790, "OpRayQueryGetIntersectionWorldToObjectKHR", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1793, "OpAtomicFAddEXT", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x17c6, "OpTypeBufferSurfaceINTEL", 2,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ACCESS_QUALIFIER},
+ }
+ },
+ {
+ 0x17ca, "OpTypeStructContinuedINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x17cb, "OpConstantCompositeContinuedINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x17cc, "OpSpecConstantCompositeContinuedINTEL", 1,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x17d0, "OpCompositeConstructContinuedINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
+ }
+ },
+ {
+ 0x17e4, "OpConvertFToBF16INTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x17e5, "OpConvertBF16ToFINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x17fe, "OpControlBarrierArriveINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x17ff, "OpControlBarrierWaitINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_MEMORY_SEMANTICS},
+ }
+ },
+ {
+ 0x1801, "OpArithmeticFenceEXT", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x184d, "OpSubgroupBlockPrefetchINTEL", 3,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
+ }
+ },
+ {
+ 0x1901, "OpGroupIMulKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1902, "OpGroupFMulKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1903, "OpGroupBitwiseAndKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1904, "OpGroupBitwiseOrKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1905, "OpGroupBitwiseXorKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1906, "OpGroupLogicalAndKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1907, "OpGroupLogicalOrKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x1908, "OpGroupLogicalXorKHR", 5,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_SCOPE},
+ {SPIRV_PARSER_OPERAND_TYPE_GROUP_OPERATION},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x191c, "OpMaskedGatherINTEL", 6,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+ {
+ 0x191d, "OpMaskedScatterINTEL", 4,
+ (struct spirv_parser_instruction_operand[])
+ {
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
+ }
+ },
+};
diff --git a/libs/vkd3d/include/private/vkd3d_common.h b/libs/vkd3d/include/private/vkd3d_common.h
index ec1dd70c9b2..93c8a0bec7c 100644
--- a/libs/vkd3d/include/private/vkd3d_common.h
+++ b/libs/vkd3d/include/private/vkd3d_common.h
@@ -65,9 +65,12 @@
#define vkd3d_clamp(value, lower, upper) max(min(value, upper), lower)
#define TAG_AON9 VKD3D_MAKE_TAG('A', 'o', 'n', '9')
+#define TAG_CLI4 VKD3D_MAKE_TAG('C', 'L', 'I', '4')
+#define TAG_CTAB VKD3D_MAKE_TAG('C', 'T', 'A', 'B')
#define TAG_DXBC VKD3D_MAKE_TAG('D', 'X', 'B', 'C')
#define TAG_DXIL VKD3D_MAKE_TAG('D', 'X', 'I', 'L')
#define TAG_FX10 VKD3D_MAKE_TAG('F', 'X', '1', '0')
+#define TAG_FXLC VKD3D_MAKE_TAG('F', 'X', 'L', 'C')
#define TAG_ISG1 VKD3D_MAKE_TAG('I', 'S', 'G', '1')
#define TAG_ISGN VKD3D_MAKE_TAG('I', 'S', 'G', 'N')
#define TAG_OSG1 VKD3D_MAKE_TAG('O', 'S', 'G', '1')
@@ -275,7 +278,7 @@ static inline unsigned int vkd3d_popcount(unsigned int v)
{
#ifdef _MSC_VER
return __popcnt(v);
-#elif defined(__MINGW32__)
+#elif defined(HAVE_BUILTIN_POPCOUNT)
return __builtin_popcount(v);
#else
v -= (v >> 1) & 0x55555555;
diff --git a/libs/vkd3d/include/private/vkd3d_shader_utils.h b/libs/vkd3d/include/private/vkd3d_shader_utils.h
index c9f8001e590..00052a89988 100644
--- a/libs/vkd3d/include/private/vkd3d_shader_utils.h
+++ b/libs/vkd3d/include/private/vkd3d_shader_utils.h
@@ -43,16 +43,12 @@ static inline enum vkd3d_result vkd3d_shader_parse_dxbc_source_type(const struct
if (tag == TAG_SHDR || tag == TAG_SHEX)
{
*type = VKD3D_SHADER_SOURCE_DXBC_TPF;
-#ifndef VKD3D_SHADER_UNSUPPORTED_DXIL
- break;
-#else
}
else if (tag == TAG_DXIL)
{
*type = VKD3D_SHADER_SOURCE_DXBC_DXIL;
/* Default to DXIL if both are present. */
break;
-#endif
}
}
diff --git a/libs/vkd3d/include/private/vkd3d_version.h b/libs/vkd3d/include/private/vkd3d_version.h
index 0edc4428022..795bc2dc490 100644
--- a/libs/vkd3d/include/private/vkd3d_version.h
+++ b/libs/vkd3d/include/private/vkd3d_version.h
@@ -1 +1 @@
-#define VKD3D_VCS_ID " (Wine bundled)"
+#define VKD3D_VCS_ID " (git a4f58be0)"
diff --git a/libs/vkd3d/include/vkd3d_shader.h b/libs/vkd3d/include/vkd3d_shader.h
index 058166aa2f9..2e1f37f12e6 100644
--- a/libs/vkd3d/include/vkd3d_shader.h
+++ b/libs/vkd3d/include/vkd3d_shader.h
@@ -249,6 +249,10 @@ enum vkd3d_shader_compile_option_feature_flags
* QUAD bits set.
* - supportedStages include COMPUTE and FRAGMENT. \since 1.12 */
VKD3D_SHADER_COMPILE_OPTION_FEATURE_WAVE_OPS = 0x00000004,
+ /** The SPIR-V target environment supports zero-initializing workgroup
+ * memory. This corresponds to the "shaderZeroInitializeWorkgroupMemory"
+ * Vulkan feature. \since 1.16 */
+ VKD3D_SHADER_COMPILE_OPTION_FEATURE_ZERO_INITIALIZE_WORKGROUP_MEMORY = 0x00000008,
VKD3D_FORCE_32_BIT_ENUM(VKD3D_SHADER_COMPILE_OPTION_FEATURE_FLAGS),
};
@@ -2286,6 +2290,14 @@ enum vkd3d_shader_component_type
VKD3D_SHADER_COMPONENT_DOUBLE = 0x5,
/** 64-bit unsigned integer. \since 1.11 */
VKD3D_SHADER_COMPONENT_UINT64 = 0x6,
+ /** 64-bit signed integer. \since 1.16 */
+ VKD3D_SHADER_COMPONENT_INT64 = 0x7,
+ /** 16-bit IEEE floating-point. \since 1.16 */
+ VKD3D_SHADER_COMPONENT_FLOAT16 = 0x8,
+ /** 16-bit unsigned integer. \since 1.16 */
+ VKD3D_SHADER_COMPONENT_UINT16 = 0x9,
+ /** 16-bit signed integer. \since 1.16 */
+ VKD3D_SHADER_COMPONENT_INT16 = 0xa,
VKD3D_FORCE_32_BIT_ENUM(VKD3D_SHADER_COMPONENT_TYPE),
};
@@ -2991,7 +3003,8 @@ VKD3D_SHADER_API void vkd3d_shader_free_scan_descriptor_info(
* signature. To retrieve signatures from other shader types, or other signature
* types, use vkd3d_shader_scan() and struct vkd3d_shader_scan_signature_info.
* This function returns the same input signature that is returned in
- * struct vkd3d_shader_scan_signature_info.
+ * struct vkd3d_shader_scan_signature_info for dxbc-tpf shaders, but may return
+ * different information for dxbc-dxil shaders.
*
* \param dxbc Compiled byte code, in DXBC format.
*
diff --git a/libs/vkd3d/libs/vkd3d-common/blob.c b/libs/vkd3d/libs/vkd3d-common/blob.c
index f60ef7db769..c2c6ad67804 100644
--- a/libs/vkd3d/libs/vkd3d-common/blob.c
+++ b/libs/vkd3d/libs/vkd3d-common/blob.c
@@ -20,6 +20,7 @@
#define WIDL_C_INLINE_WRAPPERS
#endif
#define COBJMACROS
+
#define CONST_VTABLE
#include "vkd3d.h"
#include "vkd3d_blob.h"
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
index 0639da83aa6..764f0888490 100644
--- a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
+++ b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
@@ -2069,15 +2069,22 @@ static const char *get_component_type_name(enum vkd3d_shader_component_type type
{
switch (type)
{
- case VKD3D_SHADER_COMPONENT_VOID: return "void";
- case VKD3D_SHADER_COMPONENT_UINT: return "uint";
- case VKD3D_SHADER_COMPONENT_INT: return "int";
- case VKD3D_SHADER_COMPONENT_FLOAT: return "float";
- case VKD3D_SHADER_COMPONENT_BOOL: return "bool";
- case VKD3D_SHADER_COMPONENT_DOUBLE: return "double";
- case VKD3D_SHADER_COMPONENT_UINT64: return "uint64";
- default: return "??";
+ case VKD3D_SHADER_COMPONENT_VOID: return "void";
+ case VKD3D_SHADER_COMPONENT_UINT: return "uint";
+ case VKD3D_SHADER_COMPONENT_INT: return "int";
+ case VKD3D_SHADER_COMPONENT_FLOAT: return "float";
+ case VKD3D_SHADER_COMPONENT_BOOL: return "bool";
+ case VKD3D_SHADER_COMPONENT_DOUBLE: return "double";
+ case VKD3D_SHADER_COMPONENT_UINT64: return "uint64";
+ case VKD3D_SHADER_COMPONENT_INT64: return "int64";
+ case VKD3D_SHADER_COMPONENT_FLOAT16: return "float16";
+ case VKD3D_SHADER_COMPONENT_UINT16: return "uint16";
+ case VKD3D_SHADER_COMPONENT_INT16: return "int16";
+ case VKD3D_SHADER_COMPONENT_TYPE_FORCE_32BIT:
+ break;
}
+
+ return "??";
}
static const char *get_minimum_precision_name(enum vkd3d_shader_minimum_precision prec)
@@ -2097,6 +2104,7 @@ static const char *get_semantic_register_name(enum vkd3d_shader_sysval_semantic
{
switch (semantic)
{
+ case VKD3D_SHADER_SV_PRIMITIVE_ID: return "primID";
case VKD3D_SHADER_SV_DEPTH: return "oDepth";
case VKD3D_SHADER_SV_DEPTH_GREATER_EQUAL: return "oDepthGE";
case VKD3D_SHADER_SV_DEPTH_LESS_EQUAL: return "oDepthLE";
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
index 58e35cf22e8..b49ef9865db 100644
--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
+++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
@@ -1759,27 +1759,40 @@ static bool is_inconsequential_instr(const struct vkd3d_shader_instruction *ins)
static void write_sm1_dst_register(struct vkd3d_bytecode_buffer *buffer, const struct vkd3d_shader_dst_param *reg)
{
+ uint32_t offset = reg->reg.idx_count ? reg->reg.idx[0].offset : 0;
+
VKD3D_ASSERT(reg->write_mask);
put_u32(buffer, VKD3D_SM1_INSTRUCTION_PARAMETER
| sm1_encode_register_type(&reg->reg)
| (reg->modifiers << VKD3D_SM1_DST_MODIFIER_SHIFT)
| (reg->write_mask << VKD3D_SM1_WRITEMASK_SHIFT)
- | (reg->reg.idx[0].offset & VKD3D_SM1_REGISTER_NUMBER_MASK));
+ | (offset & VKD3D_SM1_REGISTER_NUMBER_MASK));
}
static void write_sm1_src_register(struct vkd3d_bytecode_buffer *buffer, const struct vkd3d_shader_src_param *reg)
{
+ uint32_t address_mode = VKD3D_SM1_ADDRESS_MODE_ABSOLUTE, offset = 0;
+
+ if (reg->reg.idx_count)
+ {
+ offset = reg->reg.idx[0].offset;
+ if (reg->reg.idx[0].rel_addr)
+ address_mode = VKD3D_SM1_ADDRESS_MODE_RELATIVE;
+ }
+
put_u32(buffer, VKD3D_SM1_INSTRUCTION_PARAMETER
| sm1_encode_register_type(&reg->reg)
+ | (address_mode << VKD3D_SM1_ADDRESS_MODE_SHIFT)
| (reg->modifiers << VKD3D_SM1_SRC_MODIFIER_SHIFT)
| (swizzle_from_vsir(reg->swizzle) << VKD3D_SM1_SWIZZLE_SHIFT)
- | (reg->reg.idx[0].offset & VKD3D_SM1_REGISTER_NUMBER_MASK));
+ | (offset & VKD3D_SM1_REGISTER_NUMBER_MASK));
}
static void d3dbc_write_instruction(struct d3dbc_compiler *d3dbc, const struct vkd3d_shader_instruction *ins)
{
const struct vkd3d_shader_version *version = &d3dbc->program->shader_version;
struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer;
+ const struct vkd3d_shader_src_param *src;
const struct vkd3d_sm1_opcode_info *info;
unsigned int i;
uint32_t token;
@@ -1810,13 +1823,10 @@ static void d3dbc_write_instruction(struct d3dbc_compiler *d3dbc, const struct v
for (i = 0; i < ins->src_count; ++i)
{
- if (ins->src[i].reg.idx[0].rel_addr)
- {
- vkd3d_shader_error(d3dbc->message_context, &ins->location, VKD3D_SHADER_ERROR_D3DBC_NOT_IMPLEMENTED,
- "Unhandled relative addressing on source register.");
- d3dbc->failed = true;
- }
- write_sm1_src_register(buffer, &ins->src[i]);
+ src = &ins->src[i];
+ write_sm1_src_register(buffer, src);
+ if (src->reg.idx_count && src->reg.idx[0].rel_addr)
+ write_sm1_src_register(buffer, src->reg.idx[0].rel_addr);
}
};
@@ -1831,6 +1841,7 @@ static void d3dbc_write_vsir_def(struct d3dbc_compiler *d3dbc, const struct vkd3
.reg.type = VKD3DSPR_CONST,
.write_mask = VKD3DSP_WRITEMASK_ALL,
.reg.idx[0].offset = ins->dst[0].reg.idx[0].offset,
+ .reg.idx_count = 1,
};
token = VKD3D_SM1_OP_DEF;
@@ -1863,6 +1874,7 @@ static void d3dbc_write_vsir_sampler_dcl(struct d3dbc_compiler *d3dbc,
reg.reg.type = VKD3DSPR_COMBINED_SAMPLER;
reg.write_mask = VKD3DSP_WRITEMASK_ALL;
reg.reg.idx[0].offset = reg_id;
+ reg.reg.idx_count = 1;
write_sm1_dst_register(buffer, &reg);
}
@@ -1938,6 +1950,7 @@ static void d3dbc_write_vsir_instruction(struct d3dbc_compiler *d3dbc, const str
case VKD3DSIH_MAX:
case VKD3DSIH_MIN:
case VKD3DSIH_MOV:
+ case VKD3DSIH_MOVA:
case VKD3DSIH_MUL:
case VKD3DSIH_SINCOS:
case VKD3DSIH_SLT:
@@ -1982,6 +1995,7 @@ static void d3dbc_write_semantic_dcl(struct d3dbc_compiler *d3dbc,
uint32_t token, usage_idx;
bool ret;
+ reg.reg.idx_count = 1;
if (sm1_register_from_semantic_name(version, element->semantic_name,
element->semantic_index, output, &reg.reg.type, &reg.reg.idx[0].offset))
{
diff --git a/libs/vkd3d/libs/vkd3d-shader/dxbc.c b/libs/vkd3d/libs/vkd3d-shader/dxbc.c
index 81af62f7810..9e3a57132a1 100644
--- a/libs/vkd3d/libs/vkd3d-shader/dxbc.c
+++ b/libs/vkd3d/libs/vkd3d-shader/dxbc.c
@@ -381,7 +381,8 @@ static int shader_parse_signature(const struct vkd3d_shader_dxbc_section_desc *s
uint32_t count, header_size;
struct signature_element *e;
const char *ptr = data;
- unsigned int i, j;
+ bool fail = false;
+ unsigned int i;
if (!require_space(0, 2, sizeof(uint32_t), section->data.size))
{
@@ -436,17 +437,19 @@ static int shader_parse_signature(const struct vkd3d_shader_dxbc_section_desc *s
if (!(name = shader_get_string(data, section->data.size, name_offset))
|| !(e[i].semantic_name = vkd3d_strdup(name)))
{
- WARN("Invalid name offset %#zx (data size %#zx).\n", name_offset, section->data.size);
- for (j = 0; j < i; ++j)
- {
- vkd3d_free((void *)e[j].semantic_name);
- }
- vkd3d_free(e);
- return VKD3D_ERROR_INVALID_ARGUMENT;
+ vkd3d_shader_error(message_context, NULL, VKD3D_SHADER_ERROR_DXBC_INVALID_STRING_REFERENCE,
+ "Element %u has invalid semantic name reference %#zx (data size %#zx).\n",
+ i, name_offset, section->data.size);
+ fail = true;
}
e[i].semantic_index = read_u32(&ptr);
e[i].sysval_semantic = read_u32(&ptr);
- e[i].component_type = read_u32(&ptr);
+ if ((e[i].component_type = read_u32(&ptr)) > VKD3D_SHADER_COMPONENT_FLOAT)
+ {
+ vkd3d_shader_error(message_context, NULL, VKD3D_SHADER_ERROR_DXBC_INVALID_COMPONENT_TYPE,
+ "Element %u has invalid component type %#x.\n", i, e[i].component_type);
+ fail = true;
+ }
e[i].register_index = read_u32(&ptr);
e[i].target_location = e[i].register_index;
e[i].register_count = 1;
@@ -477,8 +480,15 @@ static int shader_parse_signature(const struct vkd3d_shader_dxbc_section_desc *s
}
s->elements = e;
+ s->elements_capacity = count;
s->element_count = count;
+ if (fail)
+ {
+ shader_signature_cleanup(s);
+ return VKD3D_ERROR_INVALID_ARGUMENT;
+ }
+
return VKD3D_OK;
}
@@ -542,6 +552,8 @@ static int shdr_handler(const struct vkd3d_shader_dxbc_section_desc *section,
{
case TAG_ISGN:
case TAG_ISG1:
+ if (desc->is_dxil)
+ break;
if (desc->input_signature.elements)
{
FIXME("Multiple input signatures.\n");
@@ -554,6 +566,8 @@ static int shdr_handler(const struct vkd3d_shader_dxbc_section_desc *section,
case TAG_OSGN:
case TAG_OSG5:
case TAG_OSG1:
+ if (desc->is_dxil)
+ break;
if (desc->output_signature.elements)
{
FIXME("Multiple output signatures.\n");
@@ -565,6 +579,8 @@ static int shdr_handler(const struct vkd3d_shader_dxbc_section_desc *section,
case TAG_PCSG:
case TAG_PSG1:
+ if (desc->is_dxil)
+ break;
if (desc->patch_constant_signature.elements)
{
FIXME("Multiple patch constant signatures.\n");
diff --git a/libs/vkd3d/libs/vkd3d-shader/dxil.c b/libs/vkd3d/libs/vkd3d-shader/dxil.c
index a10de68008a..ac4828d6f59 100644
--- a/libs/vkd3d/libs/vkd3d-shader/dxil.c
+++ b/libs/vkd3d/libs/vkd3d-shader/dxil.c
@@ -3911,23 +3911,51 @@ static void sm6_parser_init_signature(struct sm6_parser *sm6, const struct shade
}
}
-static void sm6_parser_init_output_signature(struct sm6_parser *sm6, const struct shader_signature *output_signature)
+static int sm6_parser_init_output_signature(struct sm6_parser *sm6, const struct shader_signature *output_signature)
{
+ if (!(sm6->output_params = vsir_program_get_dst_params(sm6->p.program, output_signature->element_count)))
+ {
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
+ "Failed to allocate output parameters.");
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ }
+
sm6_parser_init_signature(sm6, output_signature, false, VKD3DSPR_OUTPUT, sm6->output_params);
+
+ return VKD3D_OK;
}
-static void sm6_parser_init_input_signature(struct sm6_parser *sm6, const struct shader_signature *input_signature)
+static int sm6_parser_init_input_signature(struct sm6_parser *sm6, const struct shader_signature *input_signature)
{
+ if (!(sm6->input_params = vsir_program_get_dst_params(sm6->p.program, input_signature->element_count)))
+ {
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
+ "Failed to allocate input parameters.");
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ }
+
sm6_parser_init_signature(sm6, input_signature, true, VKD3DSPR_INPUT, sm6->input_params);
+
+ return VKD3D_OK;
}
-static void sm6_parser_init_patch_constant_signature(struct sm6_parser *sm6,
+static int sm6_parser_init_patch_constant_signature(struct sm6_parser *sm6,
const struct shader_signature *patch_constant_signature)
{
bool is_input = sm6->p.program->shader_version.type == VKD3D_SHADER_TYPE_DOMAIN;
+ if (!(sm6->patch_constant_params = vsir_program_get_dst_params(sm6->p.program,
+ patch_constant_signature->element_count)))
+ {
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
+ "Failed to allocate patch constant parameters.");
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ }
+
sm6_parser_init_signature(sm6, patch_constant_signature, is_input, VKD3DSPR_PATCHCONST,
sm6->patch_constant_params);
+
+ return VKD3D_OK;
}
static const struct sm6_value *sm6_parser_next_function_definition(struct sm6_parser *sm6)
@@ -8550,19 +8578,29 @@ static enum vkd3d_result sm6_parser_metadata_init(struct sm6_parser *sm6, const
return VKD3D_OK;
}
-static enum vkd3d_shader_component_type vkd3d_component_type_from_dxil_component_type(enum dxil_component_type type)
+static enum vkd3d_shader_component_type vkd3d_component_type_from_dxil_component_type(
+ enum dxil_component_type type, bool native_16bit)
{
switch (type)
{
case COMPONENT_TYPE_I1:
return VKD3D_SHADER_COMPONENT_BOOL;
case COMPONENT_TYPE_I16:
+ if (native_16bit)
+ return VKD3D_SHADER_COMPONENT_INT16;
+ return VKD3D_SHADER_COMPONENT_INT;
case COMPONENT_TYPE_I32:
return VKD3D_SHADER_COMPONENT_INT;
case COMPONENT_TYPE_U16:
+ if (native_16bit)
+ return VKD3D_SHADER_COMPONENT_UINT16;
+ return VKD3D_SHADER_COMPONENT_UINT;
case COMPONENT_TYPE_U32:
return VKD3D_SHADER_COMPONENT_UINT;
case COMPONENT_TYPE_F16:
+ if (native_16bit)
+ return VKD3D_SHADER_COMPONENT_FLOAT16;
+ return VKD3D_SHADER_COMPONENT_FLOAT;
case COMPONENT_TYPE_F32:
case COMPONENT_TYPE_SNORMF32:
case COMPONENT_TYPE_UNORMF32:
@@ -8577,8 +8615,12 @@ static enum vkd3d_shader_component_type vkd3d_component_type_from_dxil_component
}
}
-static enum vkd3d_shader_minimum_precision minimum_precision_from_dxil_component_type(enum dxil_component_type type)
+static enum vkd3d_shader_minimum_precision minimum_precision_from_dxil_component_type(
+ enum dxil_component_type type, bool native_16bit)
{
+ if (native_16bit)
+ return VKD3D_SHADER_MINIMUM_PRECISION_NONE;
+
switch (type)
{
case COMPONENT_TYPE_F16:
@@ -9404,8 +9446,10 @@ static enum vkd3d_result sm6_parser_read_signature(struct sm6_parser *sm6, const
{
unsigned int i, j, column_count, operand_count, index;
const struct sm6_metadata_node *node, *element_node;
+ struct vsir_program *program = sm6->p.program;
struct signature_element *elements, *e;
unsigned int values[10];
+ bool native_16bit;
bool is_register;
if (!m)
@@ -9430,6 +9474,7 @@ static enum vkd3d_result sm6_parser_read_signature(struct sm6_parser *sm6, const
return VKD3D_ERROR_OUT_OF_MEMORY;
}
+ native_16bit = program->global_flags & VKD3DSGF_FORCE_NATIVE_LOW_PRECISION;
for (i = 0; i < operand_count; ++i)
{
m = node->operands[i];
@@ -9490,8 +9535,8 @@ static enum vkd3d_result sm6_parser_read_signature(struct sm6_parser *sm6, const
}
e->semantic_name = element_node->operands[1]->u.string_value;
- e->component_type = vkd3d_component_type_from_dxil_component_type(values[2]);
- e->min_precision = minimum_precision_from_dxil_component_type(values[2]);
+ e->component_type = vkd3d_component_type_from_dxil_component_type(values[2], native_16bit);
+ e->min_precision = minimum_precision_from_dxil_component_type(values[2], native_16bit);
j = values[3];
e->sysval_semantic = sysval_semantic_from_dxil_semantic_kind(j, tessellator_domain);
@@ -9631,23 +9676,24 @@ static enum vkd3d_result sm6_parser_signatures_init(struct sm6_parser *sm6, cons
if (m->u.node->operand_count && (ret = sm6_parser_read_signature(sm6, m->u.node->operands[0],
&program->input_signature, tessellator_domain, true)) < 0)
- {
return ret;
- }
+
if (m->u.node->operand_count > 1 && (ret = sm6_parser_read_signature(sm6, m->u.node->operands[1],
&program->output_signature, tessellator_domain, false)) < 0)
- {
return ret;
- }
+
if (m->u.node->operand_count > 1 && (ret = sm6_parser_read_signature(sm6, m->u.node->operands[2],
&program->patch_constant_signature, tessellator_domain, false)) < 0)
- {
return ret;
- }
- sm6_parser_init_input_signature(sm6, &program->input_signature);
- sm6_parser_init_output_signature(sm6, &program->output_signature);
- sm6_parser_init_patch_constant_signature(sm6, &program->patch_constant_signature);
+ if ((ret = sm6_parser_init_input_signature(sm6, &program->input_signature)) < 0)
+ return ret;
+
+    if ((ret = sm6_parser_init_output_signature(sm6, &program->output_signature)) < 0)
+ return ret;
+
+ if ((ret = sm6_parser_init_patch_constant_signature(sm6, &program->patch_constant_signature)) < 0)
+ return ret;
return VKD3D_OK;
}
@@ -9917,6 +9963,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
{
input_primitive = VKD3D_PT_PATCH;
patch_vertex_count = i - INPUT_PRIMITIVE_PATCH1 + 1;
+ input_control_point_count = patch_vertex_count;
break;
}
@@ -9927,6 +9974,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
}
sm6_parser_emit_dcl_primitive_topology(sm6, VKD3DSIH_DCL_INPUT_PRIMITIVE, input_primitive, patch_vertex_count);
+ sm6->p.program->input_primitive = input_primitive;
sm6->p.program->input_control_point_count = input_control_point_count;
i = operands[1];
@@ -9938,6 +9986,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
"Geometry shader output vertex count %u is invalid.", i);
}
sm6_parser_emit_dcl_count(sm6, VKD3DSIH_DCL_VERTICES_OUT, i);
+ sm6->p.program->vertices_out_count = i;
if (operands[2] > 1)
{
@@ -9955,6 +10004,7 @@ static void sm6_parser_gs_properties_init(struct sm6_parser *sm6, const struct s
output_primitive = VKD3D_PT_TRIANGLELIST;
}
sm6_parser_emit_dcl_primitive_topology(sm6, VKD3DSIH_DCL_OUTPUT_TOPOLOGY, output_primitive, 0);
+ sm6->p.program->output_topology = output_primitive;
i = operands[4];
if (!i || i > MAX_GS_INSTANCE_COUNT)
@@ -10432,9 +10482,6 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, struct vsir_pro
input_signature = &program->input_signature;
output_signature = &program->output_signature;
patch_constant_signature = &program->patch_constant_signature;
- *input_signature = dxbc_desc->input_signature;
- *output_signature = dxbc_desc->output_signature;
- *patch_constant_signature = dxbc_desc->patch_constant_signature;
program->features = dxbc_desc->features;
memset(dxbc_desc, 0, sizeof(*dxbc_desc));
@@ -10498,18 +10545,6 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, struct vsir_pro
goto fail;
}
- if (!(sm6->output_params = vsir_program_get_dst_params(program, output_signature->element_count))
- || !(sm6->input_params = vsir_program_get_dst_params(program, input_signature->element_count))
- || !(sm6->patch_constant_params = vsir_program_get_dst_params(program,
- patch_constant_signature->element_count)))
- {
- ERR("Failed to allocate input/output parameters.\n");
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
- "Out of memory allocating input/output parameters.");
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
- goto fail;
- }
-
function_count = dxil_block_compute_function_count(&sm6->root_block);
if (!(sm6->functions = vkd3d_calloc(function_count, sizeof(*sm6->functions))))
{
@@ -10669,8 +10704,6 @@ int dxil_parse(const struct vkd3d_shader_compile_info *compile_info, uint64_t co
uint32_t *byte_code = NULL;
int ret;
- MESSAGE("Creating a DXIL parser. This is unsupported; you get to keep all the pieces if it breaks.\n");
-
dxbc_desc.is_dxil = true;
if ((ret = shader_extract_from_dxbc(&compile_info->source, message_context, compile_info->source_name,
&dxbc_desc)) < 0)
diff --git a/libs/vkd3d/libs/vkd3d-shader/fx.c b/libs/vkd3d/libs/vkd3d-shader/fx.c
index bd7e7b420db..debcb261811 100644
--- a/libs/vkd3d/libs/vkd3d-shader/fx.c
+++ b/libs/vkd3d/libs/vkd3d-shader/fx.c
@@ -25,6 +25,49 @@ static inline size_t put_u32_unaligned(struct vkd3d_bytecode_buffer *buffer, uin
return bytecode_put_bytes_unaligned(buffer, &value, sizeof(value));
}
+enum fx_2_type_constants
+{
+ /* Assignment types */
+ FX_2_ASSIGNMENT_CODE_BLOB = 0x0,
+ FX_2_ASSIGNMENT_PARAMETER = 0x1,
+ FX_2_ASSIGNMENT_ARRAY_SELECTOR = 0x2,
+};
+
+enum state_property_component_type
+{
+ FX_BOOL,
+ FX_FLOAT,
+ FX_UINT,
+ FX_UINT8,
+ FX_DEPTHSTENCIL,
+ FX_RASTERIZER,
+ FX_DOMAINSHADER,
+ FX_HULLSHADER,
+ FX_COMPUTESHADER,
+ FX_TEXTURE,
+ FX_DEPTHSTENCILVIEW,
+ FX_RENDERTARGETVIEW,
+ FX_BLEND,
+ FX_VERTEXSHADER,
+ FX_PIXELSHADER,
+ FX_GEOMETRYSHADER,
+ FX_COMPONENT_TYPE_COUNT,
+};
+
+struct rhs_named_value
+{
+ const char *name;
+ unsigned int value;
+};
+
+struct fx_assignment
+{
+ uint32_t id;
+ uint32_t lhs_index;
+ uint32_t type;
+ uint32_t value;
+};
+
struct fx_4_binary_type
{
uint32_t name;
@@ -246,6 +289,15 @@ static void set_status(struct fx_write_context *fx, int status)
fx->status = status;
}
+static void fx_print_string(struct vkd3d_string_buffer *buffer, const char *prefix,
+ const char *s, size_t len)
+{
+ if (len)
+ --len; /* Trim terminating null. */
+ vkd3d_string_buffer_printf(buffer, "%s", prefix);
+ vkd3d_string_buffer_print_string_escaped(buffer, s, len);
+}
+
static uint32_t write_string(const char *string, struct fx_write_context *fx)
{
return fx->ops->write_string(string, fx);
@@ -461,6 +513,461 @@ static void write_fx_2_annotations(struct hlsl_ir_var *var, uint32_t count_offse
set_u32(buffer, count_offset, count);
}
+static const struct rhs_named_value fx_2_zenable_values[] =
+{
+ { "USEW", 2 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_fillmode_values[] =
+{
+ { "POINT", 1 },
+ { "WIREFRAME", 2 },
+ { "SOLID", 3 },
+ { NULL },
+};
+
+static const struct rhs_named_value fx_2_shademode_values[] =
+{
+ { "FLAT", 1 },
+ { "GOURAUD", 2 },
+ { "PHONG", 3 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_blendmode_values[] =
+{
+ { "ZERO", 1 },
+ { "ONE", 2 },
+ { "SRCCOLOR", 3 },
+ { "INVSRCCOLOR", 4 },
+ { "SRCALPHA", 5 },
+ { "INVSRCALPHA", 6 },
+ { "DESTALPHA", 7 },
+ { "INVDESTALPHA", 8 },
+ { "DESTCOLOR", 9 },
+ { "INVDESTCOLOR", 10 },
+ { "SRCALPHASAT", 11 },
+ { "BOTHSRCALPHA", 12 },
+ { "BOTHINVSRCALPHA", 13 },
+ { "BLENDFACTOR", 14 },
+ { "INVBLENDFACTOR", 15 },
+ { "SRCCOLOR2", 16 },
+ { "INVSRCCOLOR2", 17 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_cullmode_values[] =
+{
+ { "NONE", 1 },
+ { "CW", 2 },
+ { "CCW", 3 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_cmpfunc_values[] =
+{
+ { "NEVER", 1 },
+ { "LESS", 2 },
+ { "EQUAL", 3 },
+ { "LESSEQUAL", 4 },
+ { "GREATER", 5 },
+ { "NOTEQUAL", 6 },
+ { "GREATEREQUAL", 7 },
+ { "ALWAYS", 8 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_fogmode_values[] =
+{
+ { "NONE", 0 },
+ { "EXP", 1 },
+ { "EXP2", 2 },
+ { "LINEAR", 3 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_stencilcaps_values[] =
+{
+ { "KEEP", 0x1 },
+ { "ZERO", 0x2 },
+ { "REPLACE", 0x4 },
+ { "INCRSAT", 0x8 },
+ { "DECRSAT", 0x10 },
+ { "INVERT", 0x20 },
+ { "INCR", 0x40 },
+ { "DECR", 0x80 },
+ { "TWOSIDED", 0x100 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_wrap_values[] =
+{
+ { "COORD_0", 0x1 },
+ { "COORD_1", 0x2 },
+ { "COORD_2", 0x4 },
+ { "COORD_3", 0x8 },
+ { "U", 0x1 },
+ { "V", 0x2 },
+ { "W", 0x4 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_materialcolorsource_values[] =
+{
+ { "MATERIAL", 0 },
+ { "COORD1", 1 },
+ { "COORD2", 2 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_vertexblend_values[] =
+{
+ { "DISABLE", 0 },
+ { "1WEIGHTS", 1 },
+ { "2WEIGHTS", 2 },
+ { "3WEIGHTS", 3 },
+ { "TWEENING", 255 },
+ { "0WEIGHTS", 256 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_clipplane_values[] =
+{
+ { "CLIPPLANE0", 0x1 },
+ { "CLIPPLANE1", 0x2 },
+ { "CLIPPLANE2", 0x4 },
+ { "CLIPPLANE3", 0x8 },
+ { "CLIPPLANE4", 0x10 },
+ { "CLIPPLANE5", 0x20 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_patchedgestyle_values[] =
+{
+ { "DISCRETE", 0 },
+ { "CONTINUOUS", 1 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_colorwriteenable_values[] =
+{
+ { "RED", 0x1 },
+ { "GREEN", 0x2 },
+ { "BLUE", 0x4 },
+ { "ALPHA", 0x8 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_blendop_values[] =
+{
+ { "ADD", 1 },
+ { "SUBTRACT", 2 },
+ { "REVSUBTRACT", 3 },
+ { "MIN", 4 },
+ { "MAX", 5 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_degree_values[] =
+{
+ { "LINEAR", 1 },
+ { "QUADRATIC", 2 },
+ { "CUBIC", 3 },
+ { "QUINTIC", 4 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_textureop_values[] =
+{
+ { "DISABLE", 1 },
+ { "SELECTARG1", 2 },
+ { "SELECTARG2", 3 },
+ { "MODULATE", 4 },
+ { "MODULATE2X", 5 },
+ { "MODULATE4X", 6 },
+ { "ADD", 7 },
+ { "ADDSIGNED", 8 },
+ { "ADDSIGNED2X", 9 },
+ { "SUBTRACT", 10 },
+ { "ADDSMOOTH", 11 },
+ { "BLENDDIFFUSEALPHA", 12 },
+ { "BLENDTEXTUREALPHA", 13 },
+ { "BLENDFACTORALPHA", 14 },
+ { "BLENDTEXTUREALPHAPM", 15 },
+ { "BLENDCURRENTALPHA", 16 },
+ { "PREMODULATE", 17 },
+ { "MODULATEALPHA_ADDCOLOR", 18 },
+ { "MODULATECOLOR_ADDALPHA", 19 },
+ { "MODULATEINVALPHA_ADDCOLOR", 20 },
+ { "MODULATEINVCOLOR_ADDALPHA", 21 },
+ { "BUMPENVMAP", 22 },
+ { "BUMPENVMAPLUMINANCE", 23 },
+ { "DOTPRODUCT3", 24 },
+ { "MULTIPLYADD", 25 },
+ { "LERP", 26 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_colorarg_values[] =
+{
+ { "DIFFUSE", 0x0 },
+ { "CURRENT", 0x1 },
+ { "TEXTURE", 0x2 },
+ { "TFACTOR", 0x3 },
+ { "SPECULAR", 0x4 },
+ { "TEMP", 0x5 },
+ { "CONSTANT", 0x6 },
+ { "COMPLEMENT", 0x10 },
+ { "ALPHAREPLICATE", 0x20 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_texturetransform_values[] =
+{
+ { "DISABLE", 0 },
+ { "COUNT1", 1 },
+ { "COUNT2", 2 },
+ { "COUNT3", 3 },
+ { "COUNT4", 4 },
+ { "PROJECTED", 256 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_lighttype_values[] =
+{
+ { "POINT", 1 },
+ { "SPOT", 2 },
+ { "DIRECTIONAL", 3 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_address_values[] =
+{
+ { "WRAP", 1 },
+ { "MIRROR", 2 },
+ { "CLAMP", 3 },
+ { "BORDER", 4 },
+ { "MIRROR_ONCE", 5 },
+ { NULL }
+};
+
+static const struct rhs_named_value fx_2_filter_values[] =
+{
+ { "NONE", 0 },
+ { "POINT", 1 },
+ { "LINEAR", 2 },
+ { "ANISOTROPIC", 3 },
+ { "PYRAMIDALQUAD", 6 },
+ { "GAUSSIANQUAD", 7 },
+ { "CONVOLUTIONMONO", 8 },
+ { NULL }
+};
+
+static const struct fx_2_state
+{
+ const char *name;
+ enum hlsl_type_class class;
+ enum state_property_component_type type;
+ unsigned int dimx;
+ uint32_t array_size;
+ uint32_t id;
+ const struct rhs_named_value *values;
+}
+fx_2_states[] =
+{
+ { "ZEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 0, fx_2_zenable_values },
+ { "FillMode", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 1, fx_2_fillmode_values },
+ { "ShadeMode", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 2, fx_2_shademode_values },
+ { "ZWriteEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 3 },
+ { "AlphaTestEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 4 },
+ { "LastPixel", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 5 },
+ { "SrcBlend", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 6, fx_2_blendmode_values },
+ { "DestBlend", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 7, fx_2_blendmode_values },
+ { "CullMode", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 8, fx_2_cullmode_values },
+ { "ZFunc", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 9, fx_2_cmpfunc_values },
+ { "AlphaRef", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 10 },
+ { "AlphaFunc", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 11, fx_2_cmpfunc_values },
+ { "DitherEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 12 },
+ { "AlphaBlendEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 13 },
+ { "FogEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 14 },
+ { "SpecularEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 15 },
+ { "FogColor", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 16 },
+ { "FogTableMode", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 17, fx_2_fogmode_values },
+ { "FogStart", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 18 },
+ { "FogEnd", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 19 },
+ { "FogDensity", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 20 },
+ { "RangeFogEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 21 },
+ { "StencilEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 22 },
+ { "StencilFail", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 23, fx_2_stencilcaps_values },
+ { "StencilZFail", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 24, fx_2_stencilcaps_values },
+ { "StencilPass", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 25, fx_2_stencilcaps_values },
+ { "StencilFunc", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 26, fx_2_cmpfunc_values },
+ { "StencilRef", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 27 },
+ { "StencilMask", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 28 },
+ { "StencilWriteMask", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 29 },
+ { "TextureFactor", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 30 },
+ { "Wrap0", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 31, fx_2_wrap_values },
+ { "Wrap1", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 32, fx_2_wrap_values },
+ { "Wrap2", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 33, fx_2_wrap_values },
+ { "Wrap3", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 34, fx_2_wrap_values },
+ { "Wrap4", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 35, fx_2_wrap_values },
+ { "Wrap5", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 36, fx_2_wrap_values },
+ { "Wrap6", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 37, fx_2_wrap_values },
+ { "Wrap7", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 38, fx_2_wrap_values },
+ { "Wrap8", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 39, fx_2_wrap_values },
+ { "Wrap9", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 40, fx_2_wrap_values },
+ { "Wrap10", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 41, fx_2_wrap_values },
+ { "Wrap11", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 42, fx_2_wrap_values },
+ { "Wrap12", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 43, fx_2_wrap_values },
+ { "Wrap13", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 44, fx_2_wrap_values },
+ { "Wrap14", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 45, fx_2_wrap_values },
+ { "Wrap15", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 46, fx_2_wrap_values },
+ { "Clipping", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 47 },
+ { "Lighting", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 48 },
+ { "Ambient", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 49 },
+ { "FogVertexMode", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 50, fx_2_fogmode_values },
+ { "ColorVertex", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 51 },
+ { "LocalViewer", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 52 },
+ { "NormalizeNormals", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 53 },
+
+ { "DiffuseMaterialSource", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 54, fx_2_materialcolorsource_values },
+ { "SpecularMaterialSource", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 55, fx_2_materialcolorsource_values },
+ { "AmbientMaterialSource", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 56, fx_2_materialcolorsource_values },
+ { "EmissiveMaterialSource", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 57, fx_2_materialcolorsource_values },
+
+ { "VertexBlend", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 58, fx_2_vertexblend_values },
+ { "ClipPlaneEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 59, fx_2_clipplane_values },
+ { "PointSize", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 60 },
+ { "PointSize_Min", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 61 },
+ { "PointSize_Max", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 62 },
+ { "PointSpriteEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 63 },
+ { "PointScaleEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 64 },
+ { "PointScale_A", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 65 },
+ { "PointScale_B", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 66 },
+ { "PointScale_C", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 67 },
+
+ { "MultiSampleAntialias", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 68 },
+ { "MultiSampleMask", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 69 },
+ { "PatchEdgeStyle", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 70, fx_2_patchedgestyle_values },
+ { "DebugMonitorToken", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 71 },
+ { "IndexedVertexBlendEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 72 },
+ { "ColorWriteEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 73, fx_2_colorwriteenable_values },
+ { "TweenFactor", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 74 },
+ { "BlendOp", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 75, fx_2_blendop_values },
+ { "PositionDegree", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 76, fx_2_degree_values },
+ { "NormalDegree", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 77, fx_2_degree_values },
+ { "ScissorTestEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 78 },
+ { "SlopeScaleDepthBias", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 79 },
+
+ { "AntialiasedLineEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 80 },
+ { "MinTessellationLevel", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 81 },
+ { "MaxTessellationLevel", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 82 },
+ { "AdaptiveTess_X", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 83 },
+ { "AdaptiveTess_Y", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 84 },
+ { "AdaptiveTess_Z", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 85 },
+ { "AdaptiveTess_W", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 86 },
+ { "EnableAdaptiveTesselation", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 87 },
+ { "TwoSidedStencilMode", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 88 },
+ { "StencilFail", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 89, fx_2_stencilcaps_values },
+ { "StencilZFail", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 90, fx_2_stencilcaps_values },
+ { "StencilPass", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 91, fx_2_stencilcaps_values },
+ { "StencilFunc", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 92, fx_2_cmpfunc_values },
+
+ { "ColorWriteEnable1", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 93, fx_2_colorwriteenable_values },
+ { "ColorWriteEnable2", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 94, fx_2_colorwriteenable_values },
+ { "ColorWriteEnable3", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 95, fx_2_colorwriteenable_values },
+ { "BlendFactor", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 96 },
+ { "SRGBWriteEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 97 },
+ { "DepthBias", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 98 },
+ { "SeparateAlphaBlendEnable", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 99 },
+ { "SrcBlendAlpha", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 100, fx_2_blendmode_values },
+ { "DestBlendAlpha", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 101, fx_2_blendmode_values },
+    { "BlendOpAlpha", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 102, fx_2_blendop_values },
+
+ { "ColorOp", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 103, fx_2_textureop_values },
+ { "ColorArg0", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 104, fx_2_colorarg_values },
+ { "ColorArg1", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 105, fx_2_colorarg_values },
+ { "ColorArg2", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 106, fx_2_colorarg_values },
+ { "AlphaOp", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 107, fx_2_textureop_values },
+ { "AlphaArg0", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 108, fx_2_colorarg_values },
+ { "AlphaArg1", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 109, fx_2_colorarg_values },
+ { "AlphaArg2", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 110, fx_2_colorarg_values },
+ { "ResultArg", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 111, fx_2_colorarg_values },
+ { "BumpEnvMat00", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 8, 112 },
+ { "BumpEnvMat01", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 8, 113 },
+ { "BumpEnvMat10", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 8, 114 },
+ { "BumpEnvMat11", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 8, 115 },
+ { "TextCoordIndex", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 116 },
+ { "BumpEnvLScale", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 8, 117 },
+ { "BumpEnvLOffset", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 8, 118 },
+ { "TextureTransformFlags", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 119, fx_2_texturetransform_values },
+ { "Constant", HLSL_CLASS_SCALAR, FX_UINT, 1, 8, 120 },
+ { "NPatchMode", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 121 },
+ { "FVF", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 122 },
+
+ { "ProjectionTransform", HLSL_CLASS_MATRIX, FX_FLOAT, 4, 1, 123 },
+ { "ViewTransform", HLSL_CLASS_MATRIX, FX_FLOAT, 4, 1, 124 },
+ { "WorldTransform", HLSL_CLASS_MATRIX, FX_FLOAT, 4, 1, 125 },
+ { "TextureTransform", HLSL_CLASS_MATRIX, FX_FLOAT, 4, 8, 126 },
+
+ { "MaterialAmbient", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 127 },
+ { "MaterialDiffuse", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 128 },
+ { "MaterialSpecular", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 129 },
+ { "MaterialEmissive", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 130 },
+ { "MaterialPower", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 131 },
+
+ { "LightType", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 132, fx_2_lighttype_values },
+ { "LightDiffuse", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 133 },
+ { "LightSpecular", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 134 },
+ { "LightAmbient", HLSL_CLASS_VECTOR, FX_FLOAT, 4, 1, 135 },
+ { "LightPosition", HLSL_CLASS_VECTOR, FX_FLOAT, 3, 1, 136 },
+ { "LightDirection", HLSL_CLASS_VECTOR, FX_FLOAT, 3, 1, 137 },
+ { "LightRange", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 138 },
+ { "LightFalloff", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 139 },
+ { "LightAttenuation0", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 140 },
+ { "LightAttenuation1", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 141 },
+ { "LightAttenuation2", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 142 },
+ { "LightTheta", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 143 },
+ { "LightPhi", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 1, 144 },
+ { "LightEnable", HLSL_CLASS_SCALAR, FX_FLOAT, 1, 8, 145 },
+
+ { "VertexShader", HLSL_CLASS_SCALAR, FX_VERTEXSHADER, 1, 1, 146 },
+ { "PixelShader", HLSL_CLASS_SCALAR, FX_PIXELSHADER, 1, 1, 147 },
+
+ { "VertexShaderConstantF", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 148 },
+ { "VertexShaderConstantB", HLSL_CLASS_SCALAR, FX_BOOL, 1, ~0u-1, 149 },
+ { "VertexShaderConstantI", HLSL_CLASS_SCALAR, FX_UINT, 1, ~0u-1, 150 },
+ { "VertexShaderConstant", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 151 },
+ { "VertexShaderConstant1", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 152 },
+ { "VertexShaderConstant2", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 153 },
+ { "VertexShaderConstant3", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 154 },
+ { "VertexShaderConstant4", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 155 },
+
+ { "PixelShaderConstantF", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 156 },
+ { "PixelShaderConstantB", HLSL_CLASS_SCALAR, FX_BOOL, 1, ~0u-1, 157 },
+ { "PixelShaderConstantI", HLSL_CLASS_SCALAR, FX_UINT, 1, ~0u-1, 158 },
+ { "PixelShaderConstant", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 159 },
+ { "PixelShaderConstant1", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 160 },
+ { "PixelShaderConstant2", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 161 },
+ { "PixelShaderConstant3", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 162 },
+ { "PixelShaderConstant4", HLSL_CLASS_SCALAR, FX_FLOAT, 1, ~0u-1, 163 },
+
+ { "Texture", HLSL_CLASS_SCALAR, FX_TEXTURE, 1, 1, 164 },
+ { "AddressU", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 165, fx_2_address_values },
+ { "AddressV", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 166, fx_2_address_values },
+ { "AddressW", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 167, fx_2_address_values },
+ { "BorderColor", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 168 },
+ { "MagFilter", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 169, fx_2_filter_values },
+ { "MinFilter", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 170, fx_2_filter_values },
+ { "MipFilter", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 171, fx_2_filter_values },
+ { "MipMapLodBias", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 172 },
+ { "MaxMipLevel", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 173 },
+ { "MaxAnisotropy", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 174 },
+    { "SRGBTexture", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 175 },
+ { "ElementIndex", HLSL_CLASS_SCALAR, FX_UINT, 1, 1, 176 },
+};
+
static void write_fx_2_pass(struct hlsl_ir_var *var, struct fx_write_context *fx)
{
struct vkd3d_bytecode_buffer *buffer = &fx->structured;
@@ -560,8 +1067,21 @@ enum fx_4_type_constants
FX_4_ASSIGNMENT_VARIABLE = 0x2,
FX_4_ASSIGNMENT_ARRAY_CONSTANT_INDEX = 0x3,
FX_4_ASSIGNMENT_ARRAY_VARIABLE_INDEX = 0x4,
+ FX_4_ASSIGNMENT_INDEX_EXPRESSION = 0x5,
+ FX_4_ASSIGNMENT_VALUE_EXPRESSION = 0x6,
FX_4_ASSIGNMENT_INLINE_SHADER = 0x7,
FX_5_ASSIGNMENT_INLINE_SHADER = 0x8,
+
+ /* FXLVM constants */
+ FX_4_FXLC_COMP_COUNT_MASK = 0xffff,
+ FX_4_FXLC_OPCODE_MASK = 0x7ff,
+ FX_4_FXLC_OPCODE_SHIFT = 20,
+ FX_4_FXLC_IS_SCALAR_MASK = 0x80000000,
+
+ FX_4_FXLC_REG_LITERAL = 1,
+ FX_4_FXLC_REG_CB = 2,
+ FX_4_FXLC_REG_OUTPUT = 4,
+ FX_4_FXLC_REG_TEMP = 7,
};
static const uint32_t fx_4_numeric_base_types[] =
@@ -1210,7 +1730,13 @@ static uint32_t write_fx_2_object_initializer(const struct hlsl_ir_var *var, str
put_u32(buffer, id);
put_u32(buffer, size);
if (size)
+ {
+ static const uint32_t pad;
+
bytecode_put_bytes(buffer, data, size);
+ if (size % 4)
+ bytecode_put_bytes_unaligned(buffer, &pad, 4 - (size % 4));
+ }
}
}
@@ -1594,12 +2120,6 @@ static void write_fx_4_annotation(struct hlsl_ir_var *var, struct fx_write_conte
}
}
-struct rhs_named_value
-{
- const char *name;
- unsigned int value;
-};
-
static bool get_fx_4_state_enum_value(const struct rhs_named_value *pairs,
const char *name, unsigned int *value)
{
@@ -1831,27 +2351,6 @@ static bool replace_state_block_constant(struct hlsl_ctx *ctx, struct hlsl_ir_no
return true;
}
-enum state_property_component_type
-{
- FX_BOOL,
- FX_FLOAT,
- FX_UINT,
- FX_UINT8,
- FX_DEPTHSTENCIL,
- FX_RASTERIZER,
- FX_DOMAINSHADER,
- FX_HULLSHADER,
- FX_COMPUTESHADER,
- FX_TEXTURE,
- FX_DEPTHSTENCILVIEW,
- FX_RENDERTARGETVIEW,
- FX_BLEND,
- FX_VERTEXSHADER,
- FX_PIXELSHADER,
- FX_GEOMETRYSHADER,
- FX_COMPONENT_TYPE_COUNT,
-};
-
static inline bool is_object_fx_type(enum state_property_component_type type)
{
switch (type)
@@ -1867,6 +2366,7 @@ static inline bool is_object_fx_type(enum state_property_component_type type)
case FX_BLEND:
case FX_VERTEXSHADER:
case FX_PIXELSHADER:
+ case FX_GEOMETRYSHADER:
return true;
default:
return false;
@@ -2262,7 +2762,8 @@ static void resolve_fx_4_state_block_values(struct hlsl_ir_var *var, struct hlsl
struct hlsl_ir_constant *c = hlsl_ir_constant(node);
struct hlsl_type *data_type = c->node.data_type;
- if (data_type->class == HLSL_CLASS_SCALAR && data_type->e.numeric.type == HLSL_TYPE_UINT)
+ if (data_type->class == HLSL_CLASS_SCALAR
+ && (data_type->e.numeric.type == HLSL_TYPE_INT || data_type->e.numeric.type == HLSL_TYPE_UINT))
{
if (c->value.u[0].u != 0)
hlsl_error(ctx, &ctx->location, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
@@ -2395,7 +2896,15 @@ static unsigned int decompose_fx_4_state_function_call(struct hlsl_ir_var *var,
static unsigned int decompose_fx_4_state_block_expand_array(struct hlsl_ir_var *var, struct hlsl_state_block *block,
unsigned int entry_index, struct fx_write_context *fx)
{
- static const char *states[] = { "SrcBlend", "DestBlend", "BlendOp", "SrcBlendAlpha", "DestBlendAlpha", "BlendOpAlpha" };
+ static const char *const states[] =
+ {
+ "SrcBlend",
+ "DestBlend",
+ "BlendOp",
+ "SrcBlendAlpha",
+ "DestBlendAlpha",
+ "BlendOpAlpha",
+ };
const struct hlsl_type *type = hlsl_get_multiarray_element_type(var->data_type);
struct hlsl_state_block_entry *entry = block->entries[entry_index];
static const unsigned int array_size = 8;
@@ -2914,6 +3423,11 @@ struct fx_parser
uint32_t buffer_count;
uint32_t object_count;
uint32_t group_count;
+ struct
+ {
+ uint32_t count;
+ uint32_t *types;
+ } objects;
bool failed;
};
@@ -2965,13 +3479,6 @@ static void VKD3D_PRINTF_FUNC(3, 4) fx_parser_error(struct fx_parser *parser, en
parser->failed = true;
}
-static int fx_2_parse(struct fx_parser *parser)
-{
- fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_NOT_IMPLEMENTED, "Parsing fx_2_0 binaries is not implemented.");
-
- return -1;
-}
-
static const void *fx_parser_get_unstructured_ptr(struct fx_parser *parser, uint32_t offset, size_t size)
{
const uint8_t *ptr = parser->unstructured.ptr;
@@ -2986,40 +3493,27 @@ static const void *fx_parser_get_unstructured_ptr(struct fx_parser *parser, uint
return &ptr[offset];
}
-static void fx_parser_read_unstructured(struct fx_parser *parser, void *dst, uint32_t offset, size_t size)
-{
- const uint8_t *ptr;
-
- memset(dst, 0, size);
- if (!(ptr = fx_parser_get_unstructured_ptr(parser, offset, size)))
- return;
-
- memcpy(dst, ptr, size);
-}
-
-static const char *fx_4_get_string(struct fx_parser *parser, uint32_t offset)
+static const void *fx_parser_get_ptr(struct fx_parser *parser, size_t size)
{
- const uint8_t *ptr = parser->unstructured.ptr;
- const uint8_t *end = parser->unstructured.end;
-
- if (offset >= parser->unstructured.size)
+ if (parser->end - parser->ptr < size)
{
parser->failed = true;
- return "<invalid>";
+ return NULL;
}
- ptr += offset;
+ return parser->ptr;
+}
- while (ptr < end && *ptr)
- ++ptr;
+static uint32_t fx_parser_read_unstructured(struct fx_parser *parser, void *dst, uint32_t offset, size_t size)
+{
+ const uint8_t *ptr;
- if (*ptr)
- {
- parser->failed = true;
- return "<invalid>";
- }
+ memset(dst, 0, size);
+ if (!(ptr = fx_parser_get_unstructured_ptr(parser, offset, size)))
+ return offset;
- return (const char *)(parser->unstructured.ptr + offset);
+ memcpy(dst, ptr, size);
+ return offset + size;
}
static void parse_fx_start_indent(struct fx_parser *parser)
@@ -3037,46 +3531,787 @@ static void parse_fx_print_indent(struct fx_parser *parser)
vkd3d_string_buffer_printf(&parser->buffer, "%*s", 4 * parser->indent, "");
}
-static void parse_fx_4_numeric_value(struct fx_parser *parser, uint32_t offset,
- const struct fx_4_binary_type *type)
+static const char *fx_2_get_string(struct fx_parser *parser, uint32_t offset, uint32_t *size)
{
- unsigned int base_type, comp_count;
- size_t i;
+ const char *ptr;
- base_type = (type->typeinfo >> FX_4_NUMERIC_BASE_TYPE_SHIFT) & 0xf;
+ fx_parser_read_unstructured(parser, size, offset, sizeof(*size));
+ ptr = fx_parser_get_unstructured_ptr(parser, offset + 4, *size);
- comp_count = type->packed_size / sizeof(uint32_t);
- for (i = 0; i < comp_count; ++i)
+ if (!ptr)
{
- union hlsl_constant_value_component value;
-
- fx_parser_read_unstructured(parser, &value, offset, sizeof(uint32_t));
-
- if (base_type == FX_4_NUMERIC_TYPE_FLOAT)
- vkd3d_string_buffer_printf(&parser->buffer, "%f", value.f);
- else if (base_type == FX_4_NUMERIC_TYPE_INT)
- vkd3d_string_buffer_printf(&parser->buffer, "%d", value.i);
- else if (base_type == FX_4_NUMERIC_TYPE_UINT)
- vkd3d_string_buffer_printf(&parser->buffer, "%u", value.u);
- else if (base_type == FX_4_NUMERIC_TYPE_BOOL)
- vkd3d_string_buffer_printf(&parser->buffer, "%s", value.u ? "true" : "false" );
- else
- vkd3d_string_buffer_printf(&parser->buffer, "%#x", value.u);
-
- if (i < comp_count - 1)
- vkd3d_string_buffer_printf(&parser->buffer, ", ");
-
- offset += sizeof(uint32_t);
+ parser->failed = true;
+ return "<invalid>";
}
-}
-static void fx_4_parse_string_initializer(struct fx_parser *parser, uint32_t offset)
-{
- const char *str = fx_4_get_string(parser, offset);
- vkd3d_string_buffer_printf(&parser->buffer, "\"%s\"", str);
+ return ptr;
}
-static void fx_parse_fx_4_annotations(struct fx_parser *parser)
+static unsigned int fx_get_fx_2_type_size(struct fx_parser *parser, uint32_t *offset)
+{
+ uint32_t element_count, member_count, class, columns, rows;
+ unsigned int size = 0;
+
+ fx_parser_read_unstructured(parser, &class, *offset + 4, sizeof(class));
+ fx_parser_read_unstructured(parser, &element_count, *offset + 16, sizeof(element_count));
+
+ if (class == D3DXPC_STRUCT)
+ {
+ *offset = fx_parser_read_unstructured(parser, &member_count, *offset + 20, sizeof(member_count));
+
+ for (uint32_t i = 0; i < member_count; ++i)
+ size += fx_get_fx_2_type_size(parser, offset);
+ }
+ else if (class == D3DXPC_VECTOR)
+ {
+ fx_parser_read_unstructured(parser, &columns, *offset + 20, sizeof(columns));
+ *offset = fx_parser_read_unstructured(parser, &rows, *offset + 24, sizeof(rows));
+ size = rows * columns * sizeof(float);
+ }
+ else if (class == D3DXPC_MATRIX_ROWS
+ || class == D3DXPC_MATRIX_COLUMNS
+ || class == D3DXPC_SCALAR)
+ {
+ fx_parser_read_unstructured(parser, &rows, *offset + 20, sizeof(rows));
+ *offset = fx_parser_read_unstructured(parser, &columns, *offset + 24, sizeof(columns));
+ size = rows * columns * sizeof(float);
+ }
+ else
+ {
+ *offset += 20;
+ }
+
+ if (element_count)
+ size *= element_count;
+ return size;
+}
+
+static const char *const fx_2_types[] =
+{
+ [D3DXPT_VOID] = "void",
+ [D3DXPT_BOOL] = "bool",
+ [D3DXPT_INT] = "int",
+ [D3DXPT_FLOAT] = "float",
+ [D3DXPT_STRING] = "string",
+ [D3DXPT_TEXTURE] = "texture",
+ [D3DXPT_TEXTURE1D] = "texture1D",
+ [D3DXPT_TEXTURE2D] = "texture2D",
+ [D3DXPT_TEXTURE3D] = "texture3D",
+ [D3DXPT_TEXTURECUBE] = "textureCUBE",
+ [D3DXPT_SAMPLER] = "sampler",
+ [D3DXPT_SAMPLER1D] = "sampler1D",
+ [D3DXPT_SAMPLER2D] = "sampler2D",
+ [D3DXPT_SAMPLER3D] = "sampler3D",
+ [D3DXPT_SAMPLERCUBE] = "samplerCUBE",
+ [D3DXPT_PIXELSHADER] = "PixelShader",
+ [D3DXPT_VERTEXSHADER] = "VertexShader",
+ [D3DXPT_PIXELFRAGMENT] = "<pixel-fragment>",
+ [D3DXPT_VERTEXFRAGMENT] = "<vertex-fragment>",
+ [D3DXPT_UNSUPPORTED] = "<unsupported>",
+};
+
+static void fx_parse_fx_2_type(struct fx_parser *parser, uint32_t offset)
+{
+ uint32_t type, class, rows, columns;
+ const char *name;
+
+ fx_parser_read_unstructured(parser, &type, offset, sizeof(type));
+ fx_parser_read_unstructured(parser, &class, offset + 4, sizeof(class));
+
+ if (class == D3DXPC_STRUCT)
+ name = "struct";
+ else
+ name = type < ARRAY_SIZE(fx_2_types) ? fx_2_types[type] : "<unknown>";
+
+ vkd3d_string_buffer_printf(&parser->buffer, "%s", name);
+ if (class == D3DXPC_VECTOR)
+ {
+ fx_parser_read_unstructured(parser, &columns, offset + 20, sizeof(columns));
+ fx_parser_read_unstructured(parser, &rows, offset + 24, sizeof(rows));
+ vkd3d_string_buffer_printf(&parser->buffer, "%u", columns);
+ }
+ else if (class == D3DXPC_MATRIX_ROWS || class == D3DXPC_MATRIX_COLUMNS)
+ {
+ fx_parser_read_unstructured(parser, &rows, offset + 20, sizeof(rows));
+ fx_parser_read_unstructured(parser, &columns, offset + 24, sizeof(columns));
+ vkd3d_string_buffer_printf(&parser->buffer, "%ux%u", rows, columns);
+ }
+}
+
+static void parse_fx_2_object_value(struct fx_parser *parser, uint32_t element_count,
+ uint32_t type, uint32_t offset)
+{
+ uint32_t id;
+
+ element_count = max(element_count, 1);
+
+ for (uint32_t i = 0; i < element_count; ++i, offset += 4)
+ {
+ fx_parser_read_unstructured(parser, &id, offset, sizeof(id));
+ vkd3d_string_buffer_printf(&parser->buffer, "<object id %u>", id);
+ if (element_count > 1)
+ vkd3d_string_buffer_printf(&parser->buffer, ", ");
+ if (id < parser->objects.count)
+ parser->objects.types[id] = type;
+ else
+ fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_INVALID_DATA,
+ "Initializer object id exceeds the number of objects in the effect.");
+ }
+
+
+}
+
+static void parse_fx_2_numeric_value(struct fx_parser *parser, uint32_t offset,
+ unsigned int size, uint32_t base_type)
+{
+ unsigned int i, comp_count;
+
+ comp_count = size / sizeof(uint32_t);
+ if (comp_count > 1)
+ vkd3d_string_buffer_printf(&parser->buffer, "{");
+ for (i = 0; i < comp_count; ++i)
+ {
+ union hlsl_constant_value_component value;
+
+ fx_parser_read_unstructured(parser, &value, offset, sizeof(uint32_t));
+
+ if (base_type == D3DXPT_INT)
+ vkd3d_string_buffer_printf(&parser->buffer, "%d", value.i);
+ else if (base_type == D3DXPT_BOOL)
+ vkd3d_string_buffer_printf(&parser->buffer, "%s", value.u ? "true" : "false" );
+ else
+ vkd3d_string_buffer_print_f32(&parser->buffer, value.f);
+
+ if (i < comp_count - 1)
+ vkd3d_string_buffer_printf(&parser->buffer, ", ");
+
+ offset += sizeof(uint32_t);
+ }
+ if (comp_count > 1)
+ vkd3d_string_buffer_printf(&parser->buffer, "}");
+}
+
+static void fx_parse_fx_2_parameter(struct fx_parser *parser, uint32_t offset)
+{
+ struct fx_2_var
+ {
+ uint32_t type;
+ uint32_t class;
+ uint32_t name;
+ uint32_t semantic;
+ uint32_t element_count;
+ } var;
+ const char *name;
+ uint32_t size;
+
+ fx_parser_read_unstructured(parser, &var, offset, sizeof(var));
+
+ fx_parse_fx_2_type(parser, offset);
+
+ name = fx_2_get_string(parser, var.name, &size);
+ fx_print_string(&parser->buffer, " ", name, size);
+ if (var.element_count)
+ vkd3d_string_buffer_printf(&parser->buffer, "[%u]", var.element_count);
+}
+
+static bool is_fx_2_sampler(uint32_t type)
+{
+ return type == D3DXPT_SAMPLER
+ || type == D3DXPT_SAMPLER1D
+ || type == D3DXPT_SAMPLER2D
+ || type == D3DXPT_SAMPLER3D
+ || type == D3DXPT_SAMPLERCUBE;
+}
+
+static void fx_parse_fx_2_assignment(struct fx_parser *parser, const struct fx_assignment *entry);
+
+static void parse_fx_2_sampler(struct fx_parser *parser, uint32_t element_count,
+ uint32_t offset)
+{
+ struct fx_assignment entry;
+ uint32_t count;
+
+ element_count = max(element_count, 1);
+
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+ for (uint32_t i = 0; i < element_count; ++i)
+ {
+ fx_parser_read_unstructured(parser, &count, offset, sizeof(count));
+ offset += sizeof(count);
+
+ parse_fx_start_indent(parser);
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "{\n");
+ parse_fx_start_indent(parser);
+ for (uint32_t j = 0; j < count; ++j, offset += sizeof(entry))
+ {
+ fx_parser_read_unstructured(parser, &entry, offset, sizeof(entry));
+
+ parse_fx_print_indent(parser);
+ fx_parse_fx_2_assignment(parser, &entry);
+ }
+ parse_fx_end_indent(parser);
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "},\n");
+ parse_fx_end_indent(parser);
+ }
+}
+
+static void fx_parse_fx_2_initial_value(struct fx_parser *parser, uint32_t param, uint32_t value)
+{
+ struct fx_2_var
+ {
+ uint32_t type;
+ uint32_t class;
+ uint32_t name;
+ uint32_t semantic;
+ uint32_t element_count;
+ } var;
+ unsigned int size;
+ uint32_t offset;
+
+ if (!value)
+ return;
+
+ fx_parser_read_unstructured(parser, &var, param, sizeof(var));
+
+ offset = param;
+ size = fx_get_fx_2_type_size(parser, &offset);
+
+ vkd3d_string_buffer_printf(&parser->buffer, " = ");
+ if (var.element_count)
+ vkd3d_string_buffer_printf(&parser->buffer, "{ ");
+
+ if (var.class == D3DXPC_OBJECT)
+ {
+ if (is_fx_2_sampler(var.type))
+ parse_fx_2_sampler(parser, var.element_count, value);
+ else
+ parse_fx_2_object_value(parser, var.element_count, var.type, value);
+ }
+ else
+ {
+ parse_fx_2_numeric_value(parser, value, size, var.type);
+ }
+
+ if (var.element_count)
+ vkd3d_string_buffer_printf(&parser->buffer, " }");
+}
+
+static void fx_parse_fx_2_annotations(struct fx_parser *parser, uint32_t count)
+{
+ uint32_t param, value;
+
+ if (parser->failed || !count)
+ return;
+
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "<\n");
+ parse_fx_start_indent(parser);
+
+ for (uint32_t i = 0; i < count; ++i)
+ {
+ param = fx_parser_read_u32(parser);
+ value = fx_parser_read_u32(parser);
+
+ parse_fx_print_indent(parser);
+ fx_parse_fx_2_parameter(parser, param);
+ fx_parse_fx_2_initial_value(parser, param, value);
+ vkd3d_string_buffer_printf(&parser->buffer, ";\n");
+ }
+
+ parse_fx_end_indent(parser);
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, ">");
+}
+
+static void fx_parse_fx_2_assignment(struct fx_parser *parser, const struct fx_assignment *entry)
+{
+ const struct rhs_named_value *named_value = NULL;
+ const struct fx_2_state *state = NULL;
+
+ if (entry->id <= ARRAY_SIZE(fx_2_states))
+ {
+ state = &fx_2_states[entry->id];
+
+ vkd3d_string_buffer_printf(&parser->buffer, "%s", state->name);
+ if (state->array_size > 1)
+ vkd3d_string_buffer_printf(&parser->buffer, "[%u]", entry->lhs_index);
+ }
+ else
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "<unrecognized state %u>", entry->id);
+ }
+ vkd3d_string_buffer_printf(&parser->buffer, " = ");
+
+ if (state && state->type == FX_UINT)
+ {
+ const struct rhs_named_value *ptr = state->values;
+ uint32_t value;
+
+ fx_parser_read_unstructured(parser, &value, entry->value, sizeof(value));
+
+ while (ptr->name)
+ {
+ if (value == ptr->value)
+ {
+ named_value = ptr;
+ break;
+ }
+ ++ptr;
+ }
+ }
+
+ if (named_value)
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "%s /* %u */", named_value->name, named_value->value);
+ }
+ else if (state)
+ {
+ if (state->type == FX_UINT || state->type == FX_FLOAT)
+ {
+ uint32_t offset = entry->type;
+ unsigned int size;
+
+ size = fx_get_fx_2_type_size(parser, &offset);
+ parse_fx_2_numeric_value(parser, entry->value, size, entry->type);
+ }
+ else if (state->type == FX_VERTEXSHADER || state->type == FX_PIXELSHADER)
+ {
+ uint32_t id;
+
+ fx_parser_read_unstructured(parser, &id, entry->value, sizeof(id));
+ vkd3d_string_buffer_printf(&parser->buffer, "<object id %u>", id);
+ }
+ else
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "<ignored>");
+ }
+ }
+ else
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "<ignored>");
+ }
+ vkd3d_string_buffer_printf(&parser->buffer, ";\n");
+}
+
+static void fx_parse_fx_2_technique(struct fx_parser *parser)
+{
+ struct fx_technique
+ {
+ uint32_t name;
+ uint32_t annotation_count;
+ uint32_t pass_count;
+ } technique;
+ struct fx_pass
+ {
+ uint32_t name;
+ uint32_t annotation_count;
+ uint32_t assignment_count;
+ } pass;
+ const char *name;
+ uint32_t size;
+
+ if (parser->failed)
+ return;
+
+ fx_parser_read_u32s(parser, &technique, sizeof(technique));
+
+ name = fx_2_get_string(parser, technique.name, &size);
+
+ parse_fx_print_indent(parser);
+ fx_print_string(&parser->buffer, "technique ", name, size);
+ fx_parse_fx_2_annotations(parser, technique.annotation_count);
+
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "{\n");
+
+ parse_fx_start_indent(parser);
+ for (uint32_t i = 0; i < technique.pass_count; ++i)
+ {
+ fx_parser_read_u32s(parser, &pass, sizeof(pass));
+ name = fx_2_get_string(parser, pass.name, &size);
+
+ parse_fx_print_indent(parser);
+ fx_print_string(&parser->buffer, "pass ", name, size);
+ fx_parse_fx_2_annotations(parser, pass.annotation_count);
+
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "{\n");
+
+ parse_fx_start_indent(parser);
+ for (uint32_t j = 0; j < pass.assignment_count; ++j)
+ {
+ struct fx_assignment entry;
+
+ parse_fx_print_indent(parser);
+ fx_parser_read_u32s(parser, &entry, sizeof(entry));
+ fx_parse_fx_2_assignment(parser, &entry);
+ }
+ parse_fx_end_indent(parser);
+
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "}\n\n");
+ }
+
+ parse_fx_end_indent(parser);
+
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "}\n\n");
+}
+
+static void fx_2_parse_parameters(struct fx_parser *parser, uint32_t count)
+{
+ struct fx_2_parameter
+ {
+ uint32_t type;
+ uint32_t value;
+ uint32_t flags;
+ uint32_t annotation_count;
+ } param;
+
+ for (uint32_t i = 0; i < count; ++i)
+ {
+ fx_parser_read_u32s(parser, &param, sizeof(param));
+
+ fx_parse_fx_2_parameter(parser, param.type);
+ fx_parse_fx_2_annotations(parser, param.annotation_count);
+ fx_parse_fx_2_initial_value(parser, param.type, param.value);
+ vkd3d_string_buffer_printf(&parser->buffer, ";\n");
+ }
+ if (count)
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+}
+
+static void fx_parse_shader_blob(struct fx_parser *parser, enum vkd3d_shader_source_type source_type,
+ const void *data, uint32_t data_size)
+{
+ struct vkd3d_shader_compile_info info = { 0 };
+ struct vkd3d_shader_code output;
+ const char *p, *q, *end;
+ int ret;
+
+ static const struct vkd3d_shader_compile_option options[] =
+ {
+ {VKD3D_SHADER_COMPILE_OPTION_API_VERSION, VKD3D_SHADER_API_VERSION_1_15},
+ };
+
+ info.type = VKD3D_SHADER_STRUCTURE_TYPE_COMPILE_INFO;
+ info.source.code = data;
+ info.source.size = data_size;
+ info.source_type = source_type;
+ info.target_type = VKD3D_SHADER_TARGET_D3D_ASM;
+ info.options = options;
+ info.option_count = ARRAY_SIZE(options);
+ info.log_level = VKD3D_SHADER_LOG_INFO;
+
+ if ((ret = vkd3d_shader_compile(&info, &output, NULL)) < 0)
+ {
+ fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_INVALID_DATA,
+ "Failed to disassemble shader blob.");
+ return;
+ }
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "asm {\n");
+
+ parse_fx_start_indent(parser);
+
+ end = (const char *)output.code + output.size;
+ for (p = output.code; p < end; p = q)
+ {
+ if (!(q = memchr(p, '\n', end - p)))
+ q = end;
+ else
+ ++q;
+
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "%.*s", (int)(q - p), p);
+ }
+
+ parse_fx_end_indent(parser);
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "}");
+
+ vkd3d_shader_free_shader_code(&output);
+}
+
+static void fx_parse_fx_2_data_blob(struct fx_parser *parser)
+{
+ uint32_t id, size;
+ const void *data;
+
+ id = fx_parser_read_u32(parser);
+ size = fx_parser_read_u32(parser);
+
+ parse_fx_print_indent(parser);
+ if (id < parser->objects.count)
+ {
+ uint32_t type = parser->objects.types[id];
+ switch (type)
+ {
+ case D3DXPT_STRING:
+ case D3DXPT_TEXTURE:
+ case D3DXPT_TEXTURE1D:
+ case D3DXPT_TEXTURE2D:
+ case D3DXPT_TEXTURE3D:
+ case D3DXPT_TEXTURECUBE:
+ case D3DXPT_PIXELSHADER:
+ case D3DXPT_VERTEXSHADER:
+ vkd3d_string_buffer_printf(&parser->buffer, "%s object %u size %u bytes%s\n",
+ fx_2_types[type], id, size, size ? ":" : ",");
+
+ if (size)
+ {
+ data = fx_parser_get_ptr(parser, size);
+
+ if (type == D3DXPT_STRING)
+ {
+ parse_fx_start_indent(parser);
+ parse_fx_print_indent(parser);
+ fx_print_string(&parser->buffer, "\"", (const char *)data, size);
+ vkd3d_string_buffer_printf(&parser->buffer, "\"");
+ parse_fx_end_indent(parser);
+ }
+ else if (type == D3DXPT_PIXELSHADER || type == D3DXPT_VERTEXSHADER)
+ {
+ fx_parse_shader_blob(parser, VKD3D_SHADER_SOURCE_D3D_BYTECODE, data, size);
+ }
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+ }
+ break;
+ default:
+ vkd3d_string_buffer_printf(&parser->buffer, "<type%u> object %u size %u bytes\n", type, id, size);
+ }
+ }
+ else
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "object %u - out-of-range id\n", id);
+ }
+
+ fx_parser_skip(parser, align(size, 4));
+}
+
+static void fx_dump_blob(struct fx_parser *parser, const void *blob, uint32_t size)
+{
+ const uint32_t *data = blob;
+ unsigned int i, j, n;
+
+ size /= sizeof(*data);
+ i = 0;
+ while (i < size)
+ {
+ parse_fx_print_indent(parser);
+ n = min(size - i, 8);
+ for (j = 0; j < n; ++j)
+ vkd3d_string_buffer_printf(&parser->buffer, "0x%08x,", data[i + j]);
+ i += n;
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+ }
+}
+
+static void fx_parse_fx_2_array_selector(struct fx_parser *parser, uint32_t size)
+{
+ const uint8_t *end = parser->ptr + size;
+ uint32_t name_size, blob_size = 0;
+ const void *blob = NULL;
+ const char *name;
+
+ name_size = fx_parser_read_u32(parser);
+ name = fx_parser_get_ptr(parser, name_size);
+ fx_parser_skip(parser, name_size);
+
+ if (!name || (uint8_t *)name >= end)
+ fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_INVALID_DATA,
+ "Malformed name entry in the array selector.");
+
+ if (parser->ptr <= end)
+ {
+ blob_size = end - parser->ptr;
+ blob = fx_parser_get_ptr(parser, blob_size);
+ fx_parser_skip(parser, blob_size);
+ }
+ else
+ {
+ fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_INVALID_DATA,
+ "Malformed blob entry in the array selector.");
+ }
+
+ if (name)
+ {
+ fx_print_string(&parser->buffer, "array \"", name, name_size);
+ vkd3d_string_buffer_printf(&parser->buffer, "\"\n");
+ }
+ if (blob)
+ {
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "selector blob size %u\n", blob_size);
+ fx_dump_blob(parser, blob, blob_size);
+ }
+}
+
+static void fx_parse_fx_2_complex_state(struct fx_parser *parser)
+{
+ struct
+ {
+ uint32_t technique;
+ uint32_t index;
+ uint32_t element;
+ uint32_t state;
+ uint32_t assignment_type;
+ } state;
+ const char *data;
+ uint32_t size;
+
+ fx_parser_read_u32s(parser, &state, sizeof(state));
+
+ if (state.technique == ~0u)
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "parameter %u[%u], state %u =\n",
+ state.index, state.element, state.state);
+ }
+ else
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "technique %u, pass %u, state %u =\n",
+ state.technique, state.index, state.state);
+ }
+
+ size = fx_parser_read_u32(parser);
+
+ parse_fx_print_indent(parser);
+
+ if (state.assignment_type == FX_2_ASSIGNMENT_PARAMETER)
+ {
+ data = fx_parser_get_ptr(parser, size);
+ fx_print_string(&parser->buffer, "parameter \"", data, size);
+ vkd3d_string_buffer_printf(&parser->buffer, "\"\n");
+ fx_parser_skip(parser, align(size, 4));
+ }
+ else if (state.assignment_type == FX_2_ASSIGNMENT_ARRAY_SELECTOR)
+ {
+ fx_parse_fx_2_array_selector(parser, size);
+ }
+ else
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "blob size %u\n", size);
+ data = fx_parser_get_ptr(parser, size);
+ fx_dump_blob(parser, data, size);
+ fx_parser_skip(parser, align(size, 4));
+ }
+}
+
+static void fx_2_parse(struct fx_parser *parser)
+{
+ uint32_t i, size, parameter_count, technique_count, blob_count, state_count;
+
+ fx_parser_skip(parser, sizeof(uint32_t)); /* Version */
+ size = fx_parser_read_u32(parser);
+
+ parser->unstructured.ptr = parser->ptr;
+ parser->unstructured.end = parser->ptr + size;
+ parser->unstructured.size = size;
+ fx_parser_skip(parser, size);
+
+ parameter_count = fx_parser_read_u32(parser);
+ technique_count = fx_parser_read_u32(parser);
+ fx_parser_read_u32(parser); /* Shader count */
+ parser->objects.count = fx_parser_read_u32(parser);
+
+ if (!(parser->objects.types = calloc(parser->objects.count, sizeof(*parser->objects.types))))
+ {
+ fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_OUT_OF_MEMORY, "Out of memory.");
+ return;
+ }
+
+ fx_2_parse_parameters(parser, parameter_count);
+ for (i = 0; i < technique_count; ++i)
+ fx_parse_fx_2_technique(parser);
+
+ blob_count = fx_parser_read_u32(parser);
+ state_count = fx_parser_read_u32(parser);
+
+ vkd3d_string_buffer_printf(&parser->buffer, "object data {\n");
+ parse_fx_start_indent(parser);
+ for (i = 0; i < blob_count; ++i)
+ fx_parse_fx_2_data_blob(parser);
+ parse_fx_end_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "}\n\n");
+
+ vkd3d_string_buffer_printf(&parser->buffer, "state data {\n");
+ parse_fx_start_indent(parser);
+ for (i = 0; i < state_count; ++i)
+ fx_parse_fx_2_complex_state(parser);
+ parse_fx_end_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "}\n");
+}
+
+static const char *fx_4_get_string(struct fx_parser *parser, uint32_t offset)
+{
+ const uint8_t *ptr = parser->unstructured.ptr;
+ const uint8_t *end = parser->unstructured.end;
+
+ if (offset >= parser->unstructured.size)
+ {
+ parser->failed = true;
+ return "<invalid>";
+ }
+
+ ptr += offset;
+
+ while (ptr < end && *ptr)
+ ++ptr;
+
+ if (*ptr)
+ {
+ parser->failed = true;
+ return "<invalid>";
+ }
+
+ return (const char *)(parser->unstructured.ptr + offset);
+}
+
+static void parse_fx_4_numeric_value(struct fx_parser *parser, uint32_t offset,
+ const struct fx_4_binary_type *type)
+{
+ unsigned int base_type, comp_count;
+ size_t i;
+
+ base_type = (type->typeinfo >> FX_4_NUMERIC_BASE_TYPE_SHIFT) & 0xf;
+
+ comp_count = type->packed_size / sizeof(uint32_t);
+ for (i = 0; i < comp_count; ++i)
+ {
+ union hlsl_constant_value_component value;
+
+ fx_parser_read_unstructured(parser, &value, offset, sizeof(uint32_t));
+
+ if (base_type == FX_4_NUMERIC_TYPE_FLOAT)
+ vkd3d_string_buffer_print_f32(&parser->buffer, value.f);
+ else if (base_type == FX_4_NUMERIC_TYPE_INT)
+ vkd3d_string_buffer_printf(&parser->buffer, "%d", value.i);
+ else if (base_type == FX_4_NUMERIC_TYPE_UINT)
+ vkd3d_string_buffer_printf(&parser->buffer, "%u", value.u);
+ else if (base_type == FX_4_NUMERIC_TYPE_BOOL)
+ vkd3d_string_buffer_printf(&parser->buffer, "%s", value.u ? "true" : "false" );
+ else
+ vkd3d_string_buffer_printf(&parser->buffer, "%#x", value.u);
+
+ if (i < comp_count - 1)
+ vkd3d_string_buffer_printf(&parser->buffer, ", ");
+
+ offset += sizeof(uint32_t);
+ }
+}
+
+static void fx_4_parse_string_initializer(struct fx_parser *parser, uint32_t offset)
+{
+ const char *str = fx_4_get_string(parser, offset);
+ vkd3d_string_buffer_printf(&parser->buffer, "\"%s\"", str);
+}
+
+static void fx_parse_fx_4_annotations(struct fx_parser *parser)
{
struct fx_4_annotation
{
@@ -3228,17 +4463,15 @@ static void fx_parse_buffers(struct fx_parser *parser)
static void fx_4_parse_shader_blob(struct fx_parser *parser, unsigned int object_type, const struct fx_5_shader *shader)
{
- struct vkd3d_shader_compile_info info = { 0 };
- struct vkd3d_shader_code output;
const void *data = NULL;
- const char *p, *q, *end;
uint32_t data_size;
- int ret;
- static const struct vkd3d_shader_compile_option options[] =
+ if (!shader->offset)
{
- {VKD3D_SHADER_COMPILE_OPTION_API_VERSION, VKD3D_SHADER_API_VERSION_1_15},
- };
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "NULL");
+ return;
+ }
fx_parser_read_unstructured(parser, &data_size, shader->offset, sizeof(data_size));
if (data_size)
@@ -3247,42 +4480,8 @@ static void fx_4_parse_shader_blob(struct fx_parser *parser, unsigned int object
if (!data)
return;
- info.type = VKD3D_SHADER_STRUCTURE_TYPE_COMPILE_INFO;
- info.source.code = data;
- info.source.size = data_size;
- info.source_type = VKD3D_SHADER_SOURCE_DXBC_TPF;
- info.target_type = VKD3D_SHADER_TARGET_D3D_ASM;
- info.options = options;
- info.option_count = ARRAY_SIZE(options);
- info.log_level = VKD3D_SHADER_LOG_INFO;
-
- if ((ret = vkd3d_shader_compile(&info, &output, NULL)) < 0)
- {
- fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_INVALID_DATA,
- "Failed to disassemble shader blob.");
- return;
- }
- parse_fx_print_indent(parser);
- vkd3d_string_buffer_printf(&parser->buffer, "asm {\n");
-
- parse_fx_start_indent(parser);
-
- end = (const char *)output.code + output.size;
- for (p = output.code; p < end; p = q)
- {
- if (!(q = memchr(p, '\n', end - p)))
- q = end;
- else
- ++q;
-
- parse_fx_print_indent(parser);
- vkd3d_string_buffer_printf(&parser->buffer, "%.*s", (int)(q - p), p);
- }
-
- parse_fx_end_indent(parser);
+ fx_parse_shader_blob(parser, VKD3D_SHADER_SOURCE_DXBC_TPF, data, data_size);
- parse_fx_print_indent(parser);
- vkd3d_string_buffer_printf(&parser->buffer, "}");
if (object_type == FX_4_OBJECT_TYPE_GEOMETRY_SHADER_SO && shader->sodecl[0])
{
vkd3d_string_buffer_printf(&parser->buffer, "\n/* Stream output declaration: \"%s\" */",
@@ -3299,8 +4498,6 @@ static void fx_4_parse_shader_blob(struct fx_parser *parser, unsigned int object
if (shader->sodecl_count)
vkd3d_string_buffer_printf(&parser->buffer, "\n/* Rasterized stream %u */", shader->rast_stream);
}
-
- vkd3d_shader_free_shader_code(&output);
}
static void fx_4_parse_shader_initializer(struct fx_parser *parser, unsigned int object_type)
@@ -3366,16 +4563,298 @@ static int fx_4_state_id_compare(const void *a, const void *b)
return id - state->id;
}
+static const struct
+{
+ uint32_t opcode;
+ const char *name;
+}
+fx_4_fxlc_opcodes[] =
+{
+ { 0x100, "mov" },
+ { 0x101, "neg" },
+ { 0x103, "rcp" },
+ { 0x104, "frc" },
+ { 0x105, "exp" },
+ { 0x106, "log" },
+ { 0x107, "rsq" },
+ { 0x108, "sin" },
+ { 0x109, "cos" },
+ { 0x10a, "asin" },
+ { 0x10b, "acos" },
+ { 0x10c, "atan" },
+ { 0x112, "sqrt" },
+ { 0x120, "ineg" },
+ { 0x121, "not" },
+ { 0x130, "itof" },
+ { 0x131, "utof" },
+ { 0x133, "ftou" },
+ { 0x137, "ftob" },
+ { 0x139, "floor" },
+ { 0x13a, "ceil" },
+ { 0x200, "min" },
+ { 0x201, "max" },
+ { 0x204, "add" },
+ { 0x205, "mul" },
+ { 0x206, "atan2" },
+ { 0x208, "div" },
+ { 0x210, "bilt" },
+ { 0x211, "bige" },
+ { 0x212, "bieq" },
+ { 0x213, "bine" },
+ { 0x214, "buge" },
+ { 0x215, "bult" },
+ { 0x216, "iadd" },
+ { 0x219, "imul" },
+ { 0x21a, "udiv" },
+ { 0x21d, "imin" },
+ { 0x21e, "imax" },
+ { 0x21f, "umin" },
+ { 0x220, "umax" },
+ { 0x230, "and" },
+ { 0x231, "or" },
+ { 0x233, "xor" },
+ { 0x234, "ishl" },
+ { 0x235, "ishr" },
+ { 0x236, "ushr" },
+ { 0x301, "movc" },
+ { 0x500, "dot" },
+ { 0x70e, "d3ds_dotswiz" },
+};
+
+static const char *fx_4_get_fxlc_opcode_name(uint32_t opcode)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(fx_4_fxlc_opcodes); ++i)
+ {
+ if (fx_4_fxlc_opcodes[i].opcode == opcode)
+ return fx_4_fxlc_opcodes[i].name;
+ }
+
+ return "<unrecognized>";
+}
+
+struct fx_4_fxlc_argument
+{
+ uint32_t flags;
+ uint32_t reg_type;
+ uint32_t address;
+};
+
+struct fx_4_ctab_entry
+{
+ uint32_t name;
+ uint16_t register_set;
+ uint16_t register_index;
+ uint16_t register_count;
+ uint16_t reserved;
+ uint32_t typeinfo;
+ uint32_t default_value;
+};
+
+struct fxlvm_code
+{
+ const float *cli4;
+ uint32_t cli4_count;
+
+ const struct fx_4_ctab_entry *constants;
+ uint32_t ctab_offset;
+ uint32_t ctab_count;
+ const char *ctab;
+
+ unsigned int comp_count;
+ bool scalar;
+};
+
+static void fx_4_parse_print_swizzle(struct fx_parser *parser, const struct fxlvm_code *code, unsigned int addr)
+{
+ unsigned int comp_count = code->scalar ? 1 : code->comp_count;
+ static const char comp[] = "xyzw";
+
+ if (comp_count < 4)
+ vkd3d_string_buffer_printf(&parser->buffer, ".%.*s", comp_count, &comp[addr % 4]);
+}
+
+static void fx_4_parse_fxlc_constant_argument(struct fx_parser *parser,
+ const struct fx_4_fxlc_argument *arg, const struct fxlvm_code *code)
+{
+ uint32_t i, offset, register_index = arg->address / 4; /* Address counts in components. */
+
+ for (i = 0; i < code->ctab_count; ++i)
+ {
+ const struct fx_4_ctab_entry *c = &code->constants[i];
+
+ if (register_index < c->register_index || register_index - c->register_index >= c->register_count)
+ continue;
+
+ vkd3d_string_buffer_printf(&parser->buffer, "%s", &code->ctab[c->name]);
+
+ /* Register offset within variable */
+ offset = arg->address - c->register_index * 4;
+
+ if (offset / 4)
+ vkd3d_string_buffer_printf(&parser->buffer, "[%u]", offset / 4);
+ fx_4_parse_print_swizzle(parser, code, offset);
+ return;
+ }
+
+ vkd3d_string_buffer_printf(&parser->buffer, "(var-not-found)");
+}
+
+static void fx_4_parse_fxlc_argument(struct fx_parser *parser, uint32_t offset, const struct fxlvm_code *code)
+{
+ struct fx_4_fxlc_argument arg;
+ uint32_t count;
+
+ fx_parser_read_unstructured(parser, &arg, offset, sizeof(arg));
+
+ switch (arg.reg_type)
+ {
+ case FX_4_FXLC_REG_LITERAL:
+ count = code->scalar ? 1 : code->comp_count;
+ if (arg.address >= code->cli4_count || count > code->cli4_count - arg.address)
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, "(<out-of-bounds>)");
+ parser->failed = true;
+ break;
+ }
+
+ vkd3d_string_buffer_printf(&parser->buffer, "(");
+ vkd3d_string_buffer_print_f32(&parser->buffer, code->cli4[arg.address]);
+ for (unsigned int i = 1; i < code->comp_count; ++i)
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, ", ");
+ vkd3d_string_buffer_print_f32(&parser->buffer, code->cli4[arg.address + (code->scalar ? 0 : i)]);
+ }
+ vkd3d_string_buffer_printf(&parser->buffer, ")");
+ break;
+
+ case FX_4_FXLC_REG_CB:
+ fx_4_parse_fxlc_constant_argument(parser, &arg, code);
+ break;
+
+ case FX_4_FXLC_REG_OUTPUT:
+ case FX_4_FXLC_REG_TEMP:
+ if (arg.reg_type == FX_4_FXLC_REG_OUTPUT)
+ vkd3d_string_buffer_printf(&parser->buffer, "expr");
+ else
+ vkd3d_string_buffer_printf(&parser->buffer, "r%u", arg.address / 4);
+ fx_4_parse_print_swizzle(parser, code, arg.address);
+ break;
+
+ default:
+ vkd3d_string_buffer_printf(&parser->buffer, "<unknown register %u>", arg.reg_type);
+ break;
+ }
+}
+
+static void fx_4_parse_fxlvm_expression(struct fx_parser *parser, uint32_t offset)
+{
+ struct vkd3d_shader_dxbc_section_desc *section, fxlc, cli4, ctab;
+ struct vkd3d_shader_dxbc_desc dxbc_desc;
+ struct vkd3d_shader_code dxbc;
+ uint32_t size, ins_count;
+ struct fxlvm_code code;
+ size_t i, j;
+
+ offset = fx_parser_read_unstructured(parser, &size, offset, sizeof(size));
+
+ dxbc.size = size;
+ dxbc.code = fx_parser_get_unstructured_ptr(parser, offset, size);
+ if (!dxbc.code)
+ return;
+
+ if (vkd3d_shader_parse_dxbc(&dxbc, 0, &dxbc_desc, NULL) < 0)
+ {
+ parser->failed = true;
+ return;
+ }
+
+ memset(&fxlc, 0, sizeof(fxlc));
+ memset(&cli4, 0, sizeof(cli4));
+ memset(&ctab, 0, sizeof(ctab));
+ for (i = 0; i < dxbc_desc.section_count; ++i)
+ {
+ section = &dxbc_desc.sections[i];
+
+ if (section->tag == TAG_FXLC)
+ fxlc = *section;
+ else if (section->tag == TAG_CLI4)
+ cli4 = *section;
+ else if (section->tag == TAG_CTAB)
+ ctab = *section;
+ }
+
+ vkd3d_shader_free_dxbc(&dxbc_desc);
+
+ if (cli4.data.code)
+ {
+ uint32_t cli4_offset = offset + (size_t)cli4.data.code - (size_t)dxbc.code;
+
+ fx_parser_read_unstructured(parser, &code.cli4_count, cli4_offset, sizeof(code.cli4_count));
+ code.cli4 = fx_parser_get_unstructured_ptr(parser, cli4_offset + 4, code.cli4_count * sizeof(float));
+ }
+
+ if (ctab.data.code)
+ {
+ uint32_t ctab_offset = offset + (size_t)ctab.data.code - (size_t)dxbc.code;
+ uint32_t consts_offset;
+
+ fx_parser_read_unstructured(parser, &code.ctab_count, ctab_offset + 12, sizeof(code.ctab_count));
+ fx_parser_read_unstructured(parser, &consts_offset, ctab_offset + 16, sizeof(consts_offset));
+
+ code.ctab = ctab.data.code;
+ code.constants = fx_parser_get_unstructured_ptr(parser,
+ ctab_offset + consts_offset, code.ctab_count * sizeof(*code.constants));
+ }
+
+ offset += (size_t)fxlc.data.code - (size_t)dxbc.code;
+ offset = fx_parser_read_unstructured(parser, &ins_count, offset, sizeof(ins_count));
+
+ parse_fx_start_indent(parser);
+
+ for (i = 0; i < ins_count; ++i)
+ {
+ uint32_t instr, opcode, src_count;
+ struct fx_4_fxlc_argument arg;
+
+ offset = fx_parser_read_unstructured(parser, &instr, offset, sizeof(instr));
+ offset = fx_parser_read_unstructured(parser, &src_count, offset, sizeof(src_count));
+
+ opcode = (instr >> FX_4_FXLC_OPCODE_SHIFT) & FX_4_FXLC_OPCODE_MASK;
+ code.comp_count = instr & FX_4_FXLC_COMP_COUNT_MASK;
+ code.scalar = false;
+
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "%s ", fx_4_get_fxlc_opcode_name(opcode));
+
+ /* Destination first. */
+ fx_4_parse_fxlc_argument(parser, offset + sizeof(arg) * src_count, &code);
+
+ for (j = 0; j < src_count; ++j)
+ {
+ vkd3d_string_buffer_printf(&parser->buffer, ", ");
+
+ /* Scalar modifier applies only to first source. */
+ code.scalar = j == 0 && !!(instr & FX_4_FXLC_IS_SCALAR_MASK);
+ fx_4_parse_fxlc_argument(parser, offset, &code);
+
+ offset += sizeof(arg);
+ }
+
+ /* Destination */
+ offset += sizeof(arg);
+
+ vkd3d_string_buffer_printf(&parser->buffer, "\n");
+ }
+
+ parse_fx_end_indent(parser);
+}
+
static void fx_4_parse_state_object_initializer(struct fx_parser *parser, uint32_t count,
enum hlsl_type_class type_class)
{
- struct fx_4_assignment
- {
- uint32_t id;
- uint32_t lhs_index;
- uint32_t type;
- uint32_t value;
- } entry;
+ struct fx_assignment entry;
struct
{
uint32_t name;
@@ -3390,7 +4869,7 @@ static void fx_4_parse_state_object_initializer(struct fx_parser *parser, uint32
float f;
};
} value;
- static const char *value_types[FX_COMPONENT_TYPE_COUNT] =
+ static const char *const value_types[FX_COMPONENT_TYPE_COUNT] =
{
[FX_BOOL] = "bool",
[FX_FLOAT] = "float",
@@ -3496,6 +4975,19 @@ static void fx_4_parse_state_object_initializer(struct fx_parser *parser, uint32
vkd3d_string_buffer_printf(&parser->buffer, "%s[%s]", fx_4_get_string(parser, index.name),
fx_4_get_string(parser, index.index));
break;
+ case FX_4_ASSIGNMENT_INDEX_EXPRESSION:
+ fx_parser_read_unstructured(parser, &index, entry.value, sizeof(index));
+ vkd3d_string_buffer_printf(&parser->buffer, "%s[eval(\n", fx_4_get_string(parser, index.name));
+ fx_4_parse_fxlvm_expression(parser, index.index);
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, ")]");
+ break;
+ case FX_4_ASSIGNMENT_VALUE_EXPRESSION:
+ vkd3d_string_buffer_printf(&parser->buffer, "eval(\n");
+ fx_4_parse_fxlvm_expression(parser, entry.value);
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, ")");
+ break;
case FX_4_ASSIGNMENT_INLINE_SHADER:
case FX_5_ASSIGNMENT_INLINE_SHADER:
{
@@ -3544,12 +5036,14 @@ static void fx_4_parse_object_initializer(struct fx_parser *parser, const struct
};
unsigned int i, element_count, count;
uint32_t value;
+ bool is_array;
if (!fx_4_object_has_initializer(type))
return;
vkd3d_string_buffer_printf(&parser->buffer, " = {\n");
element_count = max(type->element_count, 1);
+ is_array = element_count > 1;
for (i = 0; i < element_count; ++i)
{
switch (type->typeinfo)
@@ -3565,9 +5059,21 @@ static void fx_4_parse_object_initializer(struct fx_parser *parser, const struct
case FX_4_OBJECT_TYPE_SAMPLER_STATE:
count = fx_parser_read_u32(parser);
+ if (is_array)
+ {
+ parse_fx_start_indent(parser);
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "{\n");
+ }
parse_fx_start_indent(parser);
fx_4_parse_state_object_initializer(parser, count, type_classes[type->typeinfo]);
parse_fx_end_indent(parser);
+ if (is_array)
+ {
+ parse_fx_print_indent(parser);
+ vkd3d_string_buffer_printf(&parser->buffer, "}");
+ parse_fx_end_indent(parser);
+ }
break;
case FX_4_OBJECT_TYPE_PIXEL_SHADER:
case FX_4_OBJECT_TYPE_VERTEX_SHADER:
@@ -3586,7 +5092,7 @@ static void fx_4_parse_object_initializer(struct fx_parser *parser, const struct
"Parsing object type %u is not implemented.", type->typeinfo);
return;
}
- vkd3d_string_buffer_printf(&parser->buffer, ",\n");
+ vkd3d_string_buffer_printf(&parser->buffer, is_array ? ",\n" : "\n");
}
vkd3d_string_buffer_printf(&parser->buffer, "}");
}
@@ -3719,7 +5225,7 @@ static void fx_parse_groups(struct fx_parser *parser)
}
}
-static int fx_4_parse(struct fx_parser *parser)
+static void fx_4_parse(struct fx_parser *parser)
{
struct fx_4_header
{
@@ -3752,8 +5258,9 @@ static int fx_4_parse(struct fx_parser *parser)
if (parser->end - parser->ptr < header.unstructured_size)
{
- parser->failed = true;
- return -1;
+ fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_INVALID_SIZE,
+ "Invalid unstructured data size %u.", header.unstructured_size);
+ return;
}
parser->unstructured.ptr = parser->ptr;
@@ -3766,11 +5273,9 @@ static int fx_4_parse(struct fx_parser *parser)
for (i = 0; i < header.technique_count; ++i)
fx_parse_fx_4_technique(parser);
-
- return parser->failed ? - 1 : 0;
}
-static int fx_5_parse(struct fx_parser *parser)
+static void fx_5_parse(struct fx_parser *parser)
{
struct fx_5_header
{
@@ -3808,8 +5313,9 @@ static int fx_5_parse(struct fx_parser *parser)
if (parser->end - parser->ptr < header.unstructured_size)
{
- parser->failed = true;
- return -1;
+ fx_parser_error(parser, VKD3D_SHADER_ERROR_FX_INVALID_SIZE,
+ "Invalid unstructured data size %u.", header.unstructured_size);
+ return;
}
parser->unstructured.ptr = parser->ptr;
@@ -3821,48 +5327,62 @@ static int fx_5_parse(struct fx_parser *parser)
fx_4_parse_objects(parser);
fx_parse_groups(parser);
+}
+
+static void fx_parser_init(struct fx_parser *parser, const struct vkd3d_shader_compile_info *compile_info,
+ struct vkd3d_shader_message_context *message_context)
+{
+ memset(parser, 0, sizeof(*parser));
+ parser->start = compile_info->source.code;
+ parser->ptr = compile_info->source.code;
+ parser->end = (uint8_t *)compile_info->source.code + compile_info->source.size;
+ parser->message_context = message_context;
+ vkd3d_string_buffer_init(&parser->buffer);
+}
- return parser->failed ? - 1 : 0;
+static void fx_parser_cleanup(struct fx_parser *parser)
+{
+ free(parser->objects.types);
}
int fx_parse(const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context)
{
- struct fx_parser parser =
- {
- .start = compile_info->source.code,
- .ptr = compile_info->source.code,
- .end = (uint8_t *)compile_info->source.code + compile_info->source.size,
- .message_context = message_context,
- };
+ struct fx_parser parser;
uint32_t version;
- int ret;
- vkd3d_string_buffer_init(&parser.buffer);
+ fx_parser_init(&parser, compile_info, message_context);
if (parser.end - parser.start < sizeof(version))
- return -1;
+ {
+ fx_parser_error(&parser, VKD3D_SHADER_ERROR_FX_INVALID_SIZE,
+ "Source size %zu is smaller than the FX header size.", compile_info->source.size);
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
version = *(uint32_t *)parser.ptr;
switch (version)
{
case 0xfeff0901:
- ret = fx_2_parse(&parser);
+ fx_2_parse(&parser);
break;
case 0xfeff1001:
case 0xfeff1011:
- ret = fx_4_parse(&parser);
+ fx_4_parse(&parser);
break;
case 0xfeff2001:
- ret = fx_5_parse(&parser);
+ fx_5_parse(&parser);
break;
default:
fx_parser_error(&parser, VKD3D_SHADER_ERROR_FX_INVALID_VERSION,
"Invalid effect binary version value 0x%08x.", version);
- ret = -1;
+ break;
}
vkd3d_shader_code_from_string_buffer(out, &parser.buffer);
+ fx_parser_cleanup(&parser);
- return ret;
+ if (parser.failed)
+ return VKD3D_ERROR_INVALID_SHADER;
+ return VKD3D_OK;
}
diff --git a/libs/vkd3d/libs/vkd3d-shader/glsl.c b/libs/vkd3d/libs/vkd3d-shader/glsl.c
index ab6604bd703..a87ade5e467 100644
--- a/libs/vkd3d/libs/vkd3d-shader/glsl.c
+++ b/libs/vkd3d/libs/vkd3d-shader/glsl.c
@@ -64,7 +64,6 @@ struct vkd3d_glsl_generator
const struct vkd3d_shader_interface_info *interface_info;
const struct vkd3d_shader_descriptor_offset_info *offset_info;
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info;
const struct vkd3d_shader_scan_combined_resource_sampler_info *combined_sampler_info;
};
@@ -130,7 +129,7 @@ static const struct glsl_resource_type_info *shader_glsl_get_resource_type_info(
static const struct vkd3d_shader_descriptor_info1 *shader_glsl_get_descriptor(struct vkd3d_glsl_generator *gen,
enum vkd3d_shader_descriptor_type type, unsigned int idx, unsigned int space)
{
- const struct vkd3d_shader_scan_descriptor_info1 *info = gen->descriptor_info;
+ const struct vkd3d_shader_scan_descriptor_info1 *info = &gen->program->descriptors;
for (unsigned int i = 0; i < info->descriptor_count; ++i)
{
@@ -146,7 +145,7 @@ static const struct vkd3d_shader_descriptor_info1 *shader_glsl_get_descriptor(st
static const struct vkd3d_shader_descriptor_info1 *shader_glsl_get_descriptor_by_id(
struct vkd3d_glsl_generator *gen, enum vkd3d_shader_descriptor_type type, unsigned int id)
{
- const struct vkd3d_shader_scan_descriptor_info1 *info = gen->descriptor_info;
+ const struct vkd3d_shader_scan_descriptor_info1 *info = &gen->program->descriptors;
for (unsigned int i = 0; i < info->descriptor_count; ++i)
{
@@ -269,15 +268,15 @@ static void shader_glsl_print_register_name(struct vkd3d_string_buffer *buffer,
vkd3d_string_buffer_printf(buffer, "<unhandled register %#x>", reg->type);
break;
}
- if (reg->idx[0].rel_addr || reg->idx[2].rel_addr)
+ if (reg->idx[0].rel_addr)
{
vkd3d_glsl_compiler_error(gen, VKD3D_SHADER_ERROR_GLSL_INTERNAL,
"Internal compiler error: Unhandled constant buffer register indirect addressing.");
vkd3d_string_buffer_printf(buffer, "<unhandled register %#x>", reg->type);
break;
}
- vkd3d_string_buffer_printf(buffer, "%s_cb_%u[%u]",
- gen->prefix, reg->idx[0].offset, reg->idx[2].offset);
+ vkd3d_string_buffer_printf(buffer, "%s_cb_%u", gen->prefix, reg->idx[0].offset);
+ shader_glsl_print_subscript(buffer, gen, reg->idx[2].rel_addr, reg->idx[2].offset);
break;
case VKD3DSPR_THREADID:
@@ -485,8 +484,7 @@ static void shader_glsl_print_subscript(struct vkd3d_string_buffer *buffer, stru
vkd3d_string_buffer_printf(buffer, "[%s", r.str->buffer);
if (offset)
vkd3d_string_buffer_printf(buffer, " + %u", offset);
- else
- vkd3d_string_buffer_printf(buffer, "]");
+ vkd3d_string_buffer_printf(buffer, "]");
glsl_src_cleanup(&r, &gen->string_buffers);
}
@@ -1298,7 +1296,7 @@ static void shader_glsl_print_sysval_name(struct vkd3d_string_buffer *buffer, st
vkd3d_glsl_compiler_error(gen, VKD3D_SHADER_ERROR_GLSL_INTERNAL,
"Internal compiler error: Unhandled SV_POSITION index %u.", idx);
if (version->type == VKD3D_SHADER_TYPE_PIXEL)
- vkd3d_string_buffer_printf(buffer, "gl_FragCoord");
+ vkd3d_string_buffer_printf(buffer, "vec4(gl_FragCoord.xyz, 1.0 / gl_FragCoord.w)");
else
vkd3d_string_buffer_printf(buffer, "gl_Position");
break;
@@ -1658,6 +1656,9 @@ static void vkd3d_glsl_handle_instruction(struct vkd3d_glsl_generator *gen,
case VKD3DSIH_SWITCH:
shader_glsl_switch(gen, ins);
break;
+ case VKD3DSIH_XOR:
+ shader_glsl_binop(gen, ins, "^");
+ break;
default:
shader_glsl_unhandled(gen, ins);
break;
@@ -2078,7 +2079,7 @@ static void shader_glsl_generate_sampler_declaration(struct vkd3d_glsl_generator
static void shader_glsl_generate_descriptor_declarations(struct vkd3d_glsl_generator *gen)
{
const struct vkd3d_shader_scan_combined_resource_sampler_info *sampler_info = gen->combined_sampler_info;
- const struct vkd3d_shader_scan_descriptor_info1 *info = gen->descriptor_info;
+ const struct vkd3d_shader_scan_descriptor_info1 *info = &gen->program->descriptors;
const struct vkd3d_shader_descriptor_info1 *descriptor;
unsigned int i;
@@ -2429,7 +2430,6 @@ static void shader_glsl_init_limits(struct vkd3d_glsl_generator *gen, const stru
static void vkd3d_glsl_generator_init(struct vkd3d_glsl_generator *gen,
struct vsir_program *program, const struct vkd3d_shader_compile_info *compile_info,
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info,
const struct vkd3d_shader_scan_combined_resource_sampler_info *combined_sampler_info,
struct vkd3d_shader_message_context *message_context)
{
@@ -2453,12 +2453,10 @@ static void vkd3d_glsl_generator_init(struct vkd3d_glsl_generator *gen,
gen->interface_info = vkd3d_find_struct(compile_info->next, INTERFACE_INFO);
gen->offset_info = vkd3d_find_struct(compile_info->next, DESCRIPTOR_OFFSET_INFO);
- gen->descriptor_info = descriptor_info;
gen->combined_sampler_info = combined_sampler_info;
}
int glsl_compile(struct vsir_program *program, uint64_t config_flags,
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info,
const struct vkd3d_shader_scan_combined_resource_sampler_info *combined_sampler_info,
const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context)
@@ -2470,9 +2468,10 @@ int glsl_compile(struct vsir_program *program, uint64_t config_flags,
return ret;
VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_SM6);
+ VKD3D_ASSERT(program->has_descriptor_info);
vkd3d_glsl_generator_init(&generator, program, compile_info,
- descriptor_info, combined_sampler_info, message_context);
+ combined_sampler_info, message_context);
ret = vkd3d_glsl_generator_generate(&generator, out);
vkd3d_glsl_generator_cleanup(&generator);
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.c b/libs/vkd3d/libs/vkd3d-shader/hlsl.c
index 41586550203..d1d20b7384c 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.c
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.c
@@ -234,6 +234,33 @@ unsigned int hlsl_get_multiarray_size(const struct hlsl_type *type)
return 1;
}
+const struct hlsl_type *hlsl_get_stream_output_type(const struct hlsl_type *type)
+{
+ unsigned int i;
+
+ switch (type->class)
+ {
+ case HLSL_CLASS_ARRAY:
+ return hlsl_get_stream_output_type(type->e.array.type);
+
+ case HLSL_CLASS_STRUCT:
+ for (i = 0; i < type->e.record.field_count; ++i)
+ {
+ const struct hlsl_type *field_type = hlsl_get_stream_output_type(type->e.record.fields[i].type);
+
+ if (field_type)
+ return field_type;
+ }
+ return NULL;
+
+ case HLSL_CLASS_STREAM_OUTPUT:
+ return type;
+
+ default:
+ return NULL;
+ }
+}
+
bool hlsl_type_is_resource(const struct hlsl_type *type)
{
switch (type->class)
@@ -298,6 +325,45 @@ bool hlsl_type_is_patch_array(const struct hlsl_type *type)
|| type->e.array.array_type == HLSL_ARRAY_PATCH_OUTPUT);
}
+bool hlsl_type_is_primitive_array(const struct hlsl_type *type)
+{
+ return type->class == HLSL_CLASS_ARRAY && (type->e.array.array_type != HLSL_ARRAY_GENERIC
+ || (type->modifiers & HLSL_PRIMITIVE_MODIFIERS_MASK));
+}
+
+bool hlsl_base_type_is_integer(enum hlsl_base_type type)
+{
+ switch (type)
+ {
+ case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
+ case HLSL_TYPE_UINT:
+ return true;
+
+ case HLSL_TYPE_DOUBLE:
+ case HLSL_TYPE_FLOAT:
+ case HLSL_TYPE_HALF:
+ return false;
+ }
+
+ vkd3d_unreachable();
+}
+
+bool hlsl_type_is_integer(const struct hlsl_type *type)
+{
+ VKD3D_ASSERT(hlsl_is_numeric_type(type));
+ return hlsl_base_type_is_integer(type->e.numeric.type);
+}
+
+bool hlsl_type_is_floating_point(const struct hlsl_type *type)
+{
+ if (!hlsl_is_numeric_type(type))
+ return false;
+
+ return !hlsl_type_is_integer(type);
+}
+
/* Only intended to be used for derefs (after copies have been lowered to components or vectors) or
* resources, since for both their data types span across a single regset. */
static enum hlsl_regset type_get_regset(const struct hlsl_type *type)
@@ -484,6 +550,8 @@ static struct hlsl_type *hlsl_new_type(struct hlsl_ctx *ctx, const char *name, e
{
struct hlsl_type *type;
+ TRACE("New type %s.\n", name);
+
if (!(type = hlsl_alloc(ctx, sizeof(*type))))
return NULL;
if (!(type->name = hlsl_strdup(ctx, name)))
@@ -704,8 +772,7 @@ unsigned int hlsl_type_get_component_offset(struct hlsl_ctx *ctx, struct hlsl_ty
return offset[*regset];
}
-static bool init_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hlsl_ir_var *var,
- unsigned int path_len)
+bool hlsl_init_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hlsl_ir_var *var, unsigned int path_len)
{
deref->var = var;
deref->path_len = path_len;
@@ -763,7 +830,7 @@ bool hlsl_init_deref_from_index_chain(struct hlsl_ctx *ctx, struct hlsl_deref *d
}
load = hlsl_ir_load(ptr);
- if (!init_deref(ctx, deref, load->src.var, load->src.path_len + chain_len))
+ if (!hlsl_init_deref(ctx, deref, load->src.var, load->src.path_len + chain_len))
return false;
for (i = 0; i < load->src.path_len; ++i)
@@ -832,7 +899,7 @@ static bool init_deref_from_component_index(struct hlsl_ctx *ctx, struct hlsl_bl
++path_len;
}
- if (!init_deref(ctx, deref, prefix->var, prefix->path_len + path_len))
+ if (!hlsl_init_deref(ctx, deref, prefix->var, prefix->path_len + path_len))
return false;
deref_path_len = 0;
@@ -845,13 +912,7 @@ static bool init_deref_from_component_index(struct hlsl_ctx *ctx, struct hlsl_bl
{
unsigned int next_index = traverse_path_from_component_index(ctx, &path_type, &path_index);
- if (!(c = hlsl_new_uint_constant(ctx, next_index, loc)))
- {
- hlsl_block_cleanup(block);
- return false;
- }
- hlsl_block_add_instr(block, c);
-
+ c = hlsl_block_add_uint_constant(ctx, block, next_index, loc);
hlsl_src_from_node(&deref->path[deref_path_len++], c);
}
@@ -1104,6 +1165,7 @@ unsigned int hlsl_type_component_count(const struct hlsl_type *type)
case HLSL_CLASS_HULL_SHADER:
case HLSL_CLASS_GEOMETRY_SHADER:
case HLSL_CLASS_BLEND_STATE:
+ case HLSL_CLASS_STREAM_OUTPUT:
case HLSL_CLASS_NULL:
return 1;
@@ -1111,7 +1173,6 @@ unsigned int hlsl_type_component_count(const struct hlsl_type *type)
case HLSL_CLASS_PASS:
case HLSL_CLASS_TECHNIQUE:
case HLSL_CLASS_VOID:
- case HLSL_CLASS_STREAM_OUTPUT:
break;
}
@@ -1324,15 +1385,16 @@ bool hlsl_scope_add_type(struct hlsl_scope *scope, struct hlsl_type *type)
return true;
}
-struct hlsl_ir_node *hlsl_new_cast(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, struct hlsl_type *type,
- const struct vkd3d_shader_location *loc)
+static struct hlsl_ir_node *append_new_instr(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_node *instr)
{
- struct hlsl_ir_node *cast;
+ if (!instr)
+ {
+ block->value = ctx->error_instr;
+ return ctx->error_instr;
+ }
- cast = hlsl_new_unary_expr(ctx, HLSL_OP1_CAST, node, loc);
- if (cast)
- cast->data_type = type;
- return cast;
+ hlsl_block_add_instr(block, instr);
+ return instr;
}
struct hlsl_ir_node *hlsl_new_copy(struct hlsl_ctx *ctx, struct hlsl_ir_node *node)
@@ -1429,7 +1491,7 @@ bool hlsl_copy_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, const struc
VKD3D_ASSERT(!hlsl_deref_is_lowered(other));
- if (!init_deref(ctx, deref, other->var, other->path_len))
+ if (!hlsl_init_deref(ctx, deref, other->var, other->path_len))
return false;
for (i = 0; i < deref->path_len; ++i)
@@ -1491,7 +1553,7 @@ struct hlsl_ir_node *hlsl_new_store_index(struct hlsl_ctx *ctx, const struct hls
return NULL;
init_node(&store->node, HLSL_IR_STORE, NULL, loc);
- if (!init_deref(ctx, &store->lhs, lhs->var, lhs->path_len + !!idx))
+ if (!hlsl_init_deref(ctx, &store->lhs, lhs->var, lhs->path_len + !!idx))
{
vkd3d_free(store);
return NULL;
@@ -1510,22 +1572,73 @@ struct hlsl_ir_node *hlsl_new_store_index(struct hlsl_ctx *ctx, const struct hls
return &store->node;
}
-bool hlsl_new_store_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
+void hlsl_block_add_store_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *lhs, struct hlsl_ir_node *idx, struct hlsl_ir_node *rhs,
+ unsigned int writemask, const struct vkd3d_shader_location *loc)
+{
+ append_new_instr(ctx, block, hlsl_new_store_index(ctx, lhs, idx, rhs, writemask, loc));
+}
+
+void hlsl_block_add_simple_store(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_var *lhs, struct hlsl_ir_node *rhs)
+{
+ struct hlsl_deref lhs_deref;
+
+ hlsl_init_simple_deref_from_var(&lhs_deref, lhs);
+ hlsl_block_add_store_index(ctx, block, &lhs_deref, NULL, rhs, 0, &rhs->loc);
+}
+
+static struct hlsl_ir_node *hlsl_new_store_parent(struct hlsl_ctx *ctx,
+ const struct hlsl_deref *lhs, unsigned int path_len, struct hlsl_ir_node *rhs,
+ unsigned int writemask, const struct vkd3d_shader_location *loc)
+{
+ struct hlsl_ir_store *store;
+
+ VKD3D_ASSERT(!hlsl_deref_is_lowered(lhs));
+ VKD3D_ASSERT(lhs->path_len >= path_len);
+
+ if (!(store = hlsl_alloc(ctx, sizeof(*store))))
+ return NULL;
+ init_node(&store->node, HLSL_IR_STORE, NULL, loc);
+
+ if (!hlsl_init_deref(ctx, &store->lhs, lhs->var, path_len))
+ {
+ vkd3d_free(store);
+ return NULL;
+ }
+ for (unsigned int i = 0; i < path_len; ++i)
+ hlsl_src_from_node(&store->lhs.path[i], lhs->path[i].node);
+
+ hlsl_src_from_node(&store->rhs, rhs);
+
+ if (!writemask && type_is_single_reg(rhs->data_type))
+ writemask = (1 << rhs->data_type->e.numeric.dimx) - 1;
+ store->writemask = writemask;
+
+ return &store->node;
+}
+
+void hlsl_block_add_store_parent(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *lhs, unsigned int path_len, struct hlsl_ir_node *rhs,
+ unsigned int writemask, const struct vkd3d_shader_location *loc)
+{
+ append_new_instr(ctx, block, hlsl_new_store_parent(ctx, lhs, path_len, rhs, writemask, loc));
+}
+
+void hlsl_block_add_store_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
const struct hlsl_deref *lhs, unsigned int comp, struct hlsl_ir_node *rhs)
{
struct hlsl_block comp_path_block;
struct hlsl_ir_store *store;
- hlsl_block_init(block);
-
if (!(store = hlsl_alloc(ctx, sizeof(*store))))
- return false;
+ return;
init_node(&store->node, HLSL_IR_STORE, NULL, &rhs->loc);
if (!init_deref_from_component_index(ctx, &comp_path_block, &store->lhs, lhs, comp, &rhs->loc))
{
vkd3d_free(store);
- return false;
+ return;
}
hlsl_block_add_block(block, &comp_path_block);
hlsl_src_from_node(&store->rhs, rhs);
@@ -1534,8 +1647,6 @@ bool hlsl_new_store_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
store->writemask = (1 << rhs->data_type->e.numeric.dimx) - 1;
hlsl_block_add_instr(block, &store->node);
-
- return true;
}
struct hlsl_ir_node *hlsl_new_call(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *decl,
@@ -1575,7 +1686,7 @@ struct hlsl_ir_node *hlsl_new_bool_constant(struct hlsl_ctx *ctx, bool b, const
return hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL), &value, loc);
}
-struct hlsl_ir_node *hlsl_new_float_constant(struct hlsl_ctx *ctx, float f,
+static struct hlsl_ir_node *hlsl_new_float_constant(struct hlsl_ctx *ctx, float f,
const struct vkd3d_shader_location *loc)
{
struct hlsl_constant_value value;
@@ -1584,7 +1695,14 @@ struct hlsl_ir_node *hlsl_new_float_constant(struct hlsl_ctx *ctx, float f,
return hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), &value, loc);
}
-struct hlsl_ir_node *hlsl_new_int_constant(struct hlsl_ctx *ctx, int32_t n, const struct vkd3d_shader_location *loc)
+struct hlsl_ir_node *hlsl_block_add_float_constant(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ float f, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_float_constant(ctx, f, loc));
+}
+
+static struct hlsl_ir_node *hlsl_new_int_constant(struct hlsl_ctx *ctx, int32_t n,
+ const struct vkd3d_shader_location *loc)
{
struct hlsl_constant_value value;
@@ -1592,6 +1710,12 @@ struct hlsl_ir_node *hlsl_new_int_constant(struct hlsl_ctx *ctx, int32_t n, cons
return hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_INT), &value, loc);
}
+struct hlsl_ir_node *hlsl_block_add_int_constant(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ int32_t n, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_int_constant(ctx, n, loc));
+}
+
struct hlsl_ir_node *hlsl_new_uint_constant(struct hlsl_ctx *ctx, unsigned int n,
const struct vkd3d_shader_location *loc)
{
@@ -1601,6 +1725,12 @@ struct hlsl_ir_node *hlsl_new_uint_constant(struct hlsl_ctx *ctx, unsigned int n
return hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), &value, loc);
}
+struct hlsl_ir_node *hlsl_block_add_uint_constant(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ unsigned int n, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_uint_constant(ctx, n, loc));
+}
+
struct hlsl_ir_node *hlsl_new_string_constant(struct hlsl_ctx *ctx, const char *str,
const struct vkd3d_shader_location *loc)
{
@@ -1625,7 +1755,7 @@ struct hlsl_ir_node *hlsl_new_null_constant(struct hlsl_ctx *ctx, const struct v
return hlsl_new_constant(ctx, ctx->builtin_types.null, &value, loc);
}
-struct hlsl_ir_node *hlsl_new_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
+static struct hlsl_ir_node *hlsl_new_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS],
struct hlsl_type *data_type, const struct vkd3d_shader_location *loc)
{
@@ -1641,7 +1771,14 @@ struct hlsl_ir_node *hlsl_new_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op
return &expr->node;
}
-struct hlsl_ir_node *hlsl_new_unary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
+struct hlsl_ir_node *hlsl_block_add_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ enum hlsl_ir_expr_op op, struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS],
+ struct hlsl_type *data_type, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_expr(ctx, op, operands, data_type, loc));
+}
+
+static struct hlsl_ir_node *hlsl_new_unary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
struct hlsl_ir_node *arg, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {arg};
@@ -1649,6 +1786,12 @@ struct hlsl_ir_node *hlsl_new_unary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr
return hlsl_new_expr(ctx, op, operands, arg->data_type, loc);
}
+struct hlsl_ir_node *hlsl_block_add_unary_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_unary_expr(ctx, op, arg, loc));
+}
+
struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2)
{
@@ -1657,16 +1800,37 @@ struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_exp
return hlsl_new_expr(ctx, op, operands, arg1->data_type, &arg1->loc);
}
+struct hlsl_ir_node *hlsl_block_add_binary_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2)
+{
+ return append_new_instr(ctx, block, hlsl_new_binary_expr(ctx, op, arg1, arg2));
+}
+
struct hlsl_ir_node *hlsl_new_ternary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2, struct hlsl_ir_node *arg3)
{
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {arg1, arg2, arg3};
- VKD3D_ASSERT(hlsl_types_are_equal(arg1->data_type, arg2->data_type));
- VKD3D_ASSERT(hlsl_types_are_equal(arg1->data_type, arg3->data_type));
return hlsl_new_expr(ctx, op, operands, arg1->data_type, &arg1->loc);
}
+struct hlsl_ir_node *hlsl_new_cast(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, struct hlsl_type *type,
+ const struct vkd3d_shader_location *loc)
+{
+ struct hlsl_ir_node *cast;
+
+ cast = hlsl_new_unary_expr(ctx, HLSL_OP1_CAST, node, loc);
+ if (cast)
+ cast->data_type = type;
+ return cast;
+}
+
+struct hlsl_ir_node *hlsl_block_add_cast(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_node *arg, struct hlsl_type *type, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_cast(ctx, arg, type, loc));
+}
+
static struct hlsl_ir_node *hlsl_new_error_expr(struct hlsl_ctx *ctx)
{
static const struct vkd3d_shader_location loc = {.source_name = "<error>"};
@@ -1694,6 +1858,23 @@ struct hlsl_ir_node *hlsl_new_if(struct hlsl_ctx *ctx, struct hlsl_ir_node *cond
return &iff->node;
}
+void hlsl_block_add_if(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_node *condition,
+ struct hlsl_block *then_block, struct hlsl_block *else_block, const struct vkd3d_shader_location *loc)
+{
+ struct hlsl_ir_node *instr = hlsl_new_if(ctx, condition, then_block, else_block, loc);
+
+ if (instr)
+ {
+ hlsl_block_add_instr(block, instr);
+ }
+ else
+ {
+ hlsl_block_cleanup(then_block);
+ if (else_block)
+ hlsl_block_cleanup(else_block);
+ }
+}
+
struct hlsl_ir_switch_case *hlsl_new_switch_case(struct hlsl_ctx *ctx, unsigned int value,
bool is_default, struct hlsl_block *body, const struct vkd3d_shader_location *loc)
{
@@ -1745,7 +1926,7 @@ struct hlsl_ir_load *hlsl_new_load_index(struct hlsl_ctx *ctx, const struct hlsl
return NULL;
init_node(&load->node, HLSL_IR_LOAD, type, loc);
- if (!init_deref(ctx, &load->src, deref->var, deref->path_len + !!idx))
+ if (!hlsl_init_deref(ctx, &load->src, deref->var, deref->path_len + !!idx))
{
vkd3d_free(load);
return NULL;
@@ -1758,6 +1939,14 @@ struct hlsl_ir_load *hlsl_new_load_index(struct hlsl_ctx *ctx, const struct hlsl
return load;
}
+struct hlsl_ir_node *hlsl_block_add_load_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *deref, struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc)
+{
+ struct hlsl_ir_load *load = hlsl_new_load_index(ctx, deref, idx, loc);
+
+ return append_new_instr(ctx, block, load ? &load->node : NULL);
+}
+
struct hlsl_ir_load *hlsl_new_load_parent(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
const struct vkd3d_shader_location *loc)
{
@@ -1780,17 +1969,27 @@ struct hlsl_ir_load *hlsl_new_var_load(struct hlsl_ctx *ctx, struct hlsl_ir_var
return hlsl_new_load_index(ctx, &var_deref, NULL, loc);
}
-struct hlsl_ir_node *hlsl_new_load_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
+struct hlsl_ir_node *hlsl_block_add_simple_load(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_var *var, const struct vkd3d_shader_location *loc)
+{
+ struct hlsl_deref var_deref;
+
+ hlsl_init_simple_deref_from_var(&var_deref, var);
+ return hlsl_block_add_load_index(ctx, block, &var_deref, NULL, loc);
+}
+
+struct hlsl_ir_node *hlsl_block_add_load_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
const struct hlsl_deref *deref, unsigned int comp, const struct vkd3d_shader_location *loc)
{
struct hlsl_type *type, *comp_type;
struct hlsl_block comp_path_block;
struct hlsl_ir_load *load;
- hlsl_block_init(block);
-
if (!(load = hlsl_alloc(ctx, sizeof(*load))))
- return NULL;
+ {
+ block->value = ctx->error_instr;
+ return ctx->error_instr;
+ }
type = hlsl_deref_get_type(ctx, deref);
comp_type = hlsl_type_get_component_type(ctx, type, comp);
@@ -1799,7 +1998,8 @@ struct hlsl_ir_node *hlsl_new_load_component(struct hlsl_ctx *ctx, struct hlsl_b
if (!init_deref_from_component_index(ctx, &comp_path_block, &load->src, deref, comp, loc))
{
vkd3d_free(load);
- return NULL;
+ block->value = ctx->error_instr;
+ return ctx->error_instr;
}
hlsl_block_add_block(block, &comp_path_block);
@@ -1808,7 +2008,7 @@ struct hlsl_ir_node *hlsl_new_load_component(struct hlsl_ctx *ctx, struct hlsl_b
return &load->node;
}
-struct hlsl_ir_node *hlsl_new_resource_load(struct hlsl_ctx *ctx,
+static struct hlsl_ir_node *hlsl_new_resource_load(struct hlsl_ctx *ctx,
const struct hlsl_resource_load_params *params, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_resource_load *load;
@@ -1847,7 +2047,13 @@ struct hlsl_ir_node *hlsl_new_resource_load(struct hlsl_ctx *ctx,
return &load->node;
}
-struct hlsl_ir_node *hlsl_new_resource_store(struct hlsl_ctx *ctx, const struct hlsl_deref *resource,
+struct hlsl_ir_node *hlsl_block_add_resource_load(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_resource_load_params *params, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_resource_load(ctx, params, loc));
+}
+
+static struct hlsl_ir_node *hlsl_new_resource_store(struct hlsl_ctx *ctx, const struct hlsl_deref *resource,
struct hlsl_ir_node *coords, struct hlsl_ir_node *value, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_resource_store *store;
@@ -1861,12 +2067,21 @@ struct hlsl_ir_node *hlsl_new_resource_store(struct hlsl_ctx *ctx, const struct
return &store->node;
}
+void hlsl_block_add_resource_store(struct hlsl_ctx *ctx, struct hlsl_block *block, const struct hlsl_deref *resource,
+ struct hlsl_ir_node *coords, struct hlsl_ir_node *value, const struct vkd3d_shader_location *loc)
+{
+ append_new_instr(ctx, block, hlsl_new_resource_store(ctx, resource, coords, value, loc));
+}
+
struct hlsl_ir_node *hlsl_new_swizzle(struct hlsl_ctx *ctx, uint32_t s, unsigned int component_count,
struct hlsl_ir_node *val, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_swizzle *swizzle;
struct hlsl_type *type;
+ if (val->data_type->class == HLSL_CLASS_ERROR)
+ return val;
+
VKD3D_ASSERT(val->data_type->class <= HLSL_CLASS_VECTOR);
if (!(swizzle = hlsl_alloc(ctx, sizeof(*swizzle))))
@@ -1882,6 +2097,12 @@ struct hlsl_ir_node *hlsl_new_swizzle(struct hlsl_ctx *ctx, uint32_t s, unsigned
return &swizzle->node;
}
+struct hlsl_ir_node *hlsl_block_add_swizzle(struct hlsl_ctx *ctx, struct hlsl_block *block, uint32_t s,
+ unsigned int width, struct hlsl_ir_node *val, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_swizzle(ctx, s, width, val, loc));
+}
+
struct hlsl_ir_node *hlsl_new_matrix_swizzle(struct hlsl_ctx *ctx, struct hlsl_matrix_swizzle s,
unsigned int component_count, struct hlsl_ir_node *val, const struct vkd3d_shader_location *loc)
{
@@ -2078,7 +2299,7 @@ bool hlsl_index_chain_has_resource_access(struct hlsl_ir_index *index)
return false;
}
-struct hlsl_ir_node *hlsl_new_index(struct hlsl_ctx *ctx, struct hlsl_ir_node *val,
+static struct hlsl_ir_node *hlsl_new_index(struct hlsl_ctx *ctx, struct hlsl_ir_node *val,
struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc)
{
struct hlsl_type *type = val->data_type;
@@ -2100,7 +2321,13 @@ struct hlsl_ir_node *hlsl_new_index(struct hlsl_ctx *ctx, struct hlsl_ir_node *v
return &index->node;
}
-struct hlsl_ir_node *hlsl_new_jump(struct hlsl_ctx *ctx, enum hlsl_ir_jump_type type,
+struct hlsl_ir_node *hlsl_block_add_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_node *val, struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc)
+{
+ return append_new_instr(ctx, block, hlsl_new_index(ctx, val, idx, loc));
+}
+
+static struct hlsl_ir_node *hlsl_new_jump(struct hlsl_ctx *ctx, enum hlsl_ir_jump_type type,
struct hlsl_ir_node *condition, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_jump *jump;
@@ -2113,7 +2340,13 @@ struct hlsl_ir_node *hlsl_new_jump(struct hlsl_ctx *ctx, enum hlsl_ir_jump_type
return &jump->node;
}
-struct hlsl_ir_node *hlsl_new_loop(struct hlsl_ctx *ctx, struct hlsl_block *iter,
+void hlsl_block_add_jump(struct hlsl_ctx *ctx, struct hlsl_block *block, enum hlsl_ir_jump_type type,
+ struct hlsl_ir_node *condition, const struct vkd3d_shader_location *loc)
+{
+ append_new_instr(ctx, block, hlsl_new_jump(ctx, type, condition, loc));
+}
+
+static struct hlsl_ir_node *hlsl_new_loop(struct hlsl_ctx *ctx, struct hlsl_block *iter,
struct hlsl_block *block, enum hlsl_loop_unroll_type unroll_type,
unsigned int unroll_limit, const struct vkd3d_shader_location *loc)
{
@@ -2134,6 +2367,18 @@ struct hlsl_ir_node *hlsl_new_loop(struct hlsl_ctx *ctx, struct hlsl_block *iter
return &loop->node;
}
+void hlsl_block_add_loop(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_block *iter, struct hlsl_block *body, enum hlsl_loop_unroll_type unroll_type,
+ unsigned int unroll_limit, const struct vkd3d_shader_location *loc)
+{
+ struct hlsl_ir_node *instr = hlsl_new_loop(ctx, iter, body, unroll_type, unroll_limit, loc);
+
+ if (instr)
+ hlsl_block_add_instr(block, instr);
+ else
+ hlsl_block_cleanup(body);
+}
+
struct clone_instr_map
{
struct
@@ -2203,7 +2448,7 @@ static bool clone_deref(struct hlsl_ctx *ctx, struct clone_instr_map *map,
VKD3D_ASSERT(!hlsl_deref_is_lowered(src));
- if (!init_deref(ctx, dst, src->var, src->path_len))
+ if (!hlsl_init_deref(ctx, dst, src->var, src->path_len))
return false;
for (i = 0; i < src->path_len; ++i)
@@ -2650,8 +2895,8 @@ struct hlsl_ir_function_decl *hlsl_new_func_decl(struct hlsl_ctx *ctx,
struct hlsl_type *return_type, const struct hlsl_func_parameters *parameters,
const struct hlsl_semantic *semantic, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *constant, *store;
struct hlsl_ir_function_decl *decl;
+ struct hlsl_ir_node *constant;
if (!(decl = hlsl_alloc(ctx, sizeof(*decl))))
return NULL;
@@ -2679,9 +2924,7 @@ struct hlsl_ir_function_decl *hlsl_new_func_decl(struct hlsl_ctx *ctx,
return decl;
hlsl_block_add_instr(&decl->body, constant);
- if (!(store = hlsl_new_simple_store(ctx, decl->early_return_var, constant)))
- return decl;
- hlsl_block_add_instr(&decl->body, store);
+ hlsl_block_add_simple_store(ctx, &decl->body, decl->early_return_var, constant);
return decl;
}
@@ -2796,6 +3039,7 @@ static void hlsl_dump_type(struct vkd3d_string_buffer *buffer, const struct hlsl
[HLSL_TYPE_HALF] = "half",
[HLSL_TYPE_DOUBLE] = "double",
[HLSL_TYPE_INT] = "int",
+ [HLSL_TYPE_MIN16UINT] = "min16uint",
[HLSL_TYPE_UINT] = "uint",
[HLSL_TYPE_BOOL] = "bool",
};
@@ -3040,6 +3284,16 @@ struct vkd3d_string_buffer *hlsl_modifiers_to_string(struct hlsl_ctx *ctx, uint3
vkd3d_string_buffer_printf(string, "row_major ");
if (modifiers & HLSL_MODIFIER_COLUMN_MAJOR)
vkd3d_string_buffer_printf(string, "column_major ");
+ if (modifiers & HLSL_PRIMITIVE_POINT)
+ vkd3d_string_buffer_printf(string, "point ");
+ if (modifiers & HLSL_PRIMITIVE_LINE)
+ vkd3d_string_buffer_printf(string, "line ");
+ if (modifiers & HLSL_PRIMITIVE_TRIANGLE)
+ vkd3d_string_buffer_printf(string, "triangle ");
+ if (modifiers & HLSL_PRIMITIVE_LINEADJ)
+ vkd3d_string_buffer_printf(string, "lineadj ");
+ if (modifiers & HLSL_PRIMITIVE_TRIANGLEADJ)
+ vkd3d_string_buffer_printf(string, "triangleadj ");
if ((modifiers & (HLSL_STORAGE_IN | HLSL_STORAGE_OUT)) == (HLSL_STORAGE_IN | HLSL_STORAGE_OUT))
vkd3d_string_buffer_printf(string, "inout ");
else if (modifiers & HLSL_STORAGE_IN)
@@ -3263,6 +3517,7 @@ static void dump_ir_constant(struct vkd3d_string_buffer *buffer, const struct hl
vkd3d_string_buffer_printf(buffer, "%d ", value->i);
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
vkd3d_string_buffer_printf(buffer, "%u ", value->u);
break;
@@ -4289,17 +4544,17 @@ static void declare_predefined_types(struct hlsl_ctx *ctx)
static const char * const names[] =
{
- [HLSL_TYPE_FLOAT] = "float",
- [HLSL_TYPE_HALF] = "half",
- [HLSL_TYPE_DOUBLE] = "double",
- [HLSL_TYPE_INT] = "int",
- [HLSL_TYPE_UINT] = "uint",
- [HLSL_TYPE_BOOL] = "bool",
+ [HLSL_TYPE_FLOAT] = "float",
+ [HLSL_TYPE_HALF] = "half",
+ [HLSL_TYPE_DOUBLE] = "double",
+ [HLSL_TYPE_INT] = "int",
+ [HLSL_TYPE_UINT] = "uint",
+ [HLSL_TYPE_BOOL] = "bool",
+ [HLSL_TYPE_MIN16UINT] = "min16uint",
};
static const char *const variants_float[] = {"min10float", "min16float"};
static const char *const variants_int[] = {"min12int", "min16int"};
- static const char *const variants_uint[] = {"min16uint"};
static const char *const sampler_names[] =
{
@@ -4390,11 +4645,6 @@ static void declare_predefined_types(struct hlsl_ctx *ctx)
n_variants = ARRAY_SIZE(variants_int);
break;
- case HLSL_TYPE_UINT:
- variants = variants_uint;
- n_variants = ARRAY_SIZE(variants_uint);
- break;
-
default:
n_variants = 0;
variants = NULL;
@@ -4577,6 +4827,8 @@ static bool hlsl_ctx_init(struct hlsl_ctx *ctx, const struct vkd3d_shader_compil
ctx->output_primitive = 0;
ctx->partitioning = 0;
ctx->input_control_point_count = UINT_MAX;
+ ctx->max_vertex_count = 0;
+ ctx->input_primitive_type = VKD3D_PT_UNDEFINED;
return true;
}
@@ -4742,6 +4994,7 @@ int hlsl_compile_shader(const struct vkd3d_shader_code *hlsl, const struct vkd3d
if (target_type == VKD3D_SHADER_TARGET_SPIRV_BINARY
|| target_type == VKD3D_SHADER_TARGET_SPIRV_TEXT
+ || target_type == VKD3D_SHADER_TARGET_GLSL
|| target_type == VKD3D_SHADER_TARGET_D3D_ASM)
{
uint64_t config_flags = vkd3d_shader_init_config_flags();
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.h b/libs/vkd3d/libs/vkd3d-shader/hlsl.h
index f614e12036e..fafa5740963 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.h
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.h
@@ -103,6 +103,7 @@ enum hlsl_base_type
HLSL_TYPE_DOUBLE,
HLSL_TYPE_INT,
HLSL_TYPE_UINT,
+ HLSL_TYPE_MIN16UINT,
HLSL_TYPE_BOOL,
HLSL_TYPE_LAST_SCALAR = HLSL_TYPE_BOOL,
};
@@ -416,6 +417,11 @@ struct hlsl_attribute
#define HLSL_STORAGE_ANNOTATION 0x00080000
#define HLSL_MODIFIER_UNORM 0x00100000
#define HLSL_MODIFIER_SNORM 0x00200000
+#define HLSL_PRIMITIVE_POINT 0x00400000
+#define HLSL_PRIMITIVE_LINE 0x00800000
+#define HLSL_PRIMITIVE_TRIANGLE 0x01000000
+#define HLSL_PRIMITIVE_LINEADJ 0x02000000
+#define HLSL_PRIMITIVE_TRIANGLEADJ 0x04000000
#define HLSL_TYPE_MODIFIERS_MASK (HLSL_MODIFIER_PRECISE | HLSL_MODIFIER_VOLATILE | \
HLSL_MODIFIER_CONST | HLSL_MODIFIER_ROW_MAJOR | \
@@ -426,6 +432,9 @@ struct hlsl_attribute
#define HLSL_MODIFIERS_MAJORITY_MASK (HLSL_MODIFIER_ROW_MAJOR | HLSL_MODIFIER_COLUMN_MAJOR)
+#define HLSL_PRIMITIVE_MODIFIERS_MASK (HLSL_PRIMITIVE_POINT | HLSL_PRIMITIVE_LINE | HLSL_PRIMITIVE_TRIANGLE | \
+ HLSL_PRIMITIVE_LINEADJ | HLSL_PRIMITIVE_TRIANGLEADJ)
+
#define HLSL_ARRAY_ELEMENTS_COUNT_IMPLICIT 0
/* Reservation of a register and/or an offset for objects inside constant buffers, to be used as a
@@ -482,6 +491,9 @@ struct hlsl_ir_var
union hlsl_constant_value_component number;
} *default_values;
+ /* Pointer to the temp copy of the variable, in case it is uniform. */
+ struct hlsl_ir_var *temp_copy;
+
/* A dynamic array containing the state block on the variable's declaration, if any.
* An array variable may contain multiple state blocks.
* A technique pass will always contain one.
@@ -1143,6 +1155,7 @@ struct hlsl_ctx
struct hlsl_constant_register
{
uint32_t index;
+ uint32_t allocated_mask;
struct hlsl_vec4 value;
struct vkd3d_shader_location loc;
} *regs;
@@ -1180,10 +1193,18 @@ struct hlsl_ctx
unsigned int input_control_point_count;
struct hlsl_type *input_control_point_type;
+ /* The first declared input primitive parameter in tessellation and geometry shaders. */
+ struct hlsl_ir_var *input_primitive_param;
+
/* Whether the current function being processed during HLSL codegen is
* the patch constant function in a hull shader. */
bool is_patch_constant_func;
+ /* The maximum output vertex count of a geometry shader. */
+ unsigned int max_vertex_count;
+ /* The input primitive type of a geometry shader. */
+ enum vkd3d_primitive_type input_primitive_type;
+
/* In some cases we generate opcodes by parsing an HLSL function and then
* invoking it. If not NULL, this field is the name of the function that we
* are currently parsing, "mangled" with an internal prefix to avoid
@@ -1454,6 +1475,11 @@ static inline bool hlsl_is_numeric_type(const struct hlsl_type *type)
return type->class <= HLSL_CLASS_LAST_NUMERIC;
}
+static inline bool hlsl_is_vec1(const struct hlsl_type *type)
+{
+ return type->class == HLSL_CLASS_SCALAR || (type->class == HLSL_CLASS_VECTOR && type->e.numeric.dimx == 1);
+}
+
static inline unsigned int hlsl_sampler_dim_count(enum hlsl_sampler_dim dim)
{
switch (dim)
@@ -1502,6 +1528,52 @@ struct hlsl_ir_node *hlsl_add_conditional(struct hlsl_ctx *ctx, struct hlsl_bloc
void hlsl_add_function(struct hlsl_ctx *ctx, char *name, struct hlsl_ir_function_decl *decl);
void hlsl_add_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *decl);
+struct hlsl_ir_node *hlsl_block_add_binary_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2);
+struct hlsl_ir_node *hlsl_block_add_cast(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_node *arg, struct hlsl_type *type, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ enum hlsl_ir_expr_op op, struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS],
+ struct hlsl_type *data_type, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_float_constant(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ float f, const struct vkd3d_shader_location *loc);
+void hlsl_block_add_if(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_node *condition,
+ struct hlsl_block *then_block, struct hlsl_block *else_block, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_node *val, struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_int_constant(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ int32_t n, const struct vkd3d_shader_location *loc);
+void hlsl_block_add_jump(struct hlsl_ctx *ctx, struct hlsl_block *block, enum hlsl_ir_jump_type type,
+ struct hlsl_ir_node *condition, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_load_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *deref, unsigned int comp, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_load_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *deref, struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc);
+void hlsl_block_add_loop(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_block *iter, struct hlsl_block *body, enum hlsl_loop_unroll_type unroll_type,
+ unsigned int unroll_limit, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_resource_load(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_resource_load_params *params, const struct vkd3d_shader_location *loc);
+void hlsl_block_add_resource_store(struct hlsl_ctx *ctx, struct hlsl_block *block, const struct hlsl_deref *resource,
+ struct hlsl_ir_node *coords, struct hlsl_ir_node *value, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_simple_load(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_var *var, const struct vkd3d_shader_location *loc);
+void hlsl_block_add_simple_store(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_var *lhs, struct hlsl_ir_node *rhs);
+void hlsl_block_add_store_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *lhs, unsigned int comp, struct hlsl_ir_node *rhs);
+void hlsl_block_add_store_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *lhs, struct hlsl_ir_node *idx, struct hlsl_ir_node *rhs,
+ unsigned int writemask, const struct vkd3d_shader_location *loc);
+void hlsl_block_add_store_parent(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct hlsl_deref *lhs, unsigned int path_len, struct hlsl_ir_node *rhs,
+ unsigned int writemask, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_swizzle(struct hlsl_ctx *ctx, struct hlsl_block *block, uint32_t s,
+ unsigned int width, struct hlsl_ir_node *val, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_uint_constant(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ unsigned int n, const struct vkd3d_shader_location *loc);
+struct hlsl_ir_node *hlsl_block_add_unary_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg, const struct vkd3d_shader_location *loc);
void hlsl_block_cleanup(struct hlsl_block *block);
bool hlsl_clone_block(struct hlsl_ctx *ctx, struct hlsl_block *dst_block, const struct hlsl_block *src_block);
@@ -1524,6 +1596,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
enum vkd3d_shader_target_type target_type, struct vkd3d_shader_code *out);
int hlsl_emit_effect_binary(struct hlsl_ctx *ctx, struct vkd3d_shader_code *out);
+bool hlsl_init_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hlsl_ir_var *var, unsigned int path_len);
bool hlsl_init_deref_from_index_chain(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hlsl_ir_node *chain);
bool hlsl_copy_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, const struct hlsl_deref *other);
@@ -1573,19 +1646,11 @@ struct hlsl_ir_node *hlsl_new_cast(struct hlsl_ctx *ctx, struct hlsl_ir_node *no
struct hlsl_ir_node *hlsl_new_constant(struct hlsl_ctx *ctx, struct hlsl_type *type,
const struct hlsl_constant_value *value, const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_new_copy(struct hlsl_ctx *ctx, struct hlsl_ir_node *node);
-struct hlsl_ir_node *hlsl_new_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
- struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS],
- struct hlsl_type *data_type, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_float_constant(struct hlsl_ctx *ctx,
- float f, const struct vkd3d_shader_location *loc);
struct hlsl_ir_function_decl *hlsl_new_func_decl(struct hlsl_ctx *ctx,
struct hlsl_type *return_type, const struct hlsl_func_parameters *parameters,
const struct hlsl_semantic *semantic, const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_new_if(struct hlsl_ctx *ctx, struct hlsl_ir_node *condition,
struct hlsl_block *then_block, struct hlsl_block *else_block, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_int_constant(struct hlsl_ctx *ctx, int32_t n, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_jump(struct hlsl_ctx *ctx,
- enum hlsl_ir_jump_type type, struct hlsl_ir_node *condition, const struct vkd3d_shader_location *loc);
struct hlsl_type *hlsl_new_stream_output_type(struct hlsl_ctx *ctx,
enum hlsl_so_object_type so_type, struct hlsl_type *type);
struct hlsl_ir_node *hlsl_new_ternary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op,
@@ -1599,16 +1664,12 @@ struct hlsl_ir_load *hlsl_new_load_index(struct hlsl_ctx *ctx, const struct hlsl
struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc);
struct hlsl_ir_load *hlsl_new_load_parent(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_load_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
- const struct hlsl_deref *deref, unsigned int comp, const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_add_load_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
struct hlsl_ir_node *var_instr, unsigned int comp, const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_new_simple_store(struct hlsl_ctx *ctx, struct hlsl_ir_var *lhs, struct hlsl_ir_node *rhs);
struct hlsl_ir_node *hlsl_new_store_index(struct hlsl_ctx *ctx, const struct hlsl_deref *lhs,
struct hlsl_ir_node *idx, struct hlsl_ir_node *rhs, unsigned int writemask, const struct vkd3d_shader_location *loc);
-bool hlsl_new_store_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
- const struct hlsl_deref *lhs, unsigned int comp, struct hlsl_ir_node *rhs);
bool hlsl_index_is_noncontiguous(struct hlsl_ir_index *index);
bool hlsl_index_is_resource_access(struct hlsl_ir_index *index);
@@ -1617,20 +1678,11 @@ bool hlsl_index_chain_has_resource_access(struct hlsl_ir_index *index);
struct hlsl_ir_node *hlsl_new_compile(struct hlsl_ctx *ctx, enum hlsl_compile_type compile_type,
const char *profile_name, struct hlsl_ir_node **args, unsigned int args_count,
struct hlsl_block *args_instrs, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_index(struct hlsl_ctx *ctx, struct hlsl_ir_node *val,
- struct hlsl_ir_node *idx, const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_new_interlocked(struct hlsl_ctx *ctx, enum hlsl_interlocked_op op, struct hlsl_type *type,
const struct hlsl_deref *dst, struct hlsl_ir_node *coords, struct hlsl_ir_node *cmp_value,
struct hlsl_ir_node *value, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_loop(struct hlsl_ctx *ctx, struct hlsl_block *iter,
- struct hlsl_block *block, enum hlsl_loop_unroll_type unroll_type,
- unsigned int unroll_limit, const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_new_matrix_swizzle(struct hlsl_ctx *ctx, struct hlsl_matrix_swizzle s,
unsigned int width, struct hlsl_ir_node *val, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_resource_load(struct hlsl_ctx *ctx,
- const struct hlsl_resource_load_params *params, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_resource_store(struct hlsl_ctx *ctx, const struct hlsl_deref *resource,
- struct hlsl_ir_node *coords, struct hlsl_ir_node *value, const struct vkd3d_shader_location *loc);
struct hlsl_type *hlsl_new_struct_type(struct hlsl_ctx *ctx, const char *name,
struct hlsl_struct_field *fields, size_t field_count);
struct hlsl_ir_node *hlsl_new_swizzle(struct hlsl_ctx *ctx, uint32_t s, unsigned int components,
@@ -1653,8 +1705,6 @@ struct hlsl_type *hlsl_new_cb_type(struct hlsl_ctx *ctx, struct hlsl_type *forma
struct hlsl_ir_node *hlsl_new_uint_constant(struct hlsl_ctx *ctx, unsigned int n,
const struct vkd3d_shader_location *loc);
struct hlsl_ir_node *hlsl_new_null_constant(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc);
-struct hlsl_ir_node *hlsl_new_unary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg,
- const struct vkd3d_shader_location *loc);
struct hlsl_ir_var *hlsl_new_var(struct hlsl_ctx *ctx, const char *name, struct hlsl_type *type,
const struct vkd3d_shader_location *loc, const struct hlsl_semantic *semantic, uint32_t modifiers,
const struct hlsl_reg_reservation *reg_reservation);
@@ -1677,6 +1727,8 @@ void hlsl_pop_scope(struct hlsl_ctx *ctx);
bool hlsl_scope_add_type(struct hlsl_scope *scope, struct hlsl_type *type);
+bool hlsl_base_type_is_integer(enum hlsl_base_type type);
+
struct hlsl_type *hlsl_type_clone(struct hlsl_ctx *ctx, struct hlsl_type *old,
unsigned int default_majority, uint32_t modifiers);
unsigned int hlsl_type_component_count(const struct hlsl_type *type);
@@ -1685,13 +1737,17 @@ struct hlsl_type *hlsl_type_get_component_type(struct hlsl_ctx *ctx, struct hlsl
unsigned int index);
unsigned int hlsl_type_get_component_offset(struct hlsl_ctx *ctx, struct hlsl_type *type,
unsigned int index, enum hlsl_regset *regset);
+bool hlsl_type_is_integer(const struct hlsl_type *type);
+bool hlsl_type_is_floating_point(const struct hlsl_type *type);
bool hlsl_type_is_row_major(const struct hlsl_type *type);
unsigned int hlsl_type_minor_size(const struct hlsl_type *type);
unsigned int hlsl_type_major_size(const struct hlsl_type *type);
unsigned int hlsl_type_element_count(const struct hlsl_type *type);
+bool hlsl_type_is_integer(const struct hlsl_type *type);
bool hlsl_type_is_resource(const struct hlsl_type *type);
bool hlsl_type_is_shader(const struct hlsl_type *type);
bool hlsl_type_is_patch_array(const struct hlsl_type *type);
+bool hlsl_type_is_primitive_array(const struct hlsl_type *type);
unsigned int hlsl_type_get_sm4_offset(const struct hlsl_type *type, unsigned int offset);
bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2);
@@ -1700,6 +1756,8 @@ void hlsl_calculate_buffer_offsets(struct hlsl_ctx *ctx);
const struct hlsl_type *hlsl_get_multiarray_element_type(const struct hlsl_type *type);
unsigned int hlsl_get_multiarray_size(const struct hlsl_type *type);
+const struct hlsl_type *hlsl_get_stream_output_type(const struct hlsl_type *type);
+
uint32_t hlsl_combine_swizzles(uint32_t first, uint32_t second, unsigned int dim);
unsigned int hlsl_combine_writemasks(unsigned int first, unsigned int second);
uint32_t hlsl_map_swizzle(uint32_t swizzle, unsigned int writemask);
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.l b/libs/vkd3d/libs/vkd3d-shader/hlsl.l
index 605a9abaa93..d9fd43b5e78 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.l
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.l
@@ -106,6 +106,8 @@ inline {return KW_INLINE; }
inout {return KW_INOUT; }
InputPatch {return KW_INPUTPATCH; }
LineStream {return KW_LINESTREAM; }
+line {return KW_LINE; }
+lineadj {return KW_LINEADJ; }
linear {return KW_LINEAR; }
matrix {return KW_MATRIX; }
namespace {return KW_NAMESPACE; }
@@ -119,6 +121,7 @@ pass {return KW_PASS; }
PixelShader {return KW_PIXELSHADER; }
PointStream {return KW_POINTSTREAM; }
pixelshader {return KW_PIXELSHADER; }
+point {return KW_POINT; }
RasterizerOrderedBuffer {return KW_RASTERIZERORDEREDBUFFER; }
RasterizerOrderedStructuredBuffer {return KW_RASTERIZERORDEREDSTRUCTUREDBUFFER; }
RasterizerOrderedTexture1D {return KW_RASTERIZERORDEREDTEXTURE1D; }
@@ -175,6 +178,8 @@ TextureCube {return KW_TEXTURECUBE; }
textureCUBE {return KW_TEXTURECUBE; }
TextureCubeArray {return KW_TEXTURECUBEARRAY; }
TriangleStream {return KW_TRIANGLESTREAM; }
+triangle {return KW_TRIANGLE; }
+triangleadj {return KW_TRIANGLEADJ; }
true {return KW_TRUE; }
typedef {return KW_TYPEDEF; }
unsigned {return KW_UNSIGNED; }
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.y b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
index 7afc9274c2e..ff3d58da8f4 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.y
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
@@ -351,7 +351,6 @@ static struct hlsl_ir_node *add_cast(struct hlsl_ctx *ctx, struct hlsl_block *bl
struct hlsl_ir_node *node, struct hlsl_type *dst_type, const struct vkd3d_shader_location *loc)
{
struct hlsl_type *src_type = node->data_type;
- struct hlsl_ir_node *cast;
if (hlsl_types_are_equal(src_type, dst_type))
return node;
@@ -359,11 +358,7 @@ static struct hlsl_ir_node *add_cast(struct hlsl_ctx *ctx, struct hlsl_block *bl
if (src_type->class == HLSL_CLASS_NULL)
return node;
- if (!(cast = hlsl_new_cast(ctx, node, dst_type, loc)))
- return NULL;
- hlsl_block_add_instr(block, cast);
-
- return cast;
+ return hlsl_block_add_cast(ctx, block, node, dst_type, loc);
}
static struct hlsl_ir_node *add_implicit_conversion(struct hlsl_ctx *ctx, struct hlsl_block *block,
@@ -377,7 +372,15 @@ static struct hlsl_ir_node *add_implicit_conversion(struct hlsl_ctx *ctx, struct
if (node->type == HLSL_IR_SAMPLER_STATE && dst_type->class == HLSL_CLASS_SAMPLER)
return node;
- if (!implicit_compatible_data_types(ctx, src_type, dst_type))
+ if (implicit_compatible_data_types(ctx, src_type, dst_type))
+ {
+ if (hlsl_is_numeric_type(dst_type) && hlsl_is_numeric_type(src_type)
+ && dst_type->e.numeric.dimx * dst_type->e.numeric.dimy < src_type->e.numeric.dimx * src_type->e.numeric.dimy
+ && ctx->warn_implicit_truncation)
+ hlsl_warning(ctx, loc, VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION, "Implicit truncation of %s type.",
+ src_type->class == HLSL_CLASS_VECTOR ? "vector" : "matrix");
+ }
+ else
{
struct vkd3d_string_buffer *src_string, *dst_string;
@@ -388,19 +391,12 @@ static struct hlsl_ir_node *add_implicit_conversion(struct hlsl_ctx *ctx, struct
"Can't implicitly convert from %s to %s.", src_string->buffer, dst_string->buffer);
hlsl_release_string_buffer(ctx, src_string);
hlsl_release_string_buffer(ctx, dst_string);
- return NULL;
}
- if (hlsl_is_numeric_type(dst_type) && hlsl_is_numeric_type(src_type)
- && dst_type->e.numeric.dimx * dst_type->e.numeric.dimy < src_type->e.numeric.dimx * src_type->e.numeric.dimy
- && ctx->warn_implicit_truncation)
- hlsl_warning(ctx, loc, VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION, "Implicit truncation of %s type.",
- src_type->class == HLSL_CLASS_VECTOR ? "vector" : "matrix");
-
return add_cast(ctx, block, node, dst_type, loc);
}
-static bool add_explicit_conversion(struct hlsl_ctx *ctx, struct hlsl_block *block,
+static void add_explicit_conversion(struct hlsl_ctx *ctx, struct hlsl_block *block,
struct hlsl_type *dst_type, const struct parse_array_sizes *arrays, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *instr = node_from_block(block);
@@ -419,7 +415,7 @@ static bool add_explicit_conversion(struct hlsl_ctx *ctx, struct hlsl_block *blo
}
if (instr->data_type->class == HLSL_CLASS_ERROR)
- return true;
+ return;
if (!explicit_compatible_data_types(ctx, src_type, dst_type))
{
@@ -432,10 +428,9 @@ static bool add_explicit_conversion(struct hlsl_ctx *ctx, struct hlsl_block *blo
src_string->buffer, dst_string->buffer);
hlsl_release_string_buffer(ctx, src_string);
hlsl_release_string_buffer(ctx, dst_string);
- return false;
}
- return add_cast(ctx, block, instr, dst_type, loc);
+ add_cast(ctx, block, instr, dst_type, loc);
}
static uint32_t add_modifiers(struct hlsl_ctx *ctx, uint32_t modifiers, uint32_t mod,
@@ -454,15 +449,15 @@ static uint32_t add_modifiers(struct hlsl_ctx *ctx, uint32_t modifiers, uint32_t
return modifiers | mod;
}
-static bool append_conditional_break(struct hlsl_ctx *ctx, struct hlsl_block *cond_block)
+static void append_conditional_break(struct hlsl_ctx *ctx, struct hlsl_block *cond_block)
{
- struct hlsl_ir_node *condition, *cast, *not, *iff, *jump;
+ struct hlsl_ir_node *condition, *cast, *not;
struct hlsl_block then_block;
struct hlsl_type *bool_type;
/* E.g. "for (i = 0; ; ++i)". */
if (list_empty(&cond_block->instrs))
- return true;
+ return;
condition = node_from_block(cond_block);
@@ -471,23 +466,12 @@ static bool append_conditional_break(struct hlsl_ctx *ctx, struct hlsl_block *co
bool_type = hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL);
/* We already checked for a 1-component numeric type, so
* add_implicit_conversion() is equivalent to add_cast() here. */
- if (!(cast = add_cast(ctx, cond_block, condition, bool_type, &condition->loc)))
- return false;
-
- if (!(not = hlsl_new_unary_expr(ctx, HLSL_OP1_LOGIC_NOT, cast, &condition->loc)))
- return false;
- hlsl_block_add_instr(cond_block, not);
+ cast = add_cast(ctx, cond_block, condition, bool_type, &condition->loc);
+ not = hlsl_block_add_unary_expr(ctx, cond_block, HLSL_OP1_LOGIC_NOT, cast, &condition->loc);
hlsl_block_init(&then_block);
-
- if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, NULL, &condition->loc)))
- return false;
- hlsl_block_add_instr(&then_block, jump);
-
- if (!(iff = hlsl_new_if(ctx, not, &then_block, NULL, &condition->loc)))
- return false;
- hlsl_block_add_instr(cond_block, iff);
- return true;
+ hlsl_block_add_jump(ctx, &then_block, HLSL_IR_JUMP_BREAK, NULL, &condition->loc);
+ hlsl_block_add_if(ctx, cond_block, not, &then_block, NULL, &condition->loc);
}
static void check_attribute_list_for_duplicates(struct hlsl_ctx *ctx, const struct parse_attribute_list *attrs)
@@ -531,11 +515,7 @@ static void resolve_loop_continue(struct hlsl_ctx *ctx, struct hlsl_block *block
{
if (!hlsl_clone_block(ctx, &cond_block, cond))
return;
- if (!append_conditional_break(ctx, &cond_block))
- {
- hlsl_block_cleanup(&cond_block);
- return;
- }
+ append_conditional_break(ctx, &cond_block);
list_move_before(&instr->entry, &cond_block.instrs);
}
}
@@ -611,11 +591,7 @@ static struct hlsl_default_value evaluate_static_expression(struct hlsl_ctx *ctx
return ret;
hlsl_block_add_block(&expr, block);
- if (!(node = add_implicit_conversion(ctx, &expr, node_from_block(&expr), dst_type, loc)))
- {
- hlsl_block_cleanup(&expr);
- return ret;
- }
+ node = add_implicit_conversion(ctx, &expr, node_from_block(&expr), dst_type, loc);
/* Wrap the node into a src to allow the reference to survive the multiple const passes. */
hlsl_src_from_node(&src, node);
@@ -668,7 +644,6 @@ static struct hlsl_block *create_loop(struct hlsl_ctx *ctx, enum hlsl_loop_type
{
enum hlsl_loop_unroll_type unroll_type = HLSL_LOOP_UNROLL;
unsigned int i, unroll_limit = 0;
- struct hlsl_ir_node *loop;
check_attribute_list_for_duplicates(ctx, attributes);
check_loop_attributes(ctx, attributes, loc);
@@ -719,17 +694,14 @@ static struct hlsl_block *create_loop(struct hlsl_ctx *ctx, enum hlsl_loop_type
if (!init && !(init = make_empty_block(ctx)))
goto oom;
- if (!append_conditional_break(ctx, cond))
- goto oom;
+ append_conditional_break(ctx, cond);
if (type == HLSL_LOOP_DO_WHILE)
list_move_tail(&body->instrs, &cond->instrs);
else
list_move_head(&body->instrs, &cond->instrs);
- if (!(loop = hlsl_new_loop(ctx, iter, body, unroll_type, unroll_limit, loc)))
- goto oom;
- hlsl_block_add_instr(init, loop);
+ hlsl_block_add_loop(ctx, init, iter, body, unroll_type, unroll_limit, loc);
destroy_block(cond);
destroy_block(body);
@@ -858,23 +830,16 @@ static bool add_return(struct hlsl_ctx *ctx, struct hlsl_block *block,
struct hlsl_ir_node *return_value, const struct vkd3d_shader_location *loc)
{
struct hlsl_type *return_type = ctx->cur_function->return_type;
- struct hlsl_ir_node *jump;
if (ctx->cur_function->return_var)
{
if (return_value)
{
- struct hlsl_ir_node *store;
-
if (return_value->data_type->class == HLSL_CLASS_ERROR)
return true;
- if (!(return_value = add_implicit_conversion(ctx, block, return_value, return_type, loc)))
- return false;
-
- if (!(store = hlsl_new_simple_store(ctx, ctx->cur_function->return_var, return_value)))
- return false;
- list_add_after(&return_value->entry, &store->entry);
+ return_value = add_implicit_conversion(ctx, block, return_value, return_type, loc);
+ hlsl_block_add_simple_store(ctx, block, ctx->cur_function->return_var, return_value);
}
else
{
@@ -888,52 +853,37 @@ static bool add_return(struct hlsl_ctx *ctx, struct hlsl_block *block,
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_RETURN, "Void functions cannot return a value.");
}
- if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_RETURN, NULL, loc)))
- return false;
- hlsl_block_add_instr(block, jump);
-
+ hlsl_block_add_jump(ctx, block, HLSL_IR_JUMP_RETURN, NULL, loc);
return true;
}
struct hlsl_ir_node *hlsl_add_load_component(struct hlsl_ctx *ctx, struct hlsl_block *block,
struct hlsl_ir_node *var_instr, unsigned int comp, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *load, *store;
- struct hlsl_block load_block;
struct hlsl_ir_var *var;
struct hlsl_deref src;
if (!(var = hlsl_new_synthetic_var(ctx, "deref", var_instr->data_type, &var_instr->loc)))
- return NULL;
+ {
+ block->value = ctx->error_instr;
+ return ctx->error_instr;
+ }
- if (!(store = hlsl_new_simple_store(ctx, var, var_instr)))
- return NULL;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_simple_store(ctx, block, var, var_instr);
hlsl_init_simple_deref_from_var(&src, var);
- if (!(load = hlsl_new_load_component(ctx, &load_block, &src, comp, loc)))
- return NULL;
- hlsl_block_add_block(block, &load_block);
-
- return load;
+ return hlsl_block_add_load_component(ctx, block, &src, comp, loc);
}
-static bool add_record_access(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_node *record,
+static void add_record_access(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_node *record,
unsigned int idx, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *index, *c;
+ struct hlsl_ir_node *c;
VKD3D_ASSERT(idx < record->data_type->e.record.field_count);
- if (!(c = hlsl_new_uint_constant(ctx, idx, loc)))
- return false;
- hlsl_block_add_instr(block, c);
-
- if (!(index = hlsl_new_index(ctx, record, c, loc)))
- return false;
- hlsl_block_add_instr(block, index);
-
- return true;
+ c = hlsl_block_add_uint_constant(ctx, block, idx, loc);
+ hlsl_block_add_index(ctx, block, record, c, loc);
}
static struct hlsl_ir_node *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
@@ -944,7 +894,6 @@ static bool add_array_access(struct hlsl_ctx *ctx, struct hlsl_block *block, str
struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
{
const struct hlsl_type *expr_type = array->data_type, *index_type = index->data_type;
- struct hlsl_ir_node *return_index, *cast;
if (array->data_type->class == HLSL_CLASS_ERROR || index->data_type->class == HLSL_CLASS_ERROR)
{
@@ -968,14 +917,9 @@ static bool add_array_access(struct hlsl_ctx *ctx, struct hlsl_block *block, str
return false;
}
- if (!(index = add_implicit_conversion(ctx, block, index,
- hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, dim_count), &index->loc)))
- return false;
-
- if (!(return_index = hlsl_new_index(ctx, array, index, loc)))
- return false;
- hlsl_block_add_instr(block, return_index);
-
+ index = add_implicit_conversion(ctx, block, index,
+ hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, dim_count), &index->loc);
+ hlsl_block_add_index(ctx, block, array, index, loc);
return true;
}
@@ -985,10 +929,7 @@ static bool add_array_access(struct hlsl_ctx *ctx, struct hlsl_block *block, str
return false;
}
- if (!(cast = hlsl_new_cast(ctx, index, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), &index->loc)))
- return false;
- hlsl_block_add_instr(block, cast);
- index = cast;
+ index = hlsl_block_add_cast(ctx, block, index, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), &index->loc);
if (expr_type->class != HLSL_CLASS_ARRAY && expr_type->class != HLSL_CLASS_VECTOR && expr_type->class != HLSL_CLASS_MATRIX)
{
@@ -999,10 +940,7 @@ static bool add_array_access(struct hlsl_ctx *ctx, struct hlsl_block *block, str
return false;
}
- if (!(return_index = hlsl_new_index(ctx, array, index, loc)))
- return false;
- hlsl_block_add_instr(block, return_index);
-
+ hlsl_block_add_index(ctx, block, array, index, loc);
return true;
}
@@ -1144,31 +1082,34 @@ static bool gen_struct_fields(struct hlsl_ctx *ctx, struct parse_fields *fields,
return true;
}
-static bool add_record_access_recurse(struct hlsl_ctx *ctx, struct hlsl_block *block,
+static void add_record_access_recurse(struct hlsl_ctx *ctx, struct hlsl_block *block,
const char *name, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *record = node_from_block(block);
const struct hlsl_type *type = record->data_type;
const struct hlsl_struct_field *field, *base;
+ if (type->class == HLSL_CLASS_ERROR)
+ return;
+
if ((field = get_struct_field(type->e.record.fields, type->e.record.field_count, name)))
{
unsigned int field_idx = field - type->e.record.fields;
- return add_record_access(ctx, block, record, field_idx, loc);
+ add_record_access(ctx, block, record, field_idx, loc);
}
else if ((base = get_struct_field(type->e.record.fields, type->e.record.field_count, "$super")))
{
unsigned int base_idx = base - type->e.record.fields;
- if (!add_record_access(ctx, block, record, base_idx, loc))
- return false;
- return add_record_access_recurse(ctx, block, name, loc);
+ add_record_access(ctx, block, record, base_idx, loc);
+ add_record_access_recurse(ctx, block, name, loc);
+ }
+ else
+ {
+ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED, "Field \"%s\" is not defined.", name);
+ block->value = ctx->error_instr;
}
-
- hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_NOT_DEFINED, "Field \"%s\" is not defined.", name);
- block->value = ctx->error_instr;
- return true;
}
static bool add_typedef(struct hlsl_ctx *ctx, struct hlsl_type *const orig_type, struct list *list)
@@ -1239,6 +1180,14 @@ static bool add_typedef(struct hlsl_ctx *ctx, struct hlsl_type *const orig_type,
return true;
}
+static void check_invalid_stream_output_object(struct hlsl_ctx *ctx, const struct hlsl_type *type,
+ const char *name, const struct vkd3d_shader_location* loc)
+{
+ if (hlsl_type_component_count(type) != 1)
+ hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
+ "Stream output object '%s' is not single-element.", name);
+}
+
static void initialize_var_components(struct hlsl_ctx *ctx, struct hlsl_block *instrs,
struct hlsl_ir_var *dst, unsigned int *store_index, struct hlsl_ir_node *src,
bool is_default_values_initializer);
@@ -1273,6 +1222,9 @@ static bool add_func_parameter(struct hlsl_ctx *ctx, struct hlsl_func_parameters
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
"Output parameter '%s' has a default value.", param->name);
+ if (hlsl_get_stream_output_type(param->type))
+ check_invalid_stream_output_object(ctx, param->type, param->name, loc);
+
if (!(var = hlsl_new_var(ctx, param->name, param->type, loc, &param->semantic, param->modifiers,
&param->reg_reservation)))
return false;
@@ -1289,9 +1241,7 @@ static bool add_func_parameter(struct hlsl_ctx *ctx, struct hlsl_func_parameters
if (!param->initializer.braces)
{
- if (!(add_implicit_conversion(ctx, param->initializer.instrs, param->initializer.args[0], param->type, loc)))
- return false;
-
+ add_implicit_conversion(ctx, param->initializer.instrs, param->initializer.args[0], param->type, loc);
param->initializer.args[0] = node_from_block(param->initializer.instrs);
}
@@ -1517,7 +1467,11 @@ static enum hlsl_base_type expr_common_base_type(enum hlsl_base_type t1, enum hl
return HLSL_TYPE_FLOAT;
if (t1 == HLSL_TYPE_UINT || t2 == HLSL_TYPE_UINT)
return HLSL_TYPE_UINT;
- return HLSL_TYPE_INT;
+ if (t1 == HLSL_TYPE_INT || t2 == HLSL_TYPE_INT)
+ return HLSL_TYPE_INT;
+ if (t1 == HLSL_TYPE_MIN16UINT || t2 == HLSL_TYPE_MIN16UINT)
+ return HLSL_TYPE_MIN16UINT;
+ vkd3d_unreachable();
}
static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct hlsl_type *t2,
@@ -1600,15 +1554,12 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct hlsl_block *bl
enum hlsl_ir_expr_op op, struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS],
struct hlsl_type *type, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *expr;
unsigned int i;
if (type->class == HLSL_CLASS_MATRIX)
{
struct hlsl_type *scalar_type;
- struct hlsl_ir_load *var_load;
struct hlsl_deref var_deref;
- struct hlsl_ir_node *load;
struct hlsl_ir_var *var;
scalar_type = hlsl_get_scalar_type(ctx, type->e.numeric.type);
@@ -1620,58 +1571,24 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct hlsl_block *bl
for (i = 0; i < type->e.numeric.dimy * type->e.numeric.dimx; ++i)
{
struct hlsl_ir_node *value, *cell_operands[HLSL_MAX_OPERANDS] = { NULL };
- struct hlsl_block store_block;
unsigned int j;
for (j = 0; j < HLSL_MAX_OPERANDS; j++)
{
if (operands[j])
- {
- if (!(load = hlsl_add_load_component(ctx, block, operands[j], i, loc)))
- return NULL;
-
- cell_operands[j] = load;
- }
+ cell_operands[j] = hlsl_add_load_component(ctx, block, operands[j], i, loc);
}
if (!(value = add_expr(ctx, block, op, cell_operands, scalar_type, loc)))
return NULL;
- if (!hlsl_new_store_component(ctx, &store_block, &var_deref, i, value))
- return NULL;
- hlsl_block_add_block(block, &store_block);
+ hlsl_block_add_store_component(ctx, block, &var_deref, i, value);
}
- if (!(var_load = hlsl_new_var_load(ctx, var, loc)))
- return NULL;
- hlsl_block_add_instr(block, &var_load->node);
-
- return &var_load->node;
+ return hlsl_block_add_simple_load(ctx, block, var, loc);
}
- if (!(expr = hlsl_new_expr(ctx, op, operands, type, loc)))
- return NULL;
- hlsl_block_add_instr(block, expr);
-
- return expr;
-}
-
-static bool type_is_integer(enum hlsl_base_type type)
-{
- switch (type)
- {
- case HLSL_TYPE_BOOL:
- case HLSL_TYPE_INT:
- case HLSL_TYPE_UINT:
- return true;
-
- case HLSL_TYPE_DOUBLE:
- case HLSL_TYPE_FLOAT:
- case HLSL_TYPE_HALF:
- return false;
- }
-
- vkd3d_unreachable();
+ return hlsl_block_add_expr(ctx, block, op, operands, type, loc);
}
static void check_integer_type(struct hlsl_ctx *ctx, const struct hlsl_ir_node *instr)
@@ -1679,7 +1596,7 @@ static void check_integer_type(struct hlsl_ctx *ctx, const struct hlsl_ir_node *
const struct hlsl_type *type = instr->data_type;
struct vkd3d_string_buffer *string;
- if (type_is_integer(type->e.numeric.type))
+ if (hlsl_type_is_integer(type))
return;
if ((string = hlsl_type_to_string(ctx, type)))
@@ -1721,10 +1638,7 @@ static struct hlsl_ir_node *add_unary_logical_expr(struct hlsl_ctx *ctx, struct
bool_type = hlsl_get_numeric_type(ctx, arg->data_type->class, HLSL_TYPE_BOOL,
arg->data_type->e.numeric.dimx, arg->data_type->e.numeric.dimy);
-
- if (!(args[0] = add_implicit_conversion(ctx, block, arg, bool_type, loc)))
- return NULL;
-
+ args[0] = add_implicit_conversion(ctx, block, arg, bool_type, loc);
return add_expr(ctx, block, op, args, bool_type, loc);
}
@@ -1754,12 +1668,8 @@ static struct hlsl_ir_node *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, str
return block->value;
}
- if (!(args[0] = add_implicit_conversion(ctx, block, arg1, common_type, loc)))
- return NULL;
-
- if (!(args[1] = add_implicit_conversion(ctx, block, arg2, common_type, loc)))
- return NULL;
-
+ args[0] = add_implicit_conversion(ctx, block, arg1, common_type, loc);
+ args[1] = add_implicit_conversion(ctx, block, arg2, common_type, loc);
return add_expr(ctx, block, op, args, common_type, loc);
}
@@ -1790,12 +1700,8 @@ static struct hlsl_ir_node *add_binary_comparison_expr(struct hlsl_ctx *ctx, str
common_type = hlsl_get_numeric_type(ctx, type, base, dimx, dimy);
return_type = hlsl_get_numeric_type(ctx, type, HLSL_TYPE_BOOL, dimx, dimy);
- if (!(args[0] = add_implicit_conversion(ctx, block, arg1, common_type, loc)))
- return NULL;
-
- if (!(args[1] = add_implicit_conversion(ctx, block, arg2, common_type, loc)))
- return NULL;
-
+ args[0] = add_implicit_conversion(ctx, block, arg1, common_type, loc);
+ args[1] = add_implicit_conversion(ctx, block, arg2, common_type, loc);
return add_expr(ctx, block, op, args, return_type, loc);
}
@@ -1813,12 +1719,8 @@ static struct hlsl_ir_node *add_binary_logical_expr(struct hlsl_ctx *ctx, struct
common_type = hlsl_get_numeric_type(ctx, type, HLSL_TYPE_BOOL, dimx, dimy);
- if (!(args[0] = add_implicit_conversion(ctx, block, arg1, common_type, loc)))
- return NULL;
-
- if (!(args[1] = add_implicit_conversion(ctx, block, arg2, common_type, loc)))
- return NULL;
-
+ args[0] = add_implicit_conversion(ctx, block, arg1, common_type, loc);
+ args[1] = add_implicit_conversion(ctx, block, arg2, common_type, loc);
return add_expr(ctx, block, op, args, common_type, loc);
}
@@ -1844,12 +1746,8 @@ static struct hlsl_ir_node *add_binary_shift_expr(struct hlsl_ctx *ctx, struct h
return_type = hlsl_get_numeric_type(ctx, type, base, dimx, dimy);
integer_type = hlsl_get_numeric_type(ctx, type, HLSL_TYPE_INT, dimx, dimy);
- if (!(args[0] = add_implicit_conversion(ctx, block, arg1, return_type, loc)))
- return NULL;
-
- if (!(args[1] = add_implicit_conversion(ctx, block, arg2, integer_type, loc)))
- return NULL;
-
+ args[0] = add_implicit_conversion(ctx, block, arg1, return_type, loc);
+ args[1] = add_implicit_conversion(ctx, block, arg2, integer_type, loc);
return add_expr(ctx, block, op, args, return_type, loc);
}
@@ -1897,12 +1795,8 @@ static struct hlsl_ir_node *add_binary_dot_expr(struct hlsl_ctx *ctx, struct hls
common_type = hlsl_get_vector_type(ctx, base, dim);
ret_type = hlsl_get_scalar_type(ctx, base);
- if (!(args[0] = add_implicit_conversion(ctx, instrs, arg1, common_type, loc)))
- return NULL;
-
- if (!(args[1] = add_implicit_conversion(ctx, instrs, arg2, common_type, loc)))
- return NULL;
-
+ args[0] = add_implicit_conversion(ctx, instrs, arg1, common_type, loc);
+ args[1] = add_implicit_conversion(ctx, instrs, arg2, common_type, loc);
return add_expr(ctx, instrs, op, args, ret_type, loc);
}
@@ -2098,8 +1992,7 @@ static bool add_assignment(struct hlsl_ctx *ctx, struct hlsl_block *block, struc
width = size;
}
- if (!(rhs = add_implicit_conversion(ctx, block, rhs, lhs_type, &rhs->loc)))
- return false;
+ rhs = add_implicit_conversion(ctx, block, rhs, lhs_type, &rhs->loc);
while (lhs->type != HLSL_IR_LOAD && lhs->type != HLSL_IR_INDEX)
{
@@ -2129,7 +2022,6 @@ static bool add_assignment(struct hlsl_ctx *ctx, struct hlsl_block *block, struc
else if (lhs->type == HLSL_IR_SWIZZLE)
{
struct hlsl_ir_swizzle *swizzle = hlsl_ir_swizzle(lhs);
- struct hlsl_ir_node *new_swizzle;
uint32_t s;
VKD3D_ASSERT(!matrix_writemask);
@@ -2160,13 +2052,9 @@ static bool add_assignment(struct hlsl_ctx *ctx, struct hlsl_block *block, struc
}
}
- if (!(new_swizzle = hlsl_new_swizzle(ctx, s, width, rhs, &swizzle->node.loc)))
- return false;
- hlsl_block_add_instr(block, new_swizzle);
-
+ rhs = hlsl_block_add_swizzle(ctx, block, s, width, rhs, &swizzle->node.loc);
lhs = swizzle->val.node;
lhs_type = hlsl_get_vector_type(ctx, lhs_type->e.numeric.type, width);
- rhs = new_swizzle;
}
else
{
@@ -2178,15 +2066,13 @@ static bool add_assignment(struct hlsl_ctx *ctx, struct hlsl_block *block, struc
/* lhs casts could have resulted in a discrepancy between the
* rhs->data_type and the type of the variable that will be ulimately
* stored to. This is corrected. */
- if (!(rhs = add_cast(ctx, block, rhs, lhs_type, &rhs->loc)))
- return false;
+ rhs = add_cast(ctx, block, rhs, lhs_type, &rhs->loc);
if (lhs->type == HLSL_IR_INDEX && hlsl_index_chain_has_resource_access(hlsl_ir_index(lhs)))
{
struct hlsl_ir_node *coords = hlsl_ir_index(lhs)->idx.node;
struct hlsl_deref resource_deref;
struct hlsl_type *resource_type;
- struct hlsl_ir_node *store;
unsigned int dim_count;
if (!hlsl_index_is_resource_access(hlsl_ir_index(lhs)))
@@ -2215,12 +2101,7 @@ static bool add_assignment(struct hlsl_ctx *ctx, struct hlsl_block *block, struc
VKD3D_ASSERT(coords->data_type->e.numeric.type == HLSL_TYPE_UINT);
VKD3D_ASSERT(coords->data_type->e.numeric.dimx == dim_count);
- if (!(store = hlsl_new_resource_store(ctx, &resource_deref, coords, rhs, &lhs->loc)))
- {
- hlsl_cleanup_deref(&resource_deref);
- return false;
- }
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_resource_store(ctx, block, &resource_deref, coords, rhs, &lhs->loc);
hlsl_cleanup_deref(&resource_deref);
}
else if (matrix_writemask)
@@ -2235,25 +2116,14 @@ static bool add_assignment(struct hlsl_ctx *ctx, struct hlsl_block *block, struc
for (j = 0; j < lhs->data_type->e.numeric.dimx; ++j)
{
struct hlsl_ir_node *load;
- struct hlsl_block store_block;
const unsigned int idx = i * 4 + j;
const unsigned int component = i * lhs->data_type->e.numeric.dimx + j;
if (!(writemask & (1 << idx)))
continue;
- if (!(load = hlsl_add_load_component(ctx, block, rhs, k++, &rhs->loc)))
- {
- hlsl_cleanup_deref(&deref);
- return false;
- }
-
- if (!hlsl_new_store_component(ctx, &store_block, &deref, component, load))
- {
- hlsl_cleanup_deref(&deref);
- return false;
- }
- hlsl_block_add_block(block, &store_block);
+ load = hlsl_add_load_component(ctx, block, rhs, k++, &rhs->loc);
+ hlsl_block_add_store_component(ctx, block, &deref, component, load);
}
}
@@ -2269,49 +2139,32 @@ static bool add_assignment(struct hlsl_ctx *ctx, struct hlsl_block *block, struc
for (i = 0; i < mat->data_type->e.numeric.dimx; ++i)
{
- struct hlsl_ir_node *cell, *load, *store, *c;
+ struct hlsl_ir_node *cell, *load, *c;
struct hlsl_deref deref;
if (!(writemask & (1 << i)))
continue;
- if (!(c = hlsl_new_uint_constant(ctx, i, &lhs->loc)))
- return false;
- hlsl_block_add_instr(block, c);
-
- if (!(cell = hlsl_new_index(ctx, &row->node, c, &lhs->loc)))
- return false;
- hlsl_block_add_instr(block, cell);
+ c = hlsl_block_add_uint_constant(ctx, block, i, &lhs->loc);
- if (!(load = hlsl_add_load_component(ctx, block, rhs, k++, &rhs->loc)))
- return false;
+ cell = hlsl_block_add_index(ctx, block, &row->node, c, &lhs->loc);
+ load = hlsl_add_load_component(ctx, block, rhs, k++, &rhs->loc);
if (!hlsl_init_deref_from_index_chain(ctx, &deref, cell))
return false;
- if (!(store = hlsl_new_store_index(ctx, &deref, NULL, load, 0, &rhs->loc)))
- {
- hlsl_cleanup_deref(&deref);
- return false;
- }
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_store_index(ctx, block, &deref, NULL, load, 0, &rhs->loc);
hlsl_cleanup_deref(&deref);
}
}
else
{
- struct hlsl_ir_node *store;
struct hlsl_deref deref;
if (!hlsl_init_deref_from_index_chain(ctx, &deref, lhs))
return false;
- if (!(store = hlsl_new_store_index(ctx, &deref, NULL, rhs, writemask, &rhs->loc)))
- {
- hlsl_cleanup_deref(&deref);
- return false;
- }
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_store_index(ctx, block, &deref, NULL, rhs, writemask, &rhs->loc);
hlsl_cleanup_deref(&deref);
}
@@ -2332,9 +2185,7 @@ static bool add_increment(struct hlsl_ctx *ctx, struct hlsl_block *block, bool d
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_MODIFIES_CONST,
"Argument to %s%screment operator is const.", post ? "post" : "pre", decrement ? "de" : "in");
- if (!(one = hlsl_new_int_constant(ctx, 1, loc)))
- return false;
- hlsl_block_add_instr(block, one);
+ one = hlsl_block_add_int_constant(ctx, block, 1, loc);
if (!add_assignment(ctx, block, lhs, decrement ? ASSIGN_OP_SUB : ASSIGN_OP_ADD, one, false))
return false;
@@ -2371,8 +2222,7 @@ static void initialize_var_components(struct hlsl_ctx *ctx, struct hlsl_block *i
struct hlsl_type *dst_comp_type;
struct hlsl_block block;
- if (!(load = hlsl_add_load_component(ctx, instrs, src, k, &src->loc)))
- return;
+ load = hlsl_add_load_component(ctx, instrs, src, k, &src->loc);
dst_comp_type = hlsl_type_get_component_type(ctx, dst->data_type, *store_index);
@@ -2438,12 +2288,8 @@ static void initialize_var_components(struct hlsl_ctx *ctx, struct hlsl_block *i
}
else
{
- if (!(conv = add_implicit_conversion(ctx, instrs, load, dst_comp_type, &src->loc)))
- return;
-
- if (!hlsl_new_store_component(ctx, &block, &dst_deref, *store_index, conv))
- return;
- hlsl_block_add_block(instrs, &block);
+ conv = add_implicit_conversion(ctx, instrs, load, dst_comp_type, &src->loc);
+ hlsl_block_add_store_component(ctx, instrs, &dst_deref, *store_index, conv);
}
}
@@ -2516,10 +2362,10 @@ static bool type_has_numeric_components(struct hlsl_type *type)
return false;
}
-static void check_invalid_in_out_modifiers(struct hlsl_ctx *ctx, unsigned int modifiers,
+static void check_invalid_non_parameter_modifiers(struct hlsl_ctx *ctx, unsigned int modifiers,
const struct vkd3d_shader_location *loc)
{
- modifiers &= (HLSL_STORAGE_IN | HLSL_STORAGE_OUT);
+ modifiers &= (HLSL_STORAGE_IN | HLSL_STORAGE_OUT | HLSL_PRIMITIVE_MODIFIERS_MASK);
if (modifiers)
{
struct vkd3d_string_buffer *string;
@@ -2553,6 +2399,7 @@ static void declare_var(struct hlsl_ctx *ctx, struct parse_variable_def *v)
bool constant_buffer = false;
struct hlsl_ir_var *var;
struct hlsl_type *type;
+ bool stream_output;
char *var_name;
unsigned int i;
@@ -2644,6 +2491,10 @@ static void declare_var(struct hlsl_ctx *ctx, struct parse_variable_def *v)
hlsl_fixme(ctx, &v->loc, "Shader model 5.1+ resource array.");
}
+ stream_output = !!hlsl_get_stream_output_type(type);
+ if (stream_output)
+ check_invalid_stream_output_object(ctx, type, v->name, &v->loc);
+
if (!(var_name = vkd3d_strdup(v->name)))
return;
@@ -2698,6 +2549,10 @@ static void declare_var(struct hlsl_ctx *ctx, struct parse_variable_def *v)
if (!(modifiers & HLSL_STORAGE_STATIC))
var->storage_modifiers |= HLSL_STORAGE_UNIFORM;
+ if (stream_output)
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_MISPLACED_STREAM_OUTPUT,
+ "Stream output object '%s' is not allowed in the global scope.", var->name);
+
if ((ctx->profile->major_version < 5 || ctx->profile->type == VKD3D_SHADER_TYPE_EFFECT)
&& (var->storage_modifiers & HLSL_STORAGE_UNIFORM))
{
@@ -2828,15 +2683,8 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var
}
if (!v->initializer.braces)
- {
- if (!(add_implicit_conversion(ctx, v->initializer.instrs, v->initializer.args[0], type, &v->loc)))
- {
- free_parse_variable_def(v);
- continue;
- }
-
- v->initializer.args[0] = node_from_block(v->initializer.instrs);
- }
+ v->initializer.args[0] = add_implicit_conversion(ctx,
+ v->initializer.instrs, v->initializer.args[0], type, &v->loc);
if (var->data_type->class != HLSL_CLASS_ERROR)
initialize_var(ctx, var, &v->initializer, is_default_values_initializer);
@@ -2859,7 +2707,7 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var
}
else if (var->storage_modifiers & HLSL_STORAGE_STATIC)
{
- struct hlsl_ir_node *cast, *store, *zero;
+ struct hlsl_ir_node *cast, *zero;
/* Initialize statics to zero by default. */
@@ -2869,25 +2717,9 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var
continue;
}
- if (!(zero = hlsl_new_uint_constant(ctx, 0, &var->loc)))
- {
- free_parse_variable_def(v);
- continue;
- }
- hlsl_block_add_instr(&ctx->static_initializers, zero);
-
- if (!(cast = add_cast(ctx, &ctx->static_initializers, zero, var->data_type, &var->loc)))
- {
- free_parse_variable_def(v);
- continue;
- }
-
- if (!(store = hlsl_new_simple_store(ctx, var, cast)))
- {
- free_parse_variable_def(v);
- continue;
- }
- hlsl_block_add_instr(&ctx->static_initializers, store);
+ zero = hlsl_block_add_uint_constant(ctx, &ctx->static_initializers, 0, &var->loc);
+ cast = add_cast(ctx, &ctx->static_initializers, zero, var->data_type, &var->loc);
+ hlsl_block_add_simple_store(ctx, &ctx->static_initializers, var, cast);
}
free_parse_variable_def(v);
}
@@ -2934,6 +2766,7 @@ static enum hlsl_base_type hlsl_base_type_class(enum hlsl_base_type t)
return HLSL_TYPE_FLOAT;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
return HLSL_TYPE_INT;
@@ -2949,6 +2782,7 @@ static unsigned int hlsl_base_type_width(enum hlsl_base_type t)
switch (t)
{
case HLSL_TYPE_HALF:
+ case HLSL_TYPE_MIN16UINT:
return 16;
case HLSL_TYPE_FLOAT:
@@ -3123,11 +2957,12 @@ static struct hlsl_ir_function_decl *find_function_call(struct hlsl_ctx *ctx,
return decl;
}
-static struct hlsl_ir_node *hlsl_new_void_expr(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc)
+static void add_void_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
- return hlsl_new_expr(ctx, HLSL_OP0_VOID, operands, ctx->builtin_types.Void, loc);
+ hlsl_block_add_expr(ctx, block, HLSL_OP0_VOID, operands, ctx->builtin_types.Void, loc);
}
static struct hlsl_ir_node *add_user_call(struct hlsl_ctx *ctx,
@@ -3154,20 +2989,10 @@ static struct hlsl_ir_node *add_user_call(struct hlsl_ctx *ctx,
if (param->storage_modifiers & HLSL_STORAGE_IN)
{
- struct hlsl_ir_node *store;
-
if (!hlsl_types_are_equal(arg->data_type, param->data_type))
- {
- struct hlsl_ir_node *cast;
-
- if (!(cast = add_cast(ctx, args->instrs, arg, param->data_type, &arg->loc)))
- return NULL;
- arg = cast;
- }
+ arg = add_cast(ctx, args->instrs, arg, param->data_type, &arg->loc);
- if (!(store = hlsl_new_simple_store(ctx, param, arg)))
- return NULL;
- hlsl_block_add_instr(args->instrs, store);
+ hlsl_block_add_simple_store(ctx, args->instrs, param, arg);
}
++k;
@@ -3192,7 +3017,6 @@ static struct hlsl_ir_node *add_user_call(struct hlsl_ctx *ctx,
struct hlsl_type *type = hlsl_type_get_component_type(ctx, param->data_type, j);
struct hlsl_constant_value value;
struct hlsl_ir_node *comp;
- struct hlsl_block store_block;
if (!param->default_values[j].string)
{
@@ -3201,9 +3025,7 @@ static struct hlsl_ir_node *add_user_call(struct hlsl_ctx *ctx,
return NULL;
hlsl_block_add_instr(args->instrs, comp);
- if (!hlsl_new_store_component(ctx, &store_block, &param_deref, j, comp))
- return NULL;
- hlsl_block_add_block(args->instrs, &store_block);
+ hlsl_block_add_store_component(ctx, args->instrs, &param_deref, j, comp);
}
}
}
@@ -3222,37 +3044,22 @@ static struct hlsl_ir_node *add_user_call(struct hlsl_ctx *ctx,
if (param->storage_modifiers & HLSL_STORAGE_OUT)
{
- struct hlsl_ir_load *load;
+ struct hlsl_ir_node *load;
if (arg->data_type->modifiers & HLSL_MODIFIER_CONST)
hlsl_error(ctx, &arg->loc, VKD3D_SHADER_ERROR_HLSL_MODIFIES_CONST,
"Output argument to \"%s\" is const.", func->func->name);
- if (!(load = hlsl_new_var_load(ctx, param, &arg->loc)))
- return NULL;
- hlsl_block_add_instr(args->instrs, &load->node);
-
- if (!add_assignment(ctx, args->instrs, arg, ASSIGN_OP_ASSIGN, &load->node, true))
+ load = hlsl_block_add_simple_load(ctx, args->instrs, param, &arg->loc);
+ if (!add_assignment(ctx, args->instrs, arg, ASSIGN_OP_ASSIGN, load, true))
return NULL;
}
}
if (func->return_var)
- {
- struct hlsl_ir_load *load;
-
- if (!(load = hlsl_new_var_load(ctx, func->return_var, loc)))
- return false;
- hlsl_block_add_instr(args->instrs, &load->node);
- }
+ hlsl_block_add_simple_load(ctx, args->instrs, func->return_var, loc);
else
- {
- struct hlsl_ir_node *expr;
-
- if (!(expr = hlsl_new_void_expr(ctx, loc)))
- return false;
- hlsl_block_add_instr(args->instrs, expr);
- }
+ add_void_expr(ctx, args->instrs, loc);
return call;
}
@@ -3262,28 +3069,20 @@ static struct hlsl_ir_node *intrinsic_float_convert_arg(struct hlsl_ctx *ctx,
{
struct hlsl_type *type = arg->data_type;
- if (!type_is_integer(type->e.numeric.type))
+ if (!hlsl_type_is_integer(type))
return arg;
type = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_FLOAT, type->e.numeric.dimx, type->e.numeric.dimy);
return add_implicit_conversion(ctx, params->instrs, arg, type, loc);
}
-static bool convert_args(struct hlsl_ctx *ctx, const struct parse_initializer *params,
+static void convert_args(struct hlsl_ctx *ctx, const struct parse_initializer *params,
struct hlsl_type *type, const struct vkd3d_shader_location *loc)
{
unsigned int i;
for (i = 0; i < params->args_count; ++i)
- {
- struct hlsl_ir_node *new_arg;
-
- if (!(new_arg = add_implicit_conversion(ctx, params->instrs, params->args[i], type, loc)))
- return false;
- params->args[i] = new_arg;
- }
-
- return true;
+ params->args[i] = add_implicit_conversion(ctx, params->instrs, params->args[i], type, loc);
}
static struct hlsl_type *elementwise_intrinsic_get_common_type(struct hlsl_ctx *ctx,
@@ -3344,7 +3143,8 @@ static bool elementwise_intrinsic_convert_args(struct hlsl_ctx *ctx,
if (!(common_type = elementwise_intrinsic_get_common_type(ctx, params, loc)))
return false;
- return convert_args(ctx, params, common_type, loc);
+ convert_args(ctx, params, common_type, loc);
+ return true;
}
static bool elementwise_intrinsic_float_convert_args(struct hlsl_ctx *ctx,
@@ -3354,10 +3154,11 @@ static bool elementwise_intrinsic_float_convert_args(struct hlsl_ctx *ctx,
if (!(type = elementwise_intrinsic_get_common_type(ctx, params, loc)))
return false;
- if (type_is_integer(type->e.numeric.type))
+ if (hlsl_type_is_integer(type))
type = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_FLOAT, type->e.numeric.dimx, type->e.numeric.dimy);
- return convert_args(ctx, params, type, loc);
+ convert_args(ctx, params, type, loc);
+ return true;
}
static bool elementwise_intrinsic_uint_convert_args(struct hlsl_ctx *ctx,
@@ -3370,7 +3171,8 @@ static bool elementwise_intrinsic_uint_convert_args(struct hlsl_ctx *ctx,
type = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_UINT, type->e.numeric.dimx, type->e.numeric.dimy);
- return convert_args(ctx, params, type, loc);
+ convert_args(ctx, params, type, loc);
+ return true;
}
static bool intrinsic_abs(struct hlsl_ctx *ctx,
@@ -3407,8 +3209,7 @@ static bool write_acos_or_asin(struct hlsl_ctx *ctx,
const char *fn_name = asin_mode ? fn_name_asin : fn_name_acos;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
type = arg->data_type;
if (!(body = hlsl_sprintf_alloc(ctx, template,
@@ -3438,7 +3239,7 @@ static struct hlsl_type *convert_numeric_type(const struct hlsl_ctx *ctx,
return hlsl_get_numeric_type(ctx, type->class, base_type, type->e.numeric.dimx, type->e.numeric.dimy);
}
-static bool add_combine_components(struct hlsl_ctx *ctx, const struct parse_initializer *params,
+static void add_combine_components(struct hlsl_ctx *ctx, const struct parse_initializer *params,
struct hlsl_ir_node *arg, enum hlsl_ir_expr_op op, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *res, *load;
@@ -3446,20 +3247,13 @@ static bool add_combine_components(struct hlsl_ctx *ctx, const struct parse_init
count = hlsl_type_component_count(arg->data_type);
- if (!(res = hlsl_add_load_component(ctx, params->instrs, arg, 0, loc)))
- return false;
+ res = hlsl_add_load_component(ctx, params->instrs, arg, 0, loc);
for (i = 1; i < count; ++i)
{
- if (!(load = hlsl_add_load_component(ctx, params->instrs, arg, i, loc)))
- return false;
-
- if (!(res = hlsl_new_binary_expr(ctx, op, res, load)))
- return NULL;
- hlsl_block_add_instr(params->instrs, res);
+ load = hlsl_add_load_component(ctx, params->instrs, arg, i, loc);
+ res = hlsl_block_add_binary_expr(ctx, params->instrs, op, res, load);
}
-
- return true;
}
static bool intrinsic_all(struct hlsl_ctx *ctx,
@@ -3469,10 +3263,9 @@ static bool intrinsic_all(struct hlsl_ctx *ctx,
struct hlsl_type *bool_type;
bool_type = convert_numeric_type(ctx, arg->data_type, HLSL_TYPE_BOOL);
- if (!(cast = add_cast(ctx, params->instrs, arg, bool_type, loc)))
- return false;
-
- return add_combine_components(ctx, params, cast, HLSL_OP2_LOGIC_AND, loc);
+ cast = add_cast(ctx, params->instrs, arg, bool_type, loc);
+ add_combine_components(ctx, params, cast, HLSL_OP2_LOGIC_AND, loc);
+ return true;
}
static bool intrinsic_any(struct hlsl_ctx *ctx, const struct parse_initializer *params,
@@ -3482,10 +3275,9 @@ static bool intrinsic_any(struct hlsl_ctx *ctx, const struct parse_initializer *
struct hlsl_type *bool_type;
bool_type = convert_numeric_type(ctx, arg->data_type, HLSL_TYPE_BOOL);
- if (!(cast = add_cast(ctx, params->instrs, arg, bool_type, loc)))
- return false;
-
- return add_combine_components(ctx, params, cast, HLSL_OP2_LOGIC_OR, loc);
+ cast = add_cast(ctx, params->instrs, arg, bool_type, loc);
+ add_combine_components(ctx, params, cast, HLSL_OP2_LOGIC_OR, loc);
+ return true;
}
static bool intrinsic_asin(struct hlsl_ctx *ctx,
@@ -3671,10 +3463,7 @@ static bool intrinsic_asuint(struct hlsl_ctx *ctx,
static bool intrinsic_ceil(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *arg;
-
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ struct hlsl_ir_node *arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_CEIL, arg, loc);
}
@@ -3696,7 +3485,7 @@ static bool intrinsic_clamp(struct hlsl_ctx *ctx,
static bool intrinsic_clip(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *condition, *jump;
+ struct hlsl_ir_node *condition;
if (!elementwise_intrinsic_float_convert_args(ctx, params, loc))
return false;
@@ -3714,20 +3503,14 @@ static bool intrinsic_clip(struct hlsl_ctx *ctx,
return false;
}
- if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_DISCARD_NEG, condition, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, jump);
-
+ hlsl_block_add_jump(ctx, params->instrs, HLSL_IR_JUMP_DISCARD_NEG, condition, loc);
return true;
}
static bool intrinsic_cos(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *arg;
-
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ struct hlsl_ir_node *arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_COS, arg, loc);
}
@@ -3748,8 +3531,7 @@ static bool write_cosh_or_sinh(struct hlsl_ctx *ctx,
static const char fn_name_sinh[] = "sinh";
static const char fn_name_cosh[] = "cosh";
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
type_name = arg->data_type->name;
fn_name = sinh_mode ? fn_name_sinh : fn_name_cosh;
@@ -3782,39 +3564,23 @@ static bool intrinsic_cross(struct hlsl_ctx *ctx,
enum hlsl_base_type base;
base = expr_common_base_type(arg1->data_type->e.numeric.type, arg2->data_type->e.numeric.type);
- if (type_is_integer(base))
+ if (hlsl_base_type_is_integer(base))
base = HLSL_TYPE_FLOAT;
cast_type = hlsl_get_vector_type(ctx, base, 3);
- if (!(arg1_cast = add_implicit_conversion(ctx, params->instrs, arg1, cast_type, loc)))
- return false;
-
- if (!(arg2_cast = add_implicit_conversion(ctx, params->instrs, arg2, cast_type, loc)))
- return false;
-
- if (!(arg1_swzl1 = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Z, X, Y, Z), 3, arg1_cast, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, arg1_swzl1);
-
- if (!(arg2_swzl1 = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Y, Z, X, Y), 3, arg2_cast, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, arg2_swzl1);
+ arg1_cast = add_implicit_conversion(ctx, params->instrs, arg1, cast_type, loc);
+ arg2_cast = add_implicit_conversion(ctx, params->instrs, arg2, cast_type, loc);
+ arg1_swzl1 = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(Z, X, Y, Z), 3, arg1_cast, loc);
+ arg2_swzl1 = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(Y, Z, X, Y), 3, arg2_cast, loc);
if (!(mul1 = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, arg1_swzl1, arg2_swzl1, loc)))
return false;
- if (!(mul1_neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, mul1, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, mul1_neg);
+ mul1_neg = hlsl_block_add_unary_expr(ctx, params->instrs, HLSL_OP1_NEG, mul1, loc);
- if (!(arg1_swzl2 = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Y, Z, X, Y), 3, arg1_cast, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, arg1_swzl2);
-
- if (!(arg2_swzl2 = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Z, X, Y, Z), 3, arg2_cast, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, arg2_swzl2);
+ arg1_swzl2 = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(Y, Z, X, Y), 3, arg1_cast, loc);
+ arg2_swzl2 = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(Z, X, Y, Z), 3, arg2_cast, loc);
if (!(mul2 = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, arg1_swzl2, arg2_swzl2, loc)))
return false;
@@ -3827,8 +3593,7 @@ static bool intrinsic_ddx(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_DSX, arg, loc);
}
@@ -3838,8 +3603,7 @@ static bool intrinsic_ddx_coarse(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_DSX_COARSE, arg, loc);
}
@@ -3849,8 +3613,7 @@ static bool intrinsic_ddx_fine(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_DSX_FINE, arg, loc);
}
@@ -3860,8 +3623,7 @@ static bool intrinsic_ddy(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_DSY, arg, loc);
}
@@ -3871,8 +3633,7 @@ static bool intrinsic_ddy_coarse(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_DSY_COARSE, arg, loc);
}
@@ -3882,14 +3643,10 @@ static bool intrinsic_degrees(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg, *deg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
/* 1 rad = 180/pi degree = 57.2957795 degree */
- if (!(deg = hlsl_new_float_constant(ctx, 57.2957795f, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, deg);
-
+ deg = hlsl_block_add_float_constant(ctx, params->instrs, 57.2957795f, loc);
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, arg, deg, loc);
}
@@ -3898,8 +3655,7 @@ static bool intrinsic_ddy_fine(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_DSY_FINE, arg, loc);
}
@@ -3953,8 +3709,7 @@ static bool intrinsic_determinant(struct hlsl_ctx *ctx,
return false;
}
- if (!(arg = intrinsic_float_convert_arg(ctx, params, arg, loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, arg, loc);
dim = min(type->e.numeric.dimx, type->e.numeric.dimy);
if (dim == 1)
@@ -3996,11 +3751,8 @@ static bool intrinsic_distance(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg1, *arg2, *neg, *add, *dot;
- if (!(arg1 = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
-
- if (!(arg2 = intrinsic_float_convert_arg(ctx, params, params->args[1], loc)))
- return false;
+ arg1 = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
+ arg2 = intrinsic_float_convert_arg(ctx, params, params->args[1], loc);
if (!(neg = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_NEG, arg2, loc)))
return false;
@@ -4069,13 +3821,10 @@ static bool intrinsic_exp(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg, *mul, *coeff;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
/* 1/ln(2) */
- if (!(coeff = hlsl_new_float_constant(ctx, 1.442695f, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, coeff);
+ coeff = hlsl_block_add_float_constant(ctx, params->instrs, 1.442695f, loc);
if (!(mul = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, coeff, arg, loc)))
return false;
@@ -4088,8 +3837,7 @@ static bool intrinsic_exp2(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_EXP2, arg, loc);
}
@@ -4157,8 +3905,7 @@ static bool intrinsic_floor(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_FLOOR, arg, loc);
}
@@ -4170,11 +3917,8 @@ static bool intrinsic_fmod(struct hlsl_ctx *ctx, const struct parse_initializer
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = { 0 };
static const struct hlsl_constant_value zero_value;
- if (!(x = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
-
- if (!(y = intrinsic_float_convert_arg(ctx, params, params->args[1], loc)))
- return false;
+ x = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
+ y = intrinsic_float_convert_arg(ctx, params, params->args[1], loc);
if (!(div = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_DIV, x, y, loc)))
return false;
@@ -4209,8 +3953,7 @@ static bool intrinsic_frac(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_FRACT, arg, loc);
}
@@ -4285,8 +4028,7 @@ static bool intrinsic_length(struct hlsl_ctx *ctx,
hlsl_release_string_buffer(ctx, string);
}
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
if (!(dot = add_binary_dot_expr(ctx, params->instrs, arg, arg, loc)))
return false;
@@ -4314,21 +4056,6 @@ static bool intrinsic_lerp(struct hlsl_ctx *ctx,
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_ADD, params->args[0], mul, loc);
}
-static struct hlsl_ir_node * add_pow_expr(struct hlsl_ctx *ctx,
- struct hlsl_block *instrs, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
- const struct vkd3d_shader_location *loc)
-{
- struct hlsl_ir_node *log, *mul;
-
- if (!(log = add_unary_arithmetic_expr(ctx, instrs, HLSL_OP1_LOG2, arg1, loc)))
- return NULL;
-
- if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, arg2, log, loc)))
- return NULL;
-
- return add_unary_arithmetic_expr(ctx, instrs, HLSL_OP1_EXP2, mul, loc);
-}
-
static bool intrinsic_lit(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
@@ -4363,17 +4090,13 @@ static bool intrinsic_log(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *log, *arg, *coeff;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
if (!(log = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_LOG2, arg, loc)))
return false;
/* ln(2) */
- if (!(coeff = hlsl_new_float_constant(ctx, 0.69314718055f, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, coeff);
-
+ coeff = hlsl_block_add_float_constant(ctx, params->instrs, 0.69314718055f, loc);
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, log, coeff, loc);
}
@@ -4382,17 +4105,13 @@ static bool intrinsic_log10(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *log, *arg, *coeff;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
if (!(log = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_LOG2, arg, loc)))
return false;
/* 1 / log2(10) */
- if (!(coeff = hlsl_new_float_constant(ctx, 0.301029996f, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, coeff);
-
+ coeff = hlsl_block_add_float_constant(ctx, params->instrs, 0.301029996f, loc);
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, log, coeff, loc);
}
@@ -4401,8 +4120,7 @@ static bool intrinsic_log2(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_LOG2, arg, loc);
}
@@ -4476,7 +4194,7 @@ static bool intrinsic_mul(struct hlsl_ctx *ctx,
struct hlsl_type *cast_type1 = arg1->data_type, *cast_type2 = arg2->data_type, *matrix_type, *ret_type;
unsigned int i, j, k, vect_count = 0;
struct hlsl_deref var_deref;
- struct hlsl_ir_load *load;
+ struct hlsl_ir_node *load;
struct hlsl_ir_var *var;
if (arg1->data_type->class == HLSL_CLASS_SCALAR || arg2->data_type->class == HLSL_CLASS_SCALAR)
@@ -4510,11 +4228,8 @@ static bool intrinsic_mul(struct hlsl_ctx *ctx,
ret_type = hlsl_get_scalar_type(ctx, base);
}
- if (!(cast1 = add_implicit_conversion(ctx, params->instrs, arg1, cast_type1, loc)))
- return false;
-
- if (!(cast2 = add_implicit_conversion(ctx, params->instrs, arg2, cast_type2, loc)))
- return false;
+ cast1 = add_implicit_conversion(ctx, params->instrs, arg1, cast_type1, loc);
+ cast2 = add_implicit_conversion(ctx, params->instrs, arg2, cast_type2, loc);
if (!(var = hlsl_new_synthetic_var(ctx, "mul", matrix_type, loc)))
return false;
@@ -4525,19 +4240,15 @@ static bool intrinsic_mul(struct hlsl_ctx *ctx,
for (j = 0; j < matrix_type->e.numeric.dimy; ++j)
{
struct hlsl_ir_node *instr = NULL;
- struct hlsl_block block;
for (k = 0; k < cast_type1->e.numeric.dimx && k < cast_type2->e.numeric.dimy; ++k)
{
struct hlsl_ir_node *value1, *value2, *mul;
- if (!(value1 = hlsl_add_load_component(ctx, params->instrs,
- cast1, j * cast1->data_type->e.numeric.dimx + k, loc)))
- return false;
-
- if (!(value2 = hlsl_add_load_component(ctx, params->instrs,
- cast2, k * cast2->data_type->e.numeric.dimx + i, loc)))
- return false;
+ value1 = hlsl_add_load_component(ctx, params->instrs,
+ cast1, j * cast1->data_type->e.numeric.dimx + k, loc);
+ value2 = hlsl_add_load_component(ctx, params->instrs,
+ cast2, k * cast2->data_type->e.numeric.dimx + i, loc);
if (!(mul = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, value1, value2, loc)))
return false;
@@ -4553,17 +4264,14 @@ static bool intrinsic_mul(struct hlsl_ctx *ctx,
}
}
- if (!hlsl_new_store_component(ctx, &block, &var_deref, j * matrix_type->e.numeric.dimx + i, instr))
- return false;
- hlsl_block_add_block(params->instrs, &block);
+ hlsl_block_add_store_component(ctx, params->instrs, &var_deref,
+ j * matrix_type->e.numeric.dimx + i, instr);
}
}
- if (!(load = hlsl_new_var_load(ctx, var, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, &load->node);
-
- return !!add_implicit_conversion(ctx, params->instrs, &load->node, ret_type, loc);
+ load = hlsl_block_add_simple_load(ctx, params->instrs, var, loc);
+ add_implicit_conversion(ctx, params->instrs, load, ret_type, loc);
+ return true;
}
static bool intrinsic_normalize(struct hlsl_ctx *ctx,
@@ -4582,8 +4290,7 @@ static bool intrinsic_normalize(struct hlsl_ctx *ctx,
hlsl_release_string_buffer(ctx, string);
}
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
if (!(dot = add_binary_dot_expr(ctx, params->instrs, arg, arg, loc)))
return false;
@@ -4597,10 +4304,18 @@ static bool intrinsic_normalize(struct hlsl_ctx *ctx,
static bool intrinsic_pow(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
+ struct hlsl_ir_node *log, *mul;
+
if (!elementwise_intrinsic_float_convert_args(ctx, params, loc))
return false;
- return !!add_pow_expr(ctx, params->instrs, params->args[0], params->args[1], loc);
+ if (!(log = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_LOG2, params->args[0], loc)))
+ return NULL;
+
+ if (!(mul = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, params->args[1], log, loc)))
+ return NULL;
+
+ return add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_EXP2, mul, loc);
}
static bool intrinsic_radians(struct hlsl_ctx *ctx,
@@ -4608,14 +4323,10 @@ static bool intrinsic_radians(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg, *rad;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
/* 1 degree = pi/180 rad = 0.0174532925f rad */
- if (!(rad = hlsl_new_float_constant(ctx, 0.0174532925f, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, rad);
-
+ rad = hlsl_block_add_float_constant(ctx, params->instrs, 0.0174532925f, loc);
return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, arg, rad, loc);
}
@@ -4624,8 +4335,7 @@ static bool intrinsic_rcp(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_RCP, arg, loc);
}
@@ -4656,7 +4366,6 @@ static bool intrinsic_refract(struct hlsl_ctx *ctx,
{
struct hlsl_type *type, *scalar_type;
struct hlsl_ir_function_decl *func;
- struct hlsl_ir_node *index;
char *body;
static const char template[] =
@@ -4686,9 +4395,7 @@ static bool intrinsic_refract(struct hlsl_ctx *ctx,
* which we will only use the first component of. */
scalar_type = hlsl_get_scalar_type(ctx, params->args[2]->data_type->e.numeric.type);
- if (!(index = add_implicit_conversion(ctx, params->instrs, params->args[2], scalar_type, loc)))
- return false;
- params->args[2] = index;
+ params->args[2] = add_implicit_conversion(ctx, params->instrs, params->args[2], scalar_type, loc);
if (!elementwise_intrinsic_float_convert_args(ctx, params, loc))
return false;
@@ -4711,8 +4418,7 @@ static bool intrinsic_round(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_ROUND, arg, loc);
}
@@ -4722,8 +4428,7 @@ static bool intrinsic_rsqrt(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_RSQ, arg, loc);
}
@@ -4733,8 +4438,7 @@ static bool intrinsic_saturate(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_SAT, arg, loc);
}
@@ -4757,16 +4461,14 @@ static bool intrinsic_sign(struct hlsl_ctx *ctx,
if (!(lt = add_binary_comparison_expr(ctx, params->instrs, HLSL_OP2_LESS, zero, arg, loc)))
return false;
- if (!(op1 = add_implicit_conversion(ctx, params->instrs, lt, int_type, loc)))
- return false;
+ op1 = add_implicit_conversion(ctx, params->instrs, lt, int_type, loc);
/* Check if arg < 0, cast bool to int and invert (meaning true is -1) */
if (!(lt = add_binary_comparison_expr(ctx, params->instrs, HLSL_OP2_LESS, arg, zero, loc)))
return false;
- if (!(op2 = add_implicit_conversion(ctx, params->instrs, lt, int_type, loc)))
- return false;
+ op2 = add_implicit_conversion(ctx, params->instrs, lt, int_type, loc);
if (!(neg = add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_NEG, op2, loc)))
return false;
@@ -4780,8 +4482,7 @@ static bool intrinsic_sin(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_SIN, arg, loc);
}
@@ -4855,8 +4556,7 @@ static bool intrinsic_sqrt(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg;
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_SQRT, arg, loc);
}
@@ -4875,7 +4575,8 @@ static bool intrinsic_step(struct hlsl_ctx *ctx,
params->args[1], params->args[0], loc)))
return false;
- return !!add_implicit_conversion(ctx, params->instrs, ge, type, loc);
+ add_implicit_conversion(ctx, params->instrs, ge, type, loc);
+ return true;
}
static bool intrinsic_tan(struct hlsl_ctx *ctx,
@@ -4909,8 +4610,7 @@ static bool intrinsic_tanh(struct hlsl_ctx *ctx,
" return (exp_pos - exp_neg) / (exp_pos + exp_neg);\n"
"}\n";
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
type = arg->data_type;
if (!(body = hlsl_sprintf_alloc(ctx, template,
@@ -4931,7 +4631,7 @@ static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *
unsigned int sampler_dim = hlsl_sampler_dim_count(dim);
struct hlsl_resource_load_params load_params = { 0 };
const struct hlsl_type *sampler_type;
- struct hlsl_ir_node *coords, *sample;
+ struct hlsl_ir_node *coords;
if (params->args_count != 2 && params->args_count != 4)
{
@@ -4963,47 +4663,27 @@ static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *
else
load_params.type = HLSL_RESOURCE_SAMPLE_LOD_BIAS;
- if (!(c = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, Y, Z, W), sampler_dim, params->args[1], loc)))
- return false;
- hlsl_block_add_instr(params->instrs, c);
-
- if (!(coords = add_implicit_conversion(ctx, params->instrs, c,
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- {
- return false;
- }
-
- if (!(lod = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(W, W, W, W), 1, params->args[1], loc)))
- return false;
- hlsl_block_add_instr(params->instrs, lod);
+ c = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(X, Y, Z, W), sampler_dim, params->args[1], loc);
+ coords = add_implicit_conversion(ctx, params->instrs, c,
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
- if (!(load_params.lod = add_implicit_conversion(ctx, params->instrs, lod,
- hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc)))
- {
- return false;
- }
+ lod = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(W, W, W, W), 1, params->args[1], loc);
+ load_params.lod = add_implicit_conversion(ctx, params->instrs, lod,
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc);
}
else if (!strcmp(name, "tex2Dproj")
|| !strcmp(name, "tex3Dproj")
|| !strcmp(name, "texCUBEproj"))
{
- if (!(coords = add_implicit_conversion(ctx, params->instrs, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, 4), loc)))
- {
- return false;
- }
+ coords = add_implicit_conversion(ctx, params->instrs, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, 4), loc);
if (hlsl_version_ge(ctx, 4, 0))
{
struct hlsl_ir_node *divisor;
- if (!(divisor = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(W, W, W, W), sampler_dim, coords, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, divisor);
-
- if (!(coords = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, Y, Z, W), sampler_dim, coords, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, coords);
+ divisor = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(W, W, W, W), sampler_dim, coords, loc);
+ coords = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(X, Y, Z, W), sampler_dim, coords, loc);
if (!(coords = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_DIV, coords, divisor, loc)))
return false;
@@ -5017,43 +4697,25 @@ static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *
}
else if (params->args_count == 4) /* Gradient sampling. */
{
- if (!(coords = add_implicit_conversion(ctx, params->instrs, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- {
- return false;
- }
-
- if (!(load_params.ddx = add_implicit_conversion(ctx, params->instrs, params->args[2],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- {
- return false;
- }
-
- if (!(load_params.ddy = add_implicit_conversion(ctx, params->instrs, params->args[3],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- {
- return false;
- }
-
+ coords = add_implicit_conversion(ctx, params->instrs, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
+ load_params.ddx = add_implicit_conversion(ctx, params->instrs, params->args[2],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
+ load_params.ddy = add_implicit_conversion(ctx, params->instrs, params->args[3],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
load_params.type = HLSL_RESOURCE_SAMPLE_GRAD;
}
else
{
load_params.type = HLSL_RESOURCE_SAMPLE;
-
- if (!(coords = add_implicit_conversion(ctx, params->instrs, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- {
- return false;
- }
+ coords = add_implicit_conversion(ctx, params->instrs, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
}
/* tex1D() functions never produce 1D resource declarations. For newer profiles half offset
is used for the second coordinate, while older ones appear to replicate first coordinate.*/
if (dim == HLSL_SAMPLER_DIM_1D)
{
- struct hlsl_ir_load *load;
- struct hlsl_ir_node *half;
struct hlsl_ir_var *var;
unsigned int idx = 0;
@@ -5062,22 +4724,10 @@ static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *
initialize_var_components(ctx, params->instrs, var, &idx, coords, false);
if (hlsl_version_ge(ctx, 4, 0))
- {
- if (!(half = hlsl_new_float_constant(ctx, 0.5f, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, half);
-
- initialize_var_components(ctx, params->instrs, var, &idx, half, false);
- }
- else
- initialize_var_components(ctx, params->instrs, var, &idx, coords, false);
-
- if (!(load = hlsl_new_var_load(ctx, var, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, &load->node);
-
- coords = &load->node;
+ coords = hlsl_block_add_float_constant(ctx, params->instrs, 0.5f, loc);
+ initialize_var_components(ctx, params->instrs, var, &idx, coords, false);
+ coords = hlsl_block_add_simple_load(ctx, params->instrs, var, loc);
dim = HLSL_SAMPLER_DIM_2D;
}
@@ -5086,9 +4736,7 @@ static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *
load_params.format = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, 4);
load_params.sampling_dim = dim;
- if (!(sample = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, sample);
+ hlsl_block_add_resource_load(ctx, params->instrs, &load_params, loc);
return true;
}
@@ -5175,7 +4823,6 @@ static bool intrinsic_transpose(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *arg = params->args[0];
struct hlsl_type *arg_type = arg->data_type;
- struct hlsl_ir_load *var_load;
struct hlsl_deref var_deref;
struct hlsl_type *mat_type;
struct hlsl_ir_node *load;
@@ -5210,32 +4857,21 @@ static bool intrinsic_transpose(struct hlsl_ctx *ctx,
{
for (j = 0; j < arg_type->e.numeric.dimy; ++j)
{
- struct hlsl_block block;
-
- if (!(load = hlsl_add_load_component(ctx, params->instrs, arg,
- j * arg->data_type->e.numeric.dimx + i, loc)))
- return false;
-
- if (!hlsl_new_store_component(ctx, &block, &var_deref, i * var->data_type->e.numeric.dimx + j, load))
- return false;
- hlsl_block_add_block(params->instrs, &block);
+ load = hlsl_add_load_component(ctx, params->instrs, arg,
+ j * arg->data_type->e.numeric.dimx + i, loc);
+ hlsl_block_add_store_component(ctx, params->instrs, &var_deref,
+ i * var->data_type->e.numeric.dimx + j, load);
}
}
- if (!(var_load = hlsl_new_var_load(ctx, var, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, &var_load->node);
-
+ hlsl_block_add_simple_load(ctx, params->instrs, var, loc);
return true;
}
static bool intrinsic_trunc(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *arg;
-
- if (!(arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc)))
- return false;
+ struct hlsl_ir_node *arg = intrinsic_float_convert_arg(ctx, params, params->args[0], loc);
return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_TRUNC, arg, loc);
}
@@ -5243,7 +4879,7 @@ static bool intrinsic_trunc(struct hlsl_ctx *ctx,
static bool intrinsic_d3dcolor_to_ubyte4(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *arg = params->args[0], *ret, *c, *swizzle;
+ struct hlsl_ir_node *arg = params->args[0], *ret, *c;
struct hlsl_type *arg_type = arg->data_type;
if (arg_type->class != HLSL_CLASS_SCALAR && !(arg_type->class == HLSL_CLASS_VECTOR
@@ -5260,21 +4896,11 @@ static bool intrinsic_d3dcolor_to_ubyte4(struct hlsl_ctx *ctx,
return false;
}
- if (!(arg = intrinsic_float_convert_arg(ctx, params, arg, loc)))
- return false;
-
- if (!(c = hlsl_new_float_constant(ctx, 255.0f + (0.5f / 256.0f), loc)))
- return false;
- hlsl_block_add_instr(params->instrs, c);
+ arg = intrinsic_float_convert_arg(ctx, params, arg, loc);
+ c = hlsl_block_add_float_constant(ctx, params->instrs, 255.0f + (0.5f / 256.0f), loc);
if (arg_type->class == HLSL_CLASS_VECTOR)
- {
- if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Z, Y, X, W), 4, arg, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, swizzle);
-
- arg = swizzle;
- }
+ arg = hlsl_block_add_swizzle(ctx, params->instrs, HLSL_SWIZZLE(Z, Y, X, W), 4, arg, loc);
if (!(ret = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, arg, c, loc)))
return false;
@@ -5289,25 +4915,20 @@ static bool intrinsic_GetRenderTargetSampleCount(struct hlsl_ctx *ctx,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
- struct hlsl_ir_node *expr;
if (ctx->profile->type != VKD3D_SHADER_TYPE_PIXEL || hlsl_version_lt(ctx, 4, 1))
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INCOMPATIBLE_PROFILE,
"GetRenderTargetSampleCount() can only be used from a pixel shader using version 4.1 or higher.");
- if (!(expr = hlsl_new_expr(ctx, HLSL_OP0_RASTERIZER_SAMPLE_COUNT,
- operands, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc)))
- return false;
- hlsl_block_add_instr(params->instrs, expr);
-
+ hlsl_block_add_expr(ctx, params->instrs, HLSL_OP0_RASTERIZER_SAMPLE_COUNT,
+ operands, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc);
return true;
}
static bool intrinsic_interlocked(struct hlsl_ctx *ctx, enum hlsl_interlocked_op op,
const struct parse_initializer *params, const struct vkd3d_shader_location *loc, const char *name)
{
- struct hlsl_ir_node *lhs, *coords, *val, *cmp_val = NULL, *orig_val = NULL;
- struct hlsl_ir_node *interlocked, *void_ret;
+ struct hlsl_ir_node *interlocked, *lhs, *coords, *val, *cmp_val = NULL, *orig_val = NULL;
struct hlsl_type *lhs_type, *val_type;
struct vkd3d_string_buffer *string;
struct hlsl_deref dst_deref;
@@ -5421,10 +5042,7 @@ static bool intrinsic_interlocked(struct hlsl_ctx *ctx, enum hlsl_interlocked_op
return false;
}
- if (!(void_ret = hlsl_new_void_expr(ctx, loc)))
- return false;
- hlsl_block_add_instr(params->instrs, void_ret);
-
+ add_void_expr(ctx, params->instrs, loc);
return true;
}
@@ -5759,7 +5377,6 @@ static struct hlsl_block *add_compile_variant(struct hlsl_ctx *ctx, enum hlsl_co
static struct hlsl_block *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type,
struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_load *load;
struct hlsl_ir_var *var;
if (!hlsl_is_numeric_type(type))
@@ -5778,9 +5395,7 @@ static struct hlsl_block *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type
initialize_var(ctx, var, params, false);
- if (!(load = hlsl_new_var_load(ctx, var, loc)))
- return NULL;
- hlsl_block_add_instr(params->instrs, &load->node);
+ hlsl_block_add_simple_load(ctx, params->instrs, var, loc);
vkd3d_free(params->args);
return params->instrs;
@@ -5822,8 +5437,7 @@ static bool add_ternary(struct hlsl_ctx *ctx, struct hlsl_block *block,
{
cond_type = hlsl_get_numeric_type(ctx, common_type->class,
HLSL_TYPE_BOOL, common_type->e.numeric.dimx, common_type->e.numeric.dimy);
- if (!(cond = add_implicit_conversion(ctx, block, cond, cond_type, &cond->loc)))
- return false;
+ cond = add_implicit_conversion(ctx, block, cond, cond_type, &cond->loc);
}
else
{
@@ -5852,15 +5466,11 @@ static bool add_ternary(struct hlsl_ctx *ctx, struct hlsl_block *block,
cond_type = hlsl_get_numeric_type(ctx, common_type->class, HLSL_TYPE_BOOL,
common_type->e.numeric.dimx, common_type->e.numeric.dimy);
- if (!(cond = add_implicit_conversion(ctx, block, cond, cond_type, &cond->loc)))
- return false;
+ cond = add_implicit_conversion(ctx, block, cond, cond_type, &cond->loc);
}
- if (!(first = add_implicit_conversion(ctx, block, first, common_type, &first->loc)))
- return false;
-
- if (!(second = add_implicit_conversion(ctx, block, second, common_type, &second->loc)))
- return false;
+ first = add_implicit_conversion(ctx, block, first, common_type, &first->loc);
+ second = add_implicit_conversion(ctx, block, second, common_type, &second->loc);
}
else
{
@@ -5880,9 +5490,7 @@ static bool add_ternary(struct hlsl_ctx *ctx, struct hlsl_block *block,
cond_type = hlsl_get_numeric_type(ctx, cond_type->class, HLSL_TYPE_BOOL,
cond_type->e.numeric.dimx, cond_type->e.numeric.dimy);
- if (!(cond = add_implicit_conversion(ctx, block, cond, cond_type, &cond->loc)))
- return false;
-
+ cond = add_implicit_conversion(ctx, block, cond, cond_type, &cond->loc);
common_type = first->data_type;
}
@@ -5935,7 +5543,6 @@ static bool add_raw_load_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bl
const char *name, const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
struct hlsl_resource_load_params load_params = {.type = HLSL_RESOURCE_LOAD};
- struct hlsl_ir_node *load;
unsigned int value_dim;
if (params->args_count != 1 && params->args_count != 2)
@@ -5967,16 +5574,11 @@ static bool add_raw_load_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bl
else
value_dim = 4;
- if (!(load_params.coords = add_implicit_conversion(ctx, block, params->args[0],
- hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc)))
- return false;
-
+ load_params.coords = add_implicit_conversion(ctx, block, params->args[0],
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc);
load_params.format = hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, value_dim);
load_params.resource = object;
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
@@ -5986,7 +5588,6 @@ static bool add_load_method_call(struct hlsl_ctx *ctx, struct hlsl_block *block,
const struct hlsl_type *object_type = object->data_type;
struct hlsl_resource_load_params load_params = {.type = HLSL_RESOURCE_LOAD};
unsigned int sampler_dim, offset_dim;
- struct hlsl_ir_node *load;
bool multisampled;
if (object_type->sampler_dim == HLSL_SAMPLER_DIM_RAW_BUFFER)
@@ -6013,18 +5614,12 @@ static bool add_load_method_call(struct hlsl_ctx *ctx, struct hlsl_block *block,
}
if (multisampled)
- {
- if (!(load_params.sample_index = add_implicit_conversion(ctx, block, params->args[1],
- hlsl_get_scalar_type(ctx, HLSL_TYPE_INT), loc)))
- return false;
- }
+ load_params.sample_index = add_implicit_conversion(ctx, block, params->args[1],
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_INT), loc);
if (!!offset_dim && params->args_count > 1 + multisampled)
- {
- if (!(load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[1 + multisampled],
- hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
- return false;
- }
+ load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[1 + multisampled],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc);
if (params->args_count > 1 + multisampled + !!offset_dim)
{
@@ -6032,16 +5627,11 @@ static bool add_load_method_call(struct hlsl_ctx *ctx, struct hlsl_block *block,
}
/* +1 for the mipmap level for non-multisampled textures */
- if (!(load_params.coords = add_implicit_conversion(ctx, block, params->args[0],
- hlsl_get_vector_type(ctx, HLSL_TYPE_INT, sampler_dim + !multisampled), loc)))
- return false;
-
+ load_params.coords = add_implicit_conversion(ctx, block, params->args[0],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_INT, sampler_dim + !multisampled), loc);
load_params.format = object_type->e.resource.format;
load_params.resource = object;
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
@@ -6052,7 +5642,6 @@ static bool add_sample_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bloc
struct hlsl_resource_load_params load_params = {.type = HLSL_RESOURCE_SAMPLE};
unsigned int sampler_dim, offset_dim;
const struct hlsl_type *sampler_type;
- struct hlsl_ir_node *load;
sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
@@ -6077,16 +5666,12 @@ static bool add_sample_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bloc
return false;
}
- if (!(load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- return false;
+ load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
if (offset_dim && params->args_count > 2)
- {
- if (!(load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[2],
- hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
- return false;
- }
+ load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[2],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc);
if (params->args_count > 2 + !!offset_dim)
hlsl_fixme(ctx, loc, "Sample() clamp parameter.");
@@ -6096,11 +5681,7 @@ static bool add_sample_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bloc
load_params.format = object_type->e.resource.format;
load_params.resource = object;
load_params.sampler = params->args[0];
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
-
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
@@ -6111,7 +5692,6 @@ static bool add_sample_cmp_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
struct hlsl_resource_load_params load_params = { 0 };
unsigned int sampler_dim, offset_dim;
const struct hlsl_type *sampler_type;
- struct hlsl_ir_node *load;
sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
@@ -6142,20 +5722,14 @@ static bool add_sample_cmp_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
return false;
}
- if (!(load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- return false;
-
- if (!(load_params.cmp = add_implicit_conversion(ctx, block, params->args[2],
- hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc)))
- load_params.cmp = params->args[2];
+ load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
+ load_params.cmp = add_implicit_conversion(ctx, block, params->args[2],
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc);
if (offset_dim && params->args_count > 3)
- {
- if (!(load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[2],
- hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
- return false;
- }
+ load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[2],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc);
if (params->args_count > 3 + !!offset_dim)
hlsl_fixme(ctx, loc, "%s() clamp parameter.", name);
@@ -6165,11 +5739,7 @@ static bool add_sample_cmp_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
load_params.format = object_type->e.resource.format;
load_params.resource = object;
load_params.sampler = params->args[0];
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
-
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
@@ -6180,7 +5750,6 @@ static bool add_gather_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bloc
struct hlsl_resource_load_params load_params = {0};
unsigned int sampler_dim, offset_dim;
const struct hlsl_type *sampler_type;
- struct hlsl_ir_node *load;
unsigned int read_channel;
sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
@@ -6234,9 +5803,8 @@ static bool add_gather_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bloc
}
else if (offset_dim && params->args_count > 2)
{
- if (!(load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[2],
- hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
- return false;
+ load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[2],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc);
}
sampler_type = params->args[0]->data_type;
@@ -6258,17 +5826,12 @@ static bool add_gather_method_call(struct hlsl_ctx *ctx, struct hlsl_block *bloc
return false;
}
- if (!(load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- return false;
-
+ load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
load_params.format = hlsl_get_vector_type(ctx, object_type->e.resource.format->e.numeric.type, 4);
load_params.resource = object;
load_params.sampler = params->args[0];
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
@@ -6279,7 +5842,6 @@ static bool add_gather_cmp_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
struct hlsl_resource_load_params load_params = {0};
unsigned int sampler_dim, offset_dim;
const struct hlsl_type *sampler_type;
- struct hlsl_ir_node *load;
sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
@@ -6346,10 +5908,7 @@ static bool add_gather_cmp_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
load_params.format = hlsl_get_vector_type(ctx, object_type->e.resource.format->e.numeric.type, 4);
load_params.resource = object;
load_params.sampler = params->args[0];
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
@@ -6361,9 +5920,7 @@ static bool add_assignment_from_component(struct hlsl_ctx *ctx, struct hlsl_bloc
if (!dest)
return true;
- if (!(load = hlsl_add_load_component(ctx, instrs, src, component, loc)))
- return false;
-
+ load = hlsl_add_load_component(ctx, instrs, src, component, loc);
if (!add_assignment(ctx, instrs, dest, ASSIGN_OP_ASSIGN, load, false))
return false;
@@ -6377,7 +5934,6 @@ static bool add_getdimensions_method_call(struct hlsl_ctx *ctx, struct hlsl_bloc
bool uint_resinfo, has_uint_arg, has_float_arg;
struct hlsl_resource_load_params load_params;
struct hlsl_ir_node *sample_info, *res_info;
- struct hlsl_ir_node *zero = NULL, *void_ret;
struct hlsl_type *uint_type, *float_type;
unsigned int i, j;
enum func_argument
@@ -6441,12 +5997,8 @@ static bool add_getdimensions_method_call(struct hlsl_ctx *ctx, struct hlsl_bloc
/* Input parameter. */
if (iter->args[j] == ARG_MIP_LEVEL)
{
- if (!(args[ARG_MIP_LEVEL] = add_implicit_conversion(ctx, block, args[ARG_MIP_LEVEL],
- hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc)))
- {
- return false;
- }
-
+ args[ARG_MIP_LEVEL] = add_implicit_conversion(ctx, block, args[ARG_MIP_LEVEL],
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc);
continue;
}
@@ -6478,22 +6030,14 @@ static bool add_getdimensions_method_call(struct hlsl_ctx *ctx, struct hlsl_bloc
}
if (!args[ARG_MIP_LEVEL])
- {
- if (!(zero = hlsl_new_uint_constant(ctx, 0, loc)))
- return false;
- hlsl_block_add_instr(block, zero);
- args[ARG_MIP_LEVEL] = zero;
- }
+ args[ARG_MIP_LEVEL] = hlsl_block_add_uint_constant(ctx, block, 0, loc);
memset(&load_params, 0, sizeof(load_params));
load_params.type = HLSL_RESOURCE_RESINFO;
load_params.resource = object;
load_params.lod = args[ARG_MIP_LEVEL];
load_params.format = hlsl_get_vector_type(ctx, uint_resinfo ? HLSL_TYPE_UINT : HLSL_TYPE_FLOAT, 4);
-
- if (!(res_info = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, res_info);
+ res_info = hlsl_block_add_resource_load(ctx, block, &load_params, loc);
if (!add_assignment_from_component(ctx, block, args[ARG_WIDTH], res_info, 0, loc))
return false;
@@ -6516,18 +6060,13 @@ static bool add_getdimensions_method_call(struct hlsl_ctx *ctx, struct hlsl_bloc
load_params.type = HLSL_RESOURCE_SAMPLE_INFO;
load_params.resource = object;
load_params.format = args[ARG_SAMPLE_COUNT]->data_type;
- if (!(sample_info = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, sample_info);
+ sample_info = hlsl_block_add_resource_load(ctx, block, &load_params, loc);
if (!add_assignment(ctx, block, args[ARG_SAMPLE_COUNT], ASSIGN_OP_ASSIGN, sample_info, false))
return false;
}
- if (!(void_ret = hlsl_new_void_expr(ctx, loc)))
- return false;
- hlsl_block_add_instr(block, void_ret);
-
+ add_void_expr(ctx, block, loc);
return true;
}
@@ -6538,7 +6077,6 @@ static bool add_sample_lod_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
struct hlsl_resource_load_params load_params = { 0 };
unsigned int sampler_dim, offset_dim;
const struct hlsl_type *sampler_type;
- struct hlsl_ir_node *load;
sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
@@ -6568,20 +6106,14 @@ static bool add_sample_lod_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
return false;
}
- if (!(load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- load_params.coords = params->args[1];
-
- if (!(load_params.lod = add_implicit_conversion(ctx, block, params->args[2],
- hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc)))
- load_params.lod = params->args[2];
+ load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
+ load_params.lod = add_implicit_conversion(ctx, block, params->args[2],
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_FLOAT), loc);
if (offset_dim && params->args_count > 3)
- {
- if (!(load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[3],
- hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
- return false;
- }
+ load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[3],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc);
if (params->args_count > 3 + !!offset_dim)
hlsl_fixme(ctx, loc, "Tiled resource status argument.");
@@ -6589,10 +6121,7 @@ static bool add_sample_lod_method_call(struct hlsl_ctx *ctx, struct hlsl_block *
load_params.format = object_type->e.resource.format;
load_params.resource = object;
load_params.sampler = params->args[0];
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
@@ -6603,7 +6132,6 @@ static bool add_sample_grad_method_call(struct hlsl_ctx *ctx, struct hlsl_block
struct hlsl_resource_load_params load_params = { 0 };
unsigned int sampler_dim, offset_dim;
const struct hlsl_type *sampler_type;
- struct hlsl_ir_node *load;
sampler_dim = hlsl_sampler_dim_count(object_type->sampler_dim);
offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
@@ -6630,24 +6158,16 @@ static bool add_sample_grad_method_call(struct hlsl_ctx *ctx, struct hlsl_block
return false;
}
- if (!(load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- load_params.coords = params->args[1];
-
- if (!(load_params.ddx = add_implicit_conversion(ctx, block, params->args[2],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- load_params.ddx = params->args[2];
-
- if (!(load_params.ddy = add_implicit_conversion(ctx, block, params->args[3],
- hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
- load_params.ddy = params->args[3];
+ load_params.coords = add_implicit_conversion(ctx, block, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
+ load_params.ddx = add_implicit_conversion(ctx, block, params->args[2],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
+ load_params.ddy = add_implicit_conversion(ctx, block, params->args[3],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc);
if (offset_dim && params->args_count > 4)
- {
- if (!(load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[4],
- hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc)))
- return false;
- }
+ load_params.texel_offset = add_implicit_conversion(ctx, block, params->args[4],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_INT, offset_dim), loc);
if (params->args_count > 4 + !!offset_dim)
hlsl_fixme(ctx, loc, "Tiled resource status argument.");
@@ -6655,17 +6175,14 @@ static bool add_sample_grad_method_call(struct hlsl_ctx *ctx, struct hlsl_block
load_params.format = object_type->e.resource.format;
load_params.resource = object;
load_params.sampler = params->args[0];
-
- if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
- return false;
- hlsl_block_add_instr(block, load);
+ hlsl_block_add_resource_load(ctx, block, &load_params, loc);
return true;
}
static bool add_store_method_call(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_node *object,
const char *name, const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *offset, *rhs, *store;
+ struct hlsl_ir_node *offset, *rhs;
struct hlsl_deref resource_deref;
unsigned int value_dim;
@@ -6685,24 +6202,15 @@ static bool add_store_method_call(struct hlsl_ctx *ctx, struct hlsl_block *block
else
value_dim = 4;
- if (!(offset = add_implicit_conversion(ctx, block, params->args[0],
- hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc)))
- return false;
-
- if (!(rhs = add_implicit_conversion(ctx, block, params->args[1],
- hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, value_dim), loc)))
- return false;
+ offset = add_implicit_conversion(ctx, block, params->args[0],
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc);
+ rhs = add_implicit_conversion(ctx, block, params->args[1],
+ hlsl_get_vector_type(ctx, HLSL_TYPE_UINT, value_dim), loc);
if (!hlsl_init_deref_from_index_chain(ctx, &resource_deref, object))
return false;
- if (!(store = hlsl_new_resource_store(ctx, &resource_deref, offset, rhs, loc)))
- {
- hlsl_cleanup_deref(&resource_deref);
- return false;
- }
-
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_resource_store(ctx, block, &resource_deref, offset, rhs, loc);
hlsl_cleanup_deref(&resource_deref);
return true;
@@ -6903,15 +6411,8 @@ static bool add_switch(struct hlsl_ctx *ctx, struct hlsl_block *block,
return true;
}
- if (!(selector = add_implicit_conversion(ctx, block, selector,
- hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), &selector->loc)))
- {
- destroy_switch_cases(cases);
- destroy_block(block);
- cleanup_parse_attribute_list(attributes);
- return false;
- }
-
+ selector = add_implicit_conversion(ctx, block, selector,
+ hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), &selector->loc);
s = hlsl_new_switch(ctx, selector, cases, loc);
destroy_switch_cases(cases);
@@ -7052,6 +6553,8 @@ static void validate_uav_type(struct hlsl_ctx *ctx, enum hlsl_sampler_dim dim,
%token KW_INLINE
%token KW_INOUT
%token KW_INPUTPATCH
+%token KW_LINE
+%token KW_LINEADJ
%token KW_LINEAR
%token KW_LINESTREAM
%token KW_MATRIX
@@ -7064,6 +6567,7 @@ static void validate_uav_type(struct hlsl_ctx *ctx, enum hlsl_sampler_dim dim,
%token KW_PACKOFFSET
%token KW_PASS
%token KW_PIXELSHADER
+%token KW_POINT
%token KW_POINTSTREAM
%token KW_RASTERIZERORDEREDBUFFER
%token KW_RASTERIZERORDEREDSTRUCTUREDBUFFER
@@ -7114,6 +6618,8 @@ static void validate_uav_type(struct hlsl_ctx *ctx, enum hlsl_sampler_dim dim,
%token KW_TEXTURE3D
%token KW_TEXTURECUBE
%token KW_TEXTURECUBEARRAY
+%token KW_TRIANGLE
+%token KW_TRIANGLEADJ
%token KW_TRIANGLESTREAM
%token KW_TRUE
%token KW_TYPEDEF
@@ -8121,7 +7627,8 @@ parameter:
parameter_decl:
var_modifiers type_no_void any_identifier arrays colon_attributes
{
- uint32_t modifiers = $1;
+ uint32_t prim_modifiers = $1 & HLSL_PRIMITIVE_MODIFIERS_MASK;
+ uint32_t modifiers = $1 & ~HLSL_PRIMITIVE_MODIFIERS_MASK;
struct hlsl_type *type;
unsigned int i;
@@ -8146,6 +7653,22 @@ parameter_decl:
}
vkd3d_free($4.sizes);
+ if (prim_modifiers && (prim_modifiers & (prim_modifiers - 1)))
+ {
+ hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
+ "Primitive type modifiers are mutually exclusive.");
+ prim_modifiers = 0;
+ }
+
+ if (prim_modifiers)
+ {
+ if (type->class != HLSL_CLASS_ARRAY)
+ hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
+ "Primitive type modifiers can only be applied to arrays.");
+ else
+ type->modifiers |= prim_modifiers;
+ }
+
$$.type = type;
if (hlsl_version_ge(ctx, 5, 1) && type->class == HLSL_CLASS_ARRAY && hlsl_type_is_resource(type))
@@ -8752,6 +8275,26 @@ state_block:
hlsl_src_from_node(&entry->args[i], $5.args[i]);
vkd3d_free($5.args);
+ $$ = $1;
+ hlsl_state_block_add_entry($$, entry);
+ }
+ | state_block stateblock_lhs_identifier state_block_index_opt '=' '<' primary_expr '>' ';'
+ {
+ struct hlsl_state_block_entry *entry;
+
+ if (!(entry = hlsl_alloc(ctx, sizeof(*entry))))
+ YYABORT;
+
+ entry->name = $2;
+ entry->lhs_has_index = $3.has_index;
+ entry->lhs_index = $3.index;
+
+ entry->instrs = $6;
+ entry->args_count = 1;
+ if (!(entry->args = hlsl_alloc(ctx, sizeof(*entry->args) * entry->args_count)))
+ YYABORT;
+ hlsl_src_from_node(&entry->args[0], node_from_block($6));
+
$$ = $1;
hlsl_state_block_add_entry($$, entry);
}
@@ -8845,7 +8388,7 @@ variable_def_typed:
if (!(type = apply_type_modifiers(ctx, $2, &modifiers, true, &@1)))
YYABORT;
- check_invalid_in_out_modifiers(ctx, modifiers, &@1);
+ check_invalid_non_parameter_modifiers(ctx, modifiers, &@1);
$$ = $3;
$$->basic_type = type;
@@ -8860,7 +8403,7 @@ variable_def_typed:
if (!(type = apply_type_modifiers(ctx, $2, &modifiers, true, &@1)))
YYABORT;
- check_invalid_in_out_modifiers(ctx, modifiers, &@1);
+ check_invalid_non_parameter_modifiers(ctx, modifiers, &@1);
$$ = $3;
$$->basic_type = type;
@@ -9001,6 +8544,26 @@ var_modifiers:
{
$$ = add_modifiers(ctx, $2, HLSL_MODIFIER_SNORM, &@1);
}
+ | KW_LINE var_modifiers
+ {
+ $$ = add_modifiers(ctx, $2, HLSL_PRIMITIVE_LINE, &@1);
+ }
+ | KW_LINEADJ var_modifiers
+ {
+ $$ = add_modifiers(ctx, $2, HLSL_PRIMITIVE_LINEADJ, &@1);
+ }
+ | KW_POINT var_modifiers
+ {
+ $$ = add_modifiers(ctx, $2, HLSL_PRIMITIVE_POINT, &@1);
+ }
+ | KW_TRIANGLE var_modifiers
+ {
+ $$ = add_modifiers(ctx, $2, HLSL_PRIMITIVE_TRIANGLE, &@1);
+ }
+ | KW_TRIANGLEADJ var_modifiers
+ {
+ $$ = add_modifiers(ctx, $2, HLSL_PRIMITIVE_TRIANGLEADJ, &@1);
+ }
| var_identifier var_modifiers
{
$$ = $2;
@@ -9130,8 +8693,6 @@ statement:
jump_statement:
KW_BREAK ';'
{
- struct hlsl_ir_node *jump;
-
if (!is_break_allowed(ctx->cur_scope))
{
hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
@@ -9140,22 +8701,15 @@ jump_statement:
if (!($$ = make_empty_block(ctx)))
YYABORT;
- if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, NULL, &@1)))
- YYABORT;
- hlsl_block_add_instr($$, jump);
+ hlsl_block_add_jump(ctx, $$, HLSL_IR_JUMP_BREAK, NULL, &@1);
}
| KW_CONTINUE ';'
{
- struct hlsl_ir_node *jump;
-
check_continue(ctx, ctx->cur_scope, &@1);
if (!($$ = make_empty_block(ctx)))
YYABORT;
-
- if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_UNRESOLVED_CONTINUE, NULL, &@1)))
- YYABORT;
- hlsl_block_add_instr($$, jump);
+ hlsl_block_add_jump(ctx, $$, HLSL_IR_JUMP_UNRESOLVED_CONTINUE, NULL, &@1);
}
| KW_RETURN expr ';'
{
@@ -9172,18 +8726,12 @@ jump_statement:
}
| KW_DISCARD ';'
{
- struct hlsl_ir_node *discard, *c;
+ struct hlsl_ir_node *c;
if (!($$ = make_empty_block(ctx)))
YYABORT;
-
- if (!(c = hlsl_new_uint_constant(ctx, ~0u, &@1)))
- return false;
- hlsl_block_add_instr($$, c);
-
- if (!(discard = hlsl_new_jump(ctx, HLSL_IR_JUMP_DISCARD_NZ, c, &@1)))
- return false;
- hlsl_block_add_instr($$, discard);
+ c = hlsl_block_add_uint_constant(ctx, $$, ~0u, &@1);
+ hlsl_block_add_jump(ctx, $$, HLSL_IR_JUMP_DISCARD_NZ, c, &@1);
}
selection_statement:
@@ -9191,7 +8739,6 @@ selection_statement:
{
struct hlsl_ir_node *condition = node_from_block($4);
const struct parse_attribute_list *attributes = &$1;
- struct hlsl_ir_node *instr;
unsigned int i;
check_attribute_list_for_duplicates(ctx, attributes);
@@ -9213,27 +8760,14 @@ selection_statement:
check_condition_type(ctx, condition);
- if (!(condition = add_cast(ctx, $4, condition, hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL), &@4)))
- {
- destroy_block($6.then_block);
- destroy_block($6.else_block);
- cleanup_parse_attribute_list(&$1);
- YYABORT;
- }
+ condition = add_cast(ctx, $4, condition, hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL), &@4);
+ hlsl_block_add_if(ctx, $4, condition, $6.then_block, $6.else_block, &@2);
- if (!(instr = hlsl_new_if(ctx, condition, $6.then_block, $6.else_block, &@2)))
- {
- destroy_block($6.then_block);
- destroy_block($6.else_block);
- cleanup_parse_attribute_list(&$1);
- YYABORT;
- }
destroy_block($6.then_block);
destroy_block($6.else_block);
cleanup_parse_attribute_list(&$1);
$$ = $4;
- hlsl_block_add_instr($$, instr);
}
if_body:
@@ -9383,30 +8917,21 @@ func_arguments:
primary_expr:
C_FLOAT
{
- struct hlsl_ir_node *c;
-
- if (!(c = hlsl_new_float_constant(ctx, $1, &@1)))
- YYABORT;
- if (!($$ = make_block(ctx, c)))
+ if (!($$ = make_empty_block(ctx)))
YYABORT;
+ hlsl_block_add_float_constant(ctx, $$, $1, &@1);
}
| C_INTEGER
{
- struct hlsl_ir_node *c;
-
- if (!(c = hlsl_new_int_constant(ctx, $1, &@1)))
- YYABORT;
- if (!($$ = make_block(ctx, c)))
+ if (!($$ = make_empty_block(ctx)))
YYABORT;
+ hlsl_block_add_int_constant(ctx, $$, $1, &@1);
}
| C_UNSIGNED
{
- struct hlsl_ir_node *c;
-
- if (!(c = hlsl_new_uint_constant(ctx, $1, &@1)))
- YYABORT;
- if (!($$ = make_block(ctx, c)))
+ if (!($$ = make_empty_block(ctx)))
YYABORT;
+ hlsl_block_add_uint_constant(ctx, $$, $1, &@1);
}
| boolean
{
@@ -9451,17 +8976,15 @@ primary_expr:
}
| VAR_IDENTIFIER
{
- struct hlsl_ir_load *load;
struct hlsl_ir_var *var;
if ((var = hlsl_get_var(ctx->cur_scope, $1)))
{
vkd3d_free($1);
- if (!(load = hlsl_new_var_load(ctx, var, &@1)))
- YYABORT;
- if (!($$ = make_block(ctx, &load->node)))
+ if (!($$ = make_empty_block(ctx)))
YYABORT;
+ hlsl_block_add_simple_load(ctx, $$, var, &@1);
}
else
{
@@ -9583,12 +9106,7 @@ postfix_expr:
if (node->data_type->class == HLSL_CLASS_STRUCT)
{
- if (!add_record_access_recurse(ctx, $1, $3, &@2))
- {
- destroy_block($1);
- vkd3d_free($3);
- YYABORT;
- }
+ add_record_access_recurse(ctx, $1, $3, &@2);
}
else if (hlsl_is_numeric_type(node->data_type))
{
@@ -9703,12 +9221,7 @@ unary_expr:
hlsl_error(ctx, &@2, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
"Modifiers are not allowed on casts.");
- if (!add_explicit_conversion(ctx, $6, $3, &$4, &@3))
- {
- destroy_block($6);
- vkd3d_free($4.sizes);
- YYABORT;
- }
+ add_explicit_conversion(ctx, $6, $3, &$4, &@3);
vkd3d_free($4.sizes);
$$ = $6;
}
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
index 2afd3e1e1e5..ba56ba90403 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
@@ -62,14 +62,9 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
size /= 4;
}
- if (!(c = hlsl_new_uint_constant(ctx, size, loc)))
- return NULL;
- hlsl_block_add_instr(block, c);
-
- if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, c, idx)))
- return NULL;
- hlsl_block_add_instr(block, idx_offset);
+ c = hlsl_block_add_uint_constant(ctx, block, size, loc);
+ idx_offset = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, c, idx);
break;
}
@@ -86,12 +81,7 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
field_offset /= 4;
}
- if (!(c = hlsl_new_uint_constant(ctx, field_offset, loc)))
- return NULL;
- hlsl_block_add_instr(block, c);
-
- idx_offset = c;
-
+ idx_offset = hlsl_block_add_uint_constant(ctx, block, field_offset, loc);
break;
}
@@ -100,12 +90,7 @@ static struct hlsl_ir_node *new_offset_from_path_index(struct hlsl_ctx *ctx, str
}
if (idx_offset)
- {
- if (!(base_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, base_offset, idx_offset)))
- return NULL;
- hlsl_block_add_instr(block, base_offset);
- }
-
+ return hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, base_offset, idx_offset);
return base_offset;
}
@@ -122,9 +107,7 @@ static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, st
hlsl_block_init(block);
- if (!(offset = hlsl_new_uint_constant(ctx, 0, loc)))
- return NULL;
- hlsl_block_add_instr(block, offset);
+ offset = hlsl_block_add_uint_constant(ctx, block, 0, loc);
VKD3D_ASSERT(deref->var);
type = deref->var->data_type;
@@ -134,16 +117,9 @@ static struct hlsl_ir_node *new_offset_instr_from_deref(struct hlsl_ctx *ctx, st
struct hlsl_block idx_block;
hlsl_block_init(&idx_block);
-
- if (!(offset = new_offset_from_path_index(ctx, &idx_block, type, offset, deref->path[i].node,
- regset, offset_component, loc)))
- {
- hlsl_block_cleanup(&idx_block);
- return NULL;
- }
-
+ offset = new_offset_from_path_index(ctx, &idx_block, type, offset,
+ deref->path[i].node, regset, offset_component, loc);
hlsl_block_add_block(block, &idx_block);
-
type = hlsl_get_element_type_from_path_index(ctx, type, deref->path[i].node);
}
@@ -174,8 +150,7 @@ static bool replace_deref_path_with_offset(struct hlsl_ctx *ctx, struct hlsl_der
deref->data_type = type;
- if (!(offset = new_offset_instr_from_deref(ctx, &block, deref, &offset_component, &instr->loc)))
- return false;
+ offset = new_offset_instr_from_deref(ctx, &block, deref, &offset_component, &instr->loc);
list_move_before(&instr->entry, &block.instrs);
hlsl_cleanup_deref(deref);
@@ -203,41 +178,34 @@ static bool clean_constant_deref_offset_srcs(struct hlsl_ctx *ctx, struct hlsl_d
}
-/* Split uniforms into two variables representing the constant and temp
- * registers, and copy the former to the latter, so that writes to uniforms
- * work. */
-static void prepend_uniform_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_var *temp)
+/* For a uniform variable, create a temp copy of it so, in case a value is
+ * stored to the uniform at some point the shader, all derefs can be diverted
+ * to this temp copy instead.
+ * Also, promote the uniform to an extern var. */
+static void prepend_uniform_copy(struct hlsl_ctx *ctx, struct hlsl_block *block, struct hlsl_ir_var *uniform)
{
- struct hlsl_ir_var *uniform;
struct hlsl_ir_node *store;
struct hlsl_ir_load *load;
+ struct hlsl_ir_var *temp;
char *new_name;
- /* Use the synthetic name for the temp, rather than the uniform, so that we
- * can write the uniform name into the shader reflection data. */
+ uniform->is_uniform = 1;
+ list_add_tail(&ctx->extern_vars, &uniform->extern_entry);
- if (!(uniform = hlsl_new_var(ctx, temp->name, temp->data_type,
- &temp->loc, NULL, temp->storage_modifiers, &temp->reg_reservation)))
+ if (!(new_name = hlsl_sprintf_alloc(ctx, "<temp-%s>", uniform->name)))
return;
- list_add_before(&temp->scope_entry, &uniform->scope_entry);
- list_add_tail(&ctx->extern_vars, &uniform->extern_entry);
- uniform->is_uniform = 1;
- uniform->is_param = temp->is_param;
- uniform->buffer = temp->buffer;
- if (temp->default_values)
+
+ if (!(temp = hlsl_new_var(ctx, new_name, uniform->data_type,
+ &uniform->loc, NULL, uniform->storage_modifiers, NULL)))
{
- /* Transfer default values from the temp to the uniform. */
- VKD3D_ASSERT(!uniform->default_values);
- VKD3D_ASSERT(hlsl_type_component_count(temp->data_type) == hlsl_type_component_count(uniform->data_type));
- uniform->default_values = temp->default_values;
- temp->default_values = NULL;
+ vkd3d_free(new_name);
+ return;
}
+ list_add_before(&uniform->scope_entry, &temp->scope_entry);
- if (!(new_name = hlsl_sprintf_alloc(ctx, "<temp-%s>", temp->name)))
- return;
- temp->name = new_name;
+ uniform->temp_copy = temp;
- if (!(load = hlsl_new_var_load(ctx, uniform, &temp->loc)))
+ if (!(load = hlsl_new_var_load(ctx, uniform, &uniform->loc)))
return;
list_add_head(&block->instrs, &load->node.entry);
@@ -246,6 +214,25 @@ static void prepend_uniform_copy(struct hlsl_ctx *ctx, struct hlsl_block *block,
list_add_after(&load->node.entry, &store->entry);
}
+/* If a uniform is written to at some point in the shader, all dereferences
+ * must point to the temp copy instead, which is what this pass does. */
+static bool divert_written_uniform_derefs_to_temp(struct hlsl_ctx *ctx, struct hlsl_deref *deref,
+ struct hlsl_ir_node *instr)
+{
+ if (!deref->var->is_uniform || !deref->var->first_write)
+ return false;
+
+ /* Skip derefs from instructions before first write so copies from the
+ * uniform to the temp are unaffected. */
+ if (instr->index < deref->var->first_write)
+ return false;
+
+ VKD3D_ASSERT(deref->var->temp_copy);
+
+ deref->var = deref->var->temp_copy;
+ return true;
+}
+
static void validate_field_semantic(struct hlsl_ctx *ctx, struct hlsl_struct_field *field)
{
if (!field->semantic.name && hlsl_is_numeric_type(hlsl_get_multiarray_element_type(field->type))
@@ -259,13 +246,23 @@ static void validate_field_semantic(struct hlsl_ctx *ctx, struct hlsl_struct_fie
static enum hlsl_base_type base_type_get_semantic_equivalent(enum hlsl_base_type base)
{
- if (base == HLSL_TYPE_BOOL)
- return HLSL_TYPE_UINT;
- if (base == HLSL_TYPE_INT)
- return HLSL_TYPE_UINT;
- if (base == HLSL_TYPE_HALF)
- return HLSL_TYPE_FLOAT;
- return base;
+ switch (base)
+ {
+ case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
+ case HLSL_TYPE_UINT:
+ return HLSL_TYPE_UINT;
+
+ case HLSL_TYPE_HALF:
+ case HLSL_TYPE_FLOAT:
+ return HLSL_TYPE_FLOAT;
+
+ case HLSL_TYPE_DOUBLE:
+ return HLSL_TYPE_DOUBLE;
+ }
+
+ vkd3d_unreachable();
}
static bool types_are_semantic_equivalent(struct hlsl_ctx *ctx, const struct hlsl_type *type1,
@@ -274,9 +271,9 @@ static bool types_are_semantic_equivalent(struct hlsl_ctx *ctx, const struct hls
if (ctx->profile->major_version < 4)
return true;
- if (hlsl_type_is_patch_array(type1))
+ if (hlsl_type_is_primitive_array(type1))
{
- return hlsl_type_is_patch_array(type2)
+ return hlsl_type_is_primitive_array(type2)
&& type1->e.array.array_type == type2->e.array.array_type
&& type1->e.array.elements_count == type2->e.array.elements_count
&& types_are_semantic_equivalent(ctx, type1->e.array.type, type2->e.array.type);
@@ -298,8 +295,8 @@ static struct hlsl_ir_var *add_semantic_var(struct hlsl_ctx *ctx, struct hlsl_ir
const char *prefix;
char *new_name;
- if (hlsl_type_is_patch_array(type))
- prefix = type->e.array.array_type == HLSL_ARRAY_PATCH_INPUT ? "inputpatch" : "outputpatch";
+ if (hlsl_type_is_primitive_array(type))
+ prefix = type->e.array.array_type == HLSL_ARRAY_PATCH_OUTPUT ? "outputpatch" : "inputprim";
else
prefix = output ? "output" : "input";
@@ -310,9 +307,9 @@ static struct hlsl_ir_var *add_semantic_var(struct hlsl_ctx *ctx, struct hlsl_ir
{
if (!ascii_strcasecmp(ext_var->name, new_name))
{
- VKD3D_ASSERT(hlsl_type_is_patch_array(ext_var->data_type)
+ VKD3D_ASSERT(hlsl_type_is_primitive_array(ext_var->data_type)
|| ext_var->data_type->class <= HLSL_CLASS_VECTOR);
- VKD3D_ASSERT(hlsl_type_is_patch_array(type) || type->class <= HLSL_CLASS_VECTOR);
+ VKD3D_ASSERT(hlsl_type_is_primitive_array(type) || type->class <= HLSL_CLASS_VECTOR);
if (output)
{
@@ -386,7 +383,7 @@ static uint32_t combine_field_storage_modifiers(uint32_t modifiers, uint32_t fie
}
static void prepend_input_copy(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *func,
- struct hlsl_block *block, struct hlsl_ir_var *top_var, uint32_t patch_index, struct hlsl_ir_load *lhs,
+ struct hlsl_block *block, uint32_t prim_index, struct hlsl_ir_load *lhs,
uint32_t modifiers, struct hlsl_semantic *semantic, uint32_t semantic_index, bool force_align)
{
struct hlsl_type *type = lhs->node.data_type, *vector_type_src, *vector_type_dst;
@@ -416,31 +413,29 @@ static void prepend_input_copy(struct hlsl_ctx *ctx, struct hlsl_ir_function_dec
for (i = 0; i < hlsl_type_major_size(type); ++i)
{
- struct hlsl_ir_node *store, *cast;
+ struct hlsl_ir_node *cast;
struct hlsl_ir_var *input;
struct hlsl_ir_load *load;
- if (hlsl_type_is_patch_array(top_var->data_type))
+ if (hlsl_type_is_primitive_array(var->data_type))
{
- struct hlsl_type *top_type = top_var->data_type;
- struct hlsl_type *patch_type;
- struct hlsl_deref patch_deref;
+ struct hlsl_type *prim_type_src;
+ struct hlsl_deref prim_deref;
struct hlsl_ir_node *idx;
- if (!(patch_type = hlsl_new_array_type(ctx, vector_type_src, top_type->e.array.elements_count,
- top_type->e.array.array_type)))
+ if (!(prim_type_src = hlsl_new_array_type(ctx, vector_type_src, var->data_type->e.array.elements_count,
+ var->data_type->e.array.array_type)))
return;
+ prim_type_src->modifiers = var->data_type->modifiers & HLSL_PRIMITIVE_MODIFIERS_MASK;
- if (!(input = add_semantic_var(ctx, func, var, patch_type,
+ if (!(input = add_semantic_var(ctx, func, var, prim_type_src,
modifiers, semantic, semantic_index + i, false, force_align, loc)))
return;
- hlsl_init_simple_deref_from_var(&patch_deref, input);
+ hlsl_init_simple_deref_from_var(&prim_deref, input);
- if (!(idx = hlsl_new_uint_constant(ctx, patch_index, &var->loc)))
- return;
- hlsl_block_add_instr(block, idx);
+ idx = hlsl_block_add_uint_constant(ctx, block, prim_index, &var->loc);
- if (!(load = hlsl_new_load_index(ctx, &patch_deref, idx, loc)))
+ if (!(load = hlsl_new_load_index(ctx, &prim_deref, idx, loc)))
return;
hlsl_block_add_instr(block, &load->node);
}
@@ -455,33 +450,25 @@ static void prepend_input_copy(struct hlsl_ctx *ctx, struct hlsl_ir_function_dec
hlsl_block_add_instr(block, &load->node);
}
- if (!(cast = hlsl_new_cast(ctx, &load->node, vector_type_dst, &var->loc)))
- return;
- hlsl_block_add_instr(block, cast);
+ cast = hlsl_block_add_cast(ctx, block, &load->node, vector_type_dst, &var->loc);
if (type->class == HLSL_CLASS_MATRIX)
{
- if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
- return;
- hlsl_block_add_instr(block, c);
+ c = hlsl_block_add_uint_constant(ctx, block, i, &var->loc);
- if (!(store = hlsl_new_store_index(ctx, &lhs->src, c, cast, 0, &var->loc)))
- return;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_store_index(ctx, block, &lhs->src, c, cast, 0, &var->loc);
}
else
{
VKD3D_ASSERT(i == 0);
- if (!(store = hlsl_new_store_index(ctx, &lhs->src, NULL, cast, 0, &var->loc)))
- return;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_store_index(ctx, block, &lhs->src, NULL, cast, 0, &var->loc);
}
}
}
static void prepend_input_copy_recurse(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *func,
- struct hlsl_block *block, struct hlsl_ir_var *top_var, uint32_t patch_index, struct hlsl_ir_load *lhs,
+ struct hlsl_block *block, uint32_t prim_index, struct hlsl_ir_load *lhs,
uint32_t modifiers, struct hlsl_semantic *semantic, uint32_t semantic_index, bool force_align)
{
struct vkd3d_shader_location *loc = &lhs->node.loc;
@@ -507,8 +494,8 @@ static void prepend_input_copy_recurse(struct hlsl_ctx *ctx, struct hlsl_ir_func
element_modifiers = modifiers;
force_align = true;
- if (hlsl_type_is_patch_array(type))
- patch_index = i;
+ if (hlsl_type_is_primitive_array(type))
+ prim_index = i;
}
else
{
@@ -526,22 +513,20 @@ static void prepend_input_copy_recurse(struct hlsl_ctx *ctx, struct hlsl_ir_func
force_align = (i == 0);
}
- if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
- return;
- hlsl_block_add_instr(block, c);
+ c = hlsl_block_add_uint_constant(ctx, block, i, &var->loc);
/* This redundant load is expected to be deleted later by DCE. */
if (!(element_load = hlsl_new_load_index(ctx, &lhs->src, c, loc)))
return;
hlsl_block_add_instr(block, &element_load->node);
- prepend_input_copy_recurse(ctx, func, block, top_var, patch_index, element_load,
+ prepend_input_copy_recurse(ctx, func, block, prim_index, element_load,
element_modifiers, semantic, elem_semantic_index, force_align);
}
}
else
{
- prepend_input_copy(ctx, func, block, var, patch_index, lhs, modifiers, semantic, semantic_index, force_align);
+ prepend_input_copy(ctx, func, block, prim_index, lhs, modifiers, semantic, semantic_index, force_align);
}
}
@@ -559,8 +544,8 @@ static void prepend_input_var_copy(struct hlsl_ctx *ctx, struct hlsl_ir_function
return;
hlsl_block_add_instr(&block, &load->node);
- prepend_input_copy_recurse(ctx, func, &block, var, 0, load,
- var->storage_modifiers, &var->semantic, var->semantic.index, false);
+ prepend_input_copy_recurse(ctx, func, &block, 0, load, var->storage_modifiers,
+ &var->semantic, var->semantic.index, false);
list_move_head(&func->body.instrs, &block.instrs);
}
@@ -593,9 +578,8 @@ static void append_output_copy(struct hlsl_ctx *ctx, struct hlsl_ir_function_dec
for (i = 0; i < hlsl_type_major_size(type); ++i)
{
- struct hlsl_ir_node *store;
struct hlsl_ir_var *output;
- struct hlsl_ir_load *load;
+ struct hlsl_ir_node *load;
if (!(output = add_semantic_var(ctx, func, var, vector_type,
modifiers, semantic, semantic_index + i, true, force_align, loc)))
@@ -603,26 +587,17 @@ static void append_output_copy(struct hlsl_ctx *ctx, struct hlsl_ir_function_dec
if (type->class == HLSL_CLASS_MATRIX)
{
- if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
- return;
- hlsl_block_add_instr(&func->body, c);
-
- if (!(load = hlsl_new_load_index(ctx, &rhs->src, c, &var->loc)))
- return;
- hlsl_block_add_instr(&func->body, &load->node);
+ c = hlsl_block_add_uint_constant(ctx, &func->body, i, &var->loc);
+ load = hlsl_block_add_load_index(ctx, &func->body, &rhs->src, c, &var->loc);
}
else
{
VKD3D_ASSERT(i == 0);
- if (!(load = hlsl_new_load_index(ctx, &rhs->src, NULL, &var->loc)))
- return;
- hlsl_block_add_instr(&func->body, &load->node);
+ load = hlsl_block_add_load_index(ctx, &func->body, &rhs->src, NULL, &var->loc);
}
- if (!(store = hlsl_new_simple_store(ctx, output, &load->node)))
- return;
- hlsl_block_add_instr(&func->body, store);
+ hlsl_block_add_simple_store(ctx, &func->body, output, load);
}
}
@@ -666,9 +641,7 @@ static void append_output_copy_recurse(struct hlsl_ctx *ctx,
force_align = (i == 0);
}
- if (!(c = hlsl_new_uint_constant(ctx, i, &var->loc)))
- return;
- hlsl_block_add_instr(&func->body, c);
+ c = hlsl_block_add_uint_constant(ctx, &func->body, i, &var->loc);
if (!(element_load = hlsl_new_load_index(ctx, &rhs->src, c, loc)))
return;
@@ -705,6 +678,9 @@ bool hlsl_transform_ir(struct hlsl_ctx *ctx, bool (*func)(struct hlsl_ctx *ctx,
struct hlsl_ir_node *instr, *next;
bool progress = false;
+ if (ctx->result)
+ return false;
+
LIST_FOR_EACH_ENTRY_SAFE(instr, next, &block->instrs, struct hlsl_ir_node, entry)
{
if (instr->type == HLSL_IR_IF)
@@ -853,9 +829,9 @@ static bool find_recursive_calls(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
static void insert_early_return_break(struct hlsl_ctx *ctx,
struct hlsl_ir_function_decl *func, struct hlsl_ir_node *cf_instr)
{
- struct hlsl_ir_node *iff, *jump;
struct hlsl_block then_block;
struct hlsl_ir_load *load;
+ struct hlsl_ir_node *iff;
hlsl_block_init(&then_block);
@@ -863,9 +839,7 @@ static void insert_early_return_break(struct hlsl_ctx *ctx,
return;
list_add_after(&cf_instr->entry, &load->node.entry);
- if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, NULL, &cf_instr->loc)))
- return;
- hlsl_block_add_instr(&then_block, jump);
+ hlsl_block_add_jump(ctx, &then_block, HLSL_IR_JUMP_BREAK, NULL, &cf_instr->loc);
if (!(iff = hlsl_new_if(ctx, &load->node, &then_block, NULL, &cf_instr->loc)))
return;
@@ -1037,9 +1011,8 @@ static bool lower_return(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fun
else if (cf_instr)
{
struct list *tail = list_tail(&block->instrs);
- struct hlsl_ir_node *not, *iff;
+ struct hlsl_ir_node *not, *load;
struct hlsl_block then_block;
- struct hlsl_ir_load *load;
/* If we're in a loop, we should have used "break" instead. */
VKD3D_ASSERT(!in_loop);
@@ -1051,17 +1024,9 @@ static bool lower_return(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *fun
list_move_slice_tail(&then_block.instrs, list_next(&block->instrs, &cf_instr->entry), tail);
lower_return(ctx, func, &then_block, in_loop);
- if (!(load = hlsl_new_var_load(ctx, func->early_return_var, &cf_instr->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
-
- if (!(not = hlsl_new_unary_expr(ctx, HLSL_OP1_LOGIC_NOT, &load->node, &cf_instr->loc)))
- return false;
- hlsl_block_add_instr(block, not);
-
- if (!(iff = hlsl_new_if(ctx, not, &then_block, NULL, &cf_instr->loc)))
- return false;
- list_add_tail(&block->instrs, &iff->entry);
+ load = hlsl_block_add_simple_load(ctx, block, func->early_return_var, &cf_instr->loc);
+ not = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_LOGIC_NOT, load, &cf_instr->loc);
+ hlsl_block_add_if(ctx, block, not, &then_block, NULL, &cf_instr->loc);
}
return has_early_return;
@@ -1096,10 +1061,9 @@ static struct hlsl_ir_node *add_zero_mipmap_level(struct hlsl_ctx *ctx, struct h
struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
{
unsigned int dim_count = index->data_type->e.numeric.dimx;
- struct hlsl_ir_node *store, *zero;
- struct hlsl_ir_load *coords_load;
struct hlsl_deref coords_deref;
struct hlsl_ir_var *coords;
+ struct hlsl_ir_node *zero;
VKD3D_ASSERT(dim_count < 4);
@@ -1108,23 +1072,12 @@ static struct hlsl_ir_node *add_zero_mipmap_level(struct hlsl_ctx *ctx, struct h
return NULL;
hlsl_init_simple_deref_from_var(&coords_deref, coords);
- if (!(store = hlsl_new_store_index(ctx, &coords_deref, NULL, index, (1u << dim_count) - 1, loc)))
- return NULL;
- hlsl_block_add_instr(block, store);
-
- if (!(zero = hlsl_new_uint_constant(ctx, 0, loc)))
- return NULL;
- hlsl_block_add_instr(block, zero);
+ hlsl_block_add_store_index(ctx, block, &coords_deref, NULL, index, (1u << dim_count) - 1, loc);
- if (!(store = hlsl_new_store_index(ctx, &coords_deref, NULL, zero, 1u << dim_count, loc)))
- return NULL;
- hlsl_block_add_instr(block, store);
-
- if (!(coords_load = hlsl_new_var_load(ctx, coords, loc)))
- return NULL;
- hlsl_block_add_instr(block, &coords_load->node);
+ zero = hlsl_block_add_uint_constant(ctx, block, 0, loc);
+ hlsl_block_add_store_index(ctx, block, &coords_deref, NULL, zero, 1u << dim_count, loc);
- return &coords_load->node;
+ return hlsl_block_add_simple_load(ctx, block, coords, loc);
}
static bool lower_complex_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
@@ -1133,7 +1086,6 @@ static bool lower_complex_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
struct hlsl_type *src_type, *dst_type;
struct hlsl_deref var_deref;
bool broadcast, matrix_cast;
- struct hlsl_ir_load *load;
struct hlsl_ir_node *arg;
struct hlsl_ir_var *var;
unsigned int dst_idx;
@@ -1172,7 +1124,6 @@ static bool lower_complex_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
{
struct hlsl_ir_node *component_load, *cast;
struct hlsl_type *dst_comp_type;
- struct hlsl_block store_block;
unsigned int src_idx;
if (broadcast)
@@ -1191,23 +1142,13 @@ static bool lower_complex_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
}
dst_comp_type = hlsl_type_get_component_type(ctx, dst_type, dst_idx);
+ component_load = hlsl_add_load_component(ctx, block, arg, src_idx, &arg->loc);
+ cast = hlsl_block_add_cast(ctx, block, component_load, dst_comp_type, &arg->loc);
- if (!(component_load = hlsl_add_load_component(ctx, block, arg, src_idx, &arg->loc)))
- return false;
-
- if (!(cast = hlsl_new_cast(ctx, component_load, dst_comp_type, &arg->loc)))
- return false;
- hlsl_block_add_instr(block, cast);
-
- if (!hlsl_new_store_component(ctx, &store_block, &var_deref, dst_idx, cast))
- return false;
- hlsl_block_add_block(block, &store_block);
+ hlsl_block_add_store_component(ctx, block, &var_deref, dst_idx, cast);
}
- if (!(load = hlsl_new_var_load(ctx, var, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
-
+ hlsl_block_add_simple_load(ctx, block, var, &instr->loc);
return true;
}
@@ -1219,7 +1160,6 @@ static bool lower_complex_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
static bool lower_matrix_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
struct hlsl_ir_swizzle *swizzle;
- struct hlsl_ir_load *var_load;
struct hlsl_deref var_deref;
struct hlsl_type *matrix_type;
struct hlsl_ir_var *var;
@@ -1238,23 +1178,15 @@ static bool lower_matrix_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *ins
for (i = 0; i < instr->data_type->e.numeric.dimx; ++i)
{
- struct hlsl_block store_block;
struct hlsl_ir_node *load;
k = swizzle->u.matrix.components[i].y * matrix_type->e.numeric.dimx + swizzle->u.matrix.components[i].x;
- if (!(load = hlsl_add_load_component(ctx, block, swizzle->val.node, k, &instr->loc)))
- return false;
-
- if (!hlsl_new_store_component(ctx, &store_block, &var_deref, i, load))
- return false;
- hlsl_block_add_block(block, &store_block);
+ load = hlsl_add_load_component(ctx, block, swizzle->val.node, k, &instr->loc);
+ hlsl_block_add_store_component(ctx, block, &var_deref, i, load);
}
- if (!(var_load = hlsl_new_var_load(ctx, var, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, &var_load->node);
-
+ hlsl_block_add_simple_load(ctx, block, var, &instr->loc);
return true;
}
@@ -1266,10 +1198,10 @@ static bool lower_matrix_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *ins
* resource access. */
static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *val, *store;
struct hlsl_deref var_deref;
struct hlsl_ir_index *index;
struct hlsl_ir_load *load;
+ struct hlsl_ir_node *val;
struct hlsl_ir_var *var;
if (instr->type != HLSL_IR_INDEX)
@@ -1282,7 +1214,6 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
unsigned int dim_count = hlsl_sampler_dim_count(val->data_type->sampler_dim);
struct hlsl_ir_node *coords = index->idx.node;
struct hlsl_resource_load_params params = {0};
- struct hlsl_ir_node *resource_load;
VKD3D_ASSERT(coords->data_type->class == HLSL_CLASS_VECTOR);
VKD3D_ASSERT(coords->data_type->e.numeric.type == HLSL_TYPE_UINT);
@@ -1295,10 +1226,7 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
params.resource = val;
params.coords = coords;
params.format = val->data_type->e.resource.format;
-
- if (!(resource_load = hlsl_new_resource_load(ctx, &params, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, resource_load);
+ hlsl_block_add_resource_load(ctx, block, &params, &instr->loc);
return true;
}
@@ -1306,9 +1234,7 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
return false;
hlsl_init_simple_deref_from_var(&var_deref, var);
- if (!(store = hlsl_new_simple_store(ctx, var, val)))
- return false;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_simple_store(ctx, block, var, val);
if (hlsl_index_is_noncontiguous(index))
{
@@ -1326,9 +1252,7 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
{
struct hlsl_ir_node *c;
- if (!(c = hlsl_new_uint_constant(ctx, i, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, c);
+ c = hlsl_block_add_uint_constant(ctx, block, i, &instr->loc);
if (!(load = hlsl_new_load_index(ctx, &var_deref, c, &instr->loc)))
return false;
@@ -1338,20 +1262,14 @@ static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
return false;
hlsl_block_add_instr(block, &load->node);
- if (!(store = hlsl_new_store_index(ctx, &row_deref, c, &load->node, 0, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_store_index(ctx, block, &row_deref, c, &load->node, 0, &instr->loc);
}
- if (!(load = hlsl_new_var_load(ctx, var, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
+ hlsl_block_add_simple_load(ctx, block, var, &instr->loc);
}
else
{
- if (!(load = hlsl_new_load_index(ctx, &var_deref, index->idx.node, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
+ hlsl_block_add_load_index(ctx, block, &var_deref, index->idx.node, &instr->loc);
}
return true;
}
@@ -1373,22 +1291,16 @@ static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, s
if (src_type->class <= HLSL_CLASS_VECTOR && dst_type->class <= HLSL_CLASS_VECTOR && src_type->e.numeric.dimx == 1)
{
- struct hlsl_ir_node *new_cast, *swizzle;
+ struct hlsl_ir_node *new_cast;
dst_scalar_type = hlsl_get_scalar_type(ctx, dst_type->e.numeric.type);
/* We need to preserve the cast since it might be doing more than just
* turning the scalar into a vector. */
- if (!(new_cast = hlsl_new_cast(ctx, cast->operands[0].node, dst_scalar_type, &cast->node.loc)))
- return false;
- hlsl_block_add_instr(block, new_cast);
+ new_cast = hlsl_block_add_cast(ctx, block, cast->operands[0].node, dst_scalar_type, &cast->node.loc);
if (dst_type->e.numeric.dimx != 1)
- {
- if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X),
- dst_type->e.numeric.dimx, new_cast, &cast->node.loc)))
- return false;
- hlsl_block_add_instr(block, swizzle);
- }
+ hlsl_block_add_swizzle(ctx, block, HLSL_SWIZZLE(X, X, X, X),
+ dst_type->e.numeric.dimx, new_cast, &cast->node.loc);
return true;
}
@@ -1398,7 +1310,7 @@ static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, s
/* Allocate a unique, ordered index to each instruction, which will be used for
* copy propagation and computing liveness ranges.
- * Index 0 means unused; index 1 means function entry, so start at 2. */
+ * Index 0 means unused, so start at 1. */
static unsigned int index_instructions(struct hlsl_block *block, unsigned int index)
{
struct hlsl_ir_node *instr;
@@ -1480,6 +1392,17 @@ static unsigned int index_instructions(struct hlsl_block *block, unsigned int in
*
* we can copy-prop the load (@7) into a constant vector {123, 456}, but we
* cannot easily vectorize the stores @3 and @6.
+ *
+ * Moreover, we implement a transformation that propagates loads with a single
+ * non-constant index in its deref path. Consider a load of the form
+ * var[[a0][a1]...[i]...[an]], where ak are integral constants, and i is an
+ * arbitrary non-constant node. If, for all j, the following holds:
+ *
+ * var[[a0][a1]...[j]...[an]] = x[[c0*j + d0][c1*j + d1]...[cm*j + dm]],
+ *
+ * where ck, dk are constants, then we can replace the load with
+ * x[[c0*i + d0]...[cm*i + dm]]. This pass is implemented by
+ * copy_propagation_replace_with_deref().
*/
struct copy_propagation_value
@@ -1704,16 +1627,25 @@ static void copy_propagation_invalidate_variable_from_deref_recurse(struct hlsl_
if (path_node->type == HLSL_IR_CONSTANT)
{
+ uint32_t index = hlsl_ir_constant(path_node)->value.u[0].u;
+
+ /* Don't bother invalidating anything if the index is constant but
+ * out-of-range.
+ * Such indices are illegal in HLSL, but only if the code is not
+ * dead, and we can't always know if code is dead without copy-prop
+ * itself. */
+ if (index >= hlsl_type_element_count(type))
+ return;
+
copy_propagation_invalidate_variable_from_deref_recurse(ctx, var_def, deref, subtype,
- depth + 1, hlsl_ir_constant(path_node)->value.u[0].u * subtype_comp_count,
- writemask, time);
+ depth + 1, comp_start + index * subtype_comp_count, writemask, time);
}
else
{
for (i = 0; i < hlsl_type_element_count(type); ++i)
{
copy_propagation_invalidate_variable_from_deref_recurse(ctx, var_def, deref, subtype,
- depth + 1, i * subtype_comp_count, writemask, time);
+ depth + 1, comp_start + i * subtype_comp_count, writemask, time);
}
}
}
@@ -1837,147 +1769,475 @@ static bool copy_propagation_replace_with_constant_vector(struct hlsl_ctx *ctx,
return true;
}
-static bool copy_propagation_transform_load(struct hlsl_ctx *ctx,
- struct hlsl_ir_load *load, struct copy_propagation_state *state)
+static bool component_index_from_deref_path_node(struct hlsl_ir_node *path_node,
+ struct hlsl_type *type, unsigned int *index)
{
- struct hlsl_type *type = load->node.data_type;
+ unsigned int idx, i;
+
+ if (path_node->type != HLSL_IR_CONSTANT)
+ return false;
+
+ idx = hlsl_ir_constant(path_node)->value.u[0].u;
+ *index = 0;
switch (type->class)
{
- case HLSL_CLASS_DEPTH_STENCIL_STATE:
- case HLSL_CLASS_SCALAR:
case HLSL_CLASS_VECTOR:
- case HLSL_CLASS_PIXEL_SHADER:
- case HLSL_CLASS_RASTERIZER_STATE:
- case HLSL_CLASS_SAMPLER:
- case HLSL_CLASS_STRING:
- case HLSL_CLASS_TEXTURE:
- case HLSL_CLASS_UAV:
- case HLSL_CLASS_VERTEX_SHADER:
- case HLSL_CLASS_COMPUTE_SHADER:
- case HLSL_CLASS_DOMAIN_SHADER:
- case HLSL_CLASS_HULL_SHADER:
- case HLSL_CLASS_RENDER_TARGET_VIEW:
- case HLSL_CLASS_DEPTH_STENCIL_VIEW:
- case HLSL_CLASS_GEOMETRY_SHADER:
- case HLSL_CLASS_BLEND_STATE:
- case HLSL_CLASS_STREAM_OUTPUT:
- case HLSL_CLASS_NULL:
+ if (idx >= type->e.numeric.dimx)
+ return false;
+ *index = idx;
break;
case HLSL_CLASS_MATRIX:
+ if (idx >= hlsl_type_major_size(type))
+ return false;
+ if (hlsl_type_is_row_major(type))
+ *index = idx * type->e.numeric.dimx;
+ else
+ *index = idx * type->e.numeric.dimy;
+ break;
+
case HLSL_CLASS_ARRAY:
+ if (idx >= type->e.array.elements_count)
+ return false;
+ *index = idx * hlsl_type_component_count(type->e.array.type);
+ break;
+
case HLSL_CLASS_STRUCT:
- /* We can't handle complex types here.
- * They should have been already split anyway by earlier passes,
- * but they may not have been deleted yet. We can't rely on DCE to
- * solve that problem for us, since we may be called on a partial
- * block, but DCE deletes dead stores, so it needs to be able to
- * see the whole program. */
- case HLSL_CLASS_ERROR:
- return false;
+ for (i = 0; i < idx; ++i)
+ *index += hlsl_type_component_count(type->e.record.fields[i].type);
+ break;
- case HLSL_CLASS_CONSTANT_BUFFER:
- case HLSL_CLASS_EFFECT_GROUP:
- case HLSL_CLASS_PASS:
- case HLSL_CLASS_TECHNIQUE:
- case HLSL_CLASS_VOID:
+ default:
vkd3d_unreachable();
}
- if (copy_propagation_replace_with_constant_vector(ctx, state, load, HLSL_SWIZZLE(X, Y, Z, W), &load->node))
- return true;
-
- if (copy_propagation_replace_with_single_instr(ctx, state, load, HLSL_SWIZZLE(X, Y, Z, W), &load->node))
- return true;
-
- return false;
+ return true;
}
-static bool copy_propagation_transform_swizzle(struct hlsl_ctx *ctx,
- struct hlsl_ir_swizzle *swizzle, struct copy_propagation_state *state)
+static bool nonconst_index_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref,
+ unsigned int *idx, unsigned int *base, unsigned int *scale, unsigned int *count)
{
- struct hlsl_ir_load *load;
-
- if (swizzle->val.node->type != HLSL_IR_LOAD)
- return false;
- load = hlsl_ir_load(swizzle->val.node);
+ struct hlsl_type *type = deref->var->data_type;
+ bool found = false;
+ unsigned int i;
- if (copy_propagation_replace_with_constant_vector(ctx, state, load, swizzle->u.vector, &swizzle->node))
- return true;
+ *base = 0;
- if (copy_propagation_replace_with_single_instr(ctx, state, load, swizzle->u.vector, &swizzle->node))
- return true;
+ for (i = 0; i < deref->path_len; ++i)
+ {
+ struct hlsl_ir_node *path_node = deref->path[i].node;
+ struct hlsl_type *next_type;
- return false;
-}
+ VKD3D_ASSERT(path_node);
-static bool copy_propagation_transform_object_load(struct hlsl_ctx *ctx,
- struct hlsl_deref *deref, struct copy_propagation_state *state, unsigned int time)
-{
- struct copy_propagation_value *value;
- struct hlsl_ir_load *load;
- unsigned int start, count;
+ /* We should always have generated a cast to UINT. */
+ VKD3D_ASSERT(hlsl_is_vec1(path_node->data_type) && path_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
- if (!hlsl_component_index_range_from_deref(ctx, deref, &start, &count))
- return false;
- VKD3D_ASSERT(count == 1);
+ next_type = hlsl_get_element_type_from_path_index(ctx, type, path_node);
- if (!(value = copy_propagation_get_value(state, deref->var, start, time)))
- return false;
- VKD3D_ASSERT(value->component == 0);
+ if (path_node->type != HLSL_IR_CONSTANT)
+ {
+ if (found)
+ return false;
+ found = true;
+ *idx = i;
+ *scale = hlsl_type_component_count(next_type);
+ *count = hlsl_type_element_count(type);
+ }
+ else
+ {
+ unsigned int index;
- /* Only HLSL_IR_LOAD can produce an object. */
- load = hlsl_ir_load(value->node);
+ if (!component_index_from_deref_path_node(path_node, type, &index))
+ return false;
+ *base += index;
+ }
- /* As we are replacing the instruction's deref (with the one in the hlsl_ir_load) and not the
- * instruction itself, we won't be able to rely on the value retrieved by
- * copy_propagation_get_value() for the new deref in subsequent iterations of copy propagation.
- * This is because another value may be written to that deref between the hlsl_ir_load and
- * this instruction.
- *
- * For this reason, we only replace the new deref when it corresponds to a uniform variable,
- * which cannot be written to.
- *
- * In a valid shader, all object references must resolve statically to a single uniform object.
- * If this is the case, we can expect copy propagation on regular store/loads and the other
- * compilation passes to replace all hlsl_ir_loads with loads to uniform objects, so this
- * implementation is complete, even with this restriction.
- */
- if (!load->src.var->is_uniform)
- {
- TRACE("Ignoring load from non-uniform object variable %s\n", load->src.var->name);
- return false;
+ type = next_type;
}
- hlsl_cleanup_deref(deref);
- hlsl_copy_deref(ctx, deref, &load->src);
-
- return true;
+ return found;
}
-static bool copy_propagation_transform_resource_load(struct hlsl_ctx *ctx,
- struct hlsl_ir_resource_load *load, struct copy_propagation_state *state)
+static struct hlsl_ir_node *new_affine_path_index(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc,
+ struct hlsl_block *block, struct hlsl_ir_node *index, int c, int d)
{
- bool progress = false;
+ struct hlsl_ir_node *c_node, *d_node, *ic, *idx;
+ bool use_uint = c >= 0 && d >= 0;
- progress |= copy_propagation_transform_object_load(ctx, &load->resource, state, load->node.index);
- if (load->sampler.var)
- progress |= copy_propagation_transform_object_load(ctx, &load->sampler, state, load->node.index);
- return progress;
-}
+ if (!c)
+ {
+ VKD3D_ASSERT(d >= 0);
-static bool copy_propagation_transform_resource_store(struct hlsl_ctx *ctx,
- struct hlsl_ir_resource_store *store, struct copy_propagation_state *state)
-{
- bool progress = false;
+ return hlsl_block_add_uint_constant(ctx, block, d, loc);
+ }
- progress |= copy_propagation_transform_object_load(ctx, &store->resource, state, store->node.index);
- return progress;
+ if (use_uint)
+ {
+ c_node = hlsl_block_add_uint_constant(ctx, block, c, loc);
+ d_node = hlsl_block_add_uint_constant(ctx, block, d, loc);
+ }
+ else
+ {
+ c_node = hlsl_block_add_int_constant(ctx, block, c, loc);
+ d_node = hlsl_block_add_int_constant(ctx, block, d, loc);
+ index = hlsl_block_add_cast(ctx, block, index, hlsl_get_scalar_type(ctx, HLSL_TYPE_INT), loc);
+ }
+
+ ic = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, index, c_node);
+ idx = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, ic, d_node);
+ if (!use_uint)
+ idx = hlsl_block_add_cast(ctx, block, idx, hlsl_get_scalar_type(ctx, HLSL_TYPE_UINT), loc);
+
+ return idx;
}
-static bool copy_propagation_transform_interlocked(struct hlsl_ctx *ctx,
- struct hlsl_ir_interlocked *interlocked, struct copy_propagation_state *state)
+static bool copy_propagation_replace_with_deref(struct hlsl_ctx *ctx,
+ const struct copy_propagation_state *state, const struct hlsl_ir_load *load,
+ uint32_t swizzle, struct hlsl_ir_node *instr)
+{
+ const unsigned int instr_component_count = hlsl_type_component_count(instr->data_type);
+ unsigned int nonconst_i = 0, base, scale, count;
+ struct hlsl_ir_node *index, *new_instr = NULL;
+ const struct hlsl_deref *deref = &load->src;
+ const struct hlsl_ir_var *var = deref->var;
+ unsigned int time = load->node.index;
+ struct hlsl_deref tmp_deref = {0};
+ struct hlsl_ir_load *new_load;
+ struct hlsl_ir_var *x = NULL;
+ int *c = NULL, *d = NULL;
+ uint32_t ret_swizzle = 0;
+ struct hlsl_block block;
+ unsigned int path_len;
+ bool success = false;
+ int i, j, k;
+
+ if (!nonconst_index_from_deref(ctx, deref, &nonconst_i, &base, &scale, &count))
+ return false;
+
+ VKD3D_ASSERT(count);
+
+ hlsl_block_init(&block);
+
+ index = deref->path[nonconst_i].node;
+
+ /* Iterate over the nonconst index, and check if their values all have the form
+ * x[[c0*i + d0][c1*i + d1]...[cm*i + dm]], and determine the constants c, d. */
+ for (i = 0; i < count; ++i)
+ {
+ unsigned int start = base + scale * i;
+ struct copy_propagation_value *value;
+ struct hlsl_ir_load *idx;
+ uint32_t cur_swizzle = 0;
+
+ if (!(value = copy_propagation_get_value(state, var,
+ start + hlsl_swizzle_get_component(swizzle, 0), time)))
+ goto done;
+
+ if (value->node->type != HLSL_IR_LOAD)
+ goto done;
+ idx = hlsl_ir_load(value->node);
+
+ if (!x)
+ x = idx->src.var;
+ else if (x != idx->src.var)
+ goto done;
+
+ if (hlsl_version_lt(ctx, 4, 0) && x->is_uniform && ctx->profile->type != VKD3D_SHADER_TYPE_VERTEX)
+ {
+ TRACE("Skipping propagating non-constant deref to SM1 uniform %s.\n", var->name);
+ goto done;
+ }
+
+ if (i == 0)
+ {
+ path_len = idx->src.path_len;
+
+ if (path_len)
+ {
+ if (!(c = hlsl_calloc(ctx, path_len, sizeof(c[0])))
+ || !(d = hlsl_alloc(ctx, path_len * sizeof(d[0]))))
+ goto done;
+ }
+
+ for (k = 0; k < path_len; ++k)
+ {
+ if (idx->src.path[k].node->type != HLSL_IR_CONSTANT)
+ goto done;
+ d[k] = hlsl_ir_constant(idx->src.path[k].node)->value.u[0].u;
+ }
+
+ }
+ else if (i == 1)
+ {
+ struct hlsl_type *type = idx->src.var->data_type;
+
+ if (idx->src.path_len != path_len)
+ goto done;
+
+ /* Calculate constants c and d based on the first two path indices. */
+ for (k = 0; k < path_len; ++k)
+ {
+ int ix;
+
+ if (idx->src.path[k].node->type != HLSL_IR_CONSTANT)
+ goto done;
+ ix = hlsl_ir_constant(idx->src.path[k].node)->value.u[0].u;
+ c[k] = ix - d[k];
+ d[k] = ix - c[k] * i;
+
+ if (c[k] && type->class == HLSL_CLASS_STRUCT)
+ goto done;
+
+ type = hlsl_get_element_type_from_path_index(ctx, type, idx->src.path[k].node);
+ }
+ }
+ else
+ {
+ if (idx->src.path_len != path_len)
+ goto done;
+
+ /* Check that this load has the form x[[c0*i +d0][c1*i + d1]...[cm*i + dm]]. */
+ for (k = 0; k < path_len; ++k)
+ {
+ if (idx->src.path[k].node->type != HLSL_IR_CONSTANT)
+ goto done;
+ if (hlsl_ir_constant(idx->src.path[k].node)->value.u[0].u != c[k] * i + d[k])
+ goto done;
+ }
+ }
+
+ hlsl_swizzle_set_component(&cur_swizzle, 0, value->component);
+
+ for (j = 1; j < instr_component_count; ++j)
+ {
+ struct copy_propagation_value *val;
+
+ if (!(val = copy_propagation_get_value(state, var,
+ start + hlsl_swizzle_get_component(swizzle, j), time)))
+ goto done;
+ if (val->node != &idx->node)
+ goto done;
+
+ hlsl_swizzle_set_component(&cur_swizzle, j, val->component);
+ }
+
+ if (i == 0)
+ ret_swizzle = cur_swizzle;
+ else if (ret_swizzle != cur_swizzle)
+ goto done;
+ }
+
+ if (!hlsl_init_deref(ctx, &tmp_deref, x, path_len))
+ goto done;
+
+ for (k = 0; k < path_len; ++k)
+ {
+ hlsl_src_from_node(&tmp_deref.path[k],
+ new_affine_path_index(ctx, &load->node.loc, &block, index, c[k], d[k]));
+ }
+
+ if (!(new_load = hlsl_new_load_index(ctx, &tmp_deref, NULL, &load->node.loc)))
+ goto done;
+ new_instr = &new_load->node;
+ hlsl_block_add_instr(&block, new_instr);
+
+ if (new_instr->data_type->class == HLSL_CLASS_SCALAR || new_instr->data_type->class == HLSL_CLASS_VECTOR)
+ new_instr = hlsl_block_add_swizzle(ctx, &block, ret_swizzle, instr_component_count, new_instr, &instr->loc);
+
+ if (TRACE_ON())
+ {
+ struct vkd3d_string_buffer buffer;
+
+ vkd3d_string_buffer_init(&buffer);
+
+ vkd3d_string_buffer_printf(&buffer, "Load from %s[", var->name);
+ for (j = 0; j < deref->path_len; ++j)
+ {
+ if (j == nonconst_i)
+ vkd3d_string_buffer_printf(&buffer, "[i]");
+ else
+ vkd3d_string_buffer_printf(&buffer, "[%u]", hlsl_ir_constant(deref->path[j].node)->value.u[0].u);
+ }
+ vkd3d_string_buffer_printf(&buffer, "]%s propagated as %s[",
+ debug_hlsl_swizzle(swizzle, instr_component_count), tmp_deref.var->name);
+ for (k = 0; k < path_len; ++k)
+ {
+ if (c[k])
+ vkd3d_string_buffer_printf(&buffer, "[i*%d + %d]", c[k], d[k]);
+ else
+ vkd3d_string_buffer_printf(&buffer, "[%d]", d[k]);
+ }
+ vkd3d_string_buffer_printf(&buffer, "]%s (i = %p).\n",
+ debug_hlsl_swizzle(ret_swizzle, instr_component_count), index);
+
+ vkd3d_string_buffer_trace(&buffer);
+ vkd3d_string_buffer_cleanup(&buffer);
+ }
+
+ list_move_before(&instr->entry, &block.instrs);
+ hlsl_replace_node(instr, new_instr);
+ success = true;
+
+done:
+ hlsl_cleanup_deref(&tmp_deref);
+ hlsl_block_cleanup(&block);
+ vkd3d_free(c);
+ vkd3d_free(d);
+ return success;
+}
+
+static bool copy_propagation_transform_load(struct hlsl_ctx *ctx,
+ struct hlsl_ir_load *load, struct copy_propagation_state *state)
+{
+ struct hlsl_type *type = load->node.data_type;
+
+ switch (type->class)
+ {
+ case HLSL_CLASS_DEPTH_STENCIL_STATE:
+ case HLSL_CLASS_SCALAR:
+ case HLSL_CLASS_VECTOR:
+ case HLSL_CLASS_PIXEL_SHADER:
+ case HLSL_CLASS_RASTERIZER_STATE:
+ case HLSL_CLASS_SAMPLER:
+ case HLSL_CLASS_STRING:
+ case HLSL_CLASS_TEXTURE:
+ case HLSL_CLASS_UAV:
+ case HLSL_CLASS_VERTEX_SHADER:
+ case HLSL_CLASS_COMPUTE_SHADER:
+ case HLSL_CLASS_DOMAIN_SHADER:
+ case HLSL_CLASS_HULL_SHADER:
+ case HLSL_CLASS_RENDER_TARGET_VIEW:
+ case HLSL_CLASS_DEPTH_STENCIL_VIEW:
+ case HLSL_CLASS_GEOMETRY_SHADER:
+ case HLSL_CLASS_BLEND_STATE:
+ case HLSL_CLASS_STREAM_OUTPUT:
+ case HLSL_CLASS_NULL:
+ break;
+
+ case HLSL_CLASS_MATRIX:
+ case HLSL_CLASS_ARRAY:
+ case HLSL_CLASS_STRUCT:
+ /* We can't handle complex types here.
+ * They should have been already split anyway by earlier passes,
+ * but they may not have been deleted yet. We can't rely on DCE to
+ * solve that problem for us, since we may be called on a partial
+ * block, but DCE deletes dead stores, so it needs to be able to
+ * see the whole program. */
+ case HLSL_CLASS_ERROR:
+ return false;
+
+ case HLSL_CLASS_CONSTANT_BUFFER:
+ case HLSL_CLASS_EFFECT_GROUP:
+ case HLSL_CLASS_PASS:
+ case HLSL_CLASS_TECHNIQUE:
+ case HLSL_CLASS_VOID:
+ vkd3d_unreachable();
+ }
+
+ if (copy_propagation_replace_with_constant_vector(ctx, state, load, HLSL_SWIZZLE(X, Y, Z, W), &load->node))
+ return true;
+
+ if (copy_propagation_replace_with_single_instr(ctx, state, load, HLSL_SWIZZLE(X, Y, Z, W), &load->node))
+ return true;
+
+ if (copy_propagation_replace_with_deref(ctx, state, load, HLSL_SWIZZLE(X, Y, Z, W), &load->node))
+ return true;
+
+ return false;
+}
+
+static bool copy_propagation_transform_swizzle(struct hlsl_ctx *ctx,
+ struct hlsl_ir_swizzle *swizzle, struct copy_propagation_state *state)
+{
+ struct hlsl_ir_load *load;
+
+ if (swizzle->val.node->type != HLSL_IR_LOAD)
+ return false;
+ load = hlsl_ir_load(swizzle->val.node);
+
+ if (copy_propagation_replace_with_constant_vector(ctx, state, load, swizzle->u.vector, &swizzle->node))
+ return true;
+
+ if (copy_propagation_replace_with_single_instr(ctx, state, load, swizzle->u.vector, &swizzle->node))
+ return true;
+
+ if (copy_propagation_replace_with_deref(ctx, state, load, swizzle->u.vector, &swizzle->node))
+ return true;
+
+ return false;
+}
+
+static bool copy_propagation_transform_object_load(struct hlsl_ctx *ctx,
+ struct hlsl_deref *deref, struct copy_propagation_state *state, unsigned int time)
+{
+ struct copy_propagation_value *value;
+ struct hlsl_ir_load *load;
+ unsigned int start, count;
+
+ if (!hlsl_component_index_range_from_deref(ctx, deref, &start, &count))
+ return false;
+ VKD3D_ASSERT(count == 1);
+
+ if (!(value = copy_propagation_get_value(state, deref->var, start, time)))
+ return false;
+ VKD3D_ASSERT(value->component == 0);
+
+ /* A uniform object should have never been written to. */
+ VKD3D_ASSERT(!deref->var->is_uniform);
+
+ /* Only HLSL_IR_LOAD can produce an object. */
+ load = hlsl_ir_load(value->node);
+
+ /* As we are replacing the instruction's deref (with the one in the hlsl_ir_load) and not the
+ * instruction itself, we won't be able to rely on the value retrieved by
+ * copy_propagation_get_value() for the new deref in subsequent iterations of copy propagation.
+ * This is because another value may be written to that deref between the hlsl_ir_load and
+ * this instruction.
+ *
+ * For this reason, we only replace the new deref when it corresponds to a uniform variable,
+ * which cannot be written to.
+ *
+ * In a valid shader, all object references must resolve statically to a single uniform object.
+ * If this is the case, we can expect copy propagation on regular store/loads and the other
+ * compilation passes to replace all hlsl_ir_loads with loads to uniform objects, so this
+ * implementation is complete, even with this restriction.
+ */
+ if (!load->src.var->is_uniform)
+ {
+ TRACE("Ignoring load from non-uniform object variable %s\n", load->src.var->name);
+ return false;
+ }
+
+ hlsl_cleanup_deref(deref);
+ hlsl_copy_deref(ctx, deref, &load->src);
+
+ return true;
+}
+
+static bool copy_propagation_transform_resource_load(struct hlsl_ctx *ctx,
+ struct hlsl_ir_resource_load *load, struct copy_propagation_state *state)
+{
+ bool progress = false;
+
+ progress |= copy_propagation_transform_object_load(ctx, &load->resource, state, load->node.index);
+ if (load->sampler.var)
+ progress |= copy_propagation_transform_object_load(ctx, &load->sampler, state, load->node.index);
+ return progress;
+}
+
+static bool copy_propagation_transform_resource_store(struct hlsl_ctx *ctx,
+ struct hlsl_ir_resource_store *store, struct copy_propagation_state *state)
+{
+ bool progress = false;
+
+ progress |= copy_propagation_transform_object_load(ctx, &store->resource, state, store->node.index);
+ return progress;
+}
+
+static bool copy_propagation_transform_interlocked(struct hlsl_ctx *ctx,
+ struct hlsl_ir_interlocked *interlocked, struct copy_propagation_state *state)
{
bool progress = false;
@@ -2067,167 +2327,718 @@ static void copy_propagation_invalidate_from_block(struct hlsl_ctx *ctx, struct
break;
}
- default:
- break;
+ default:
+ break;
+ }
+ }
+}
+
+static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct copy_propagation_state *state);
+
+static bool copy_propagation_process_if(struct hlsl_ctx *ctx, struct hlsl_ir_if *iff,
+ struct copy_propagation_state *state)
+{
+ bool progress = false;
+
+ copy_propagation_push_scope(state, ctx);
+ progress |= copy_propagation_transform_block(ctx, &iff->then_block, state);
+ if (state->stopped)
+ return progress;
+ copy_propagation_pop_scope(state);
+
+ copy_propagation_push_scope(state, ctx);
+ progress |= copy_propagation_transform_block(ctx, &iff->else_block, state);
+ if (state->stopped)
+ return progress;
+ copy_propagation_pop_scope(state);
+
+ /* Ideally we'd invalidate the outer state looking at what was
+ * touched in the two inner states, but this doesn't work for
+ * loops (because we need to know what is invalidated in advance),
+ * so we need copy_propagation_invalidate_from_block() anyway. */
+ copy_propagation_invalidate_from_block(ctx, state, &iff->then_block, iff->node.index);
+ copy_propagation_invalidate_from_block(ctx, state, &iff->else_block, iff->node.index);
+
+ return progress;
+}
+
+static bool copy_propagation_process_loop(struct hlsl_ctx *ctx, struct hlsl_ir_loop *loop,
+ struct copy_propagation_state *state)
+{
+ bool progress = false;
+
+ copy_propagation_invalidate_from_block(ctx, state, &loop->body, loop->node.index);
+ copy_propagation_invalidate_from_block(ctx, state, &loop->iter, loop->node.index);
+
+ copy_propagation_push_scope(state, ctx);
+ progress |= copy_propagation_transform_block(ctx, &loop->body, state);
+ if (state->stopped)
+ return progress;
+ copy_propagation_pop_scope(state);
+
+ return progress;
+}
+
+static bool copy_propagation_process_switch(struct hlsl_ctx *ctx, struct hlsl_ir_switch *s,
+ struct copy_propagation_state *state)
+{
+ struct hlsl_ir_switch_case *c;
+ bool progress = false;
+
+ LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
+ {
+ copy_propagation_push_scope(state, ctx);
+ progress |= copy_propagation_transform_block(ctx, &c->body, state);
+ if (state->stopped)
+ return progress;
+ copy_propagation_pop_scope(state);
+ }
+
+ LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
+ {
+ copy_propagation_invalidate_from_block(ctx, state, &c->body, s->node.index);
+ }
+
+ return progress;
+}
+
+static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct copy_propagation_state *state)
+{
+ struct hlsl_ir_node *instr, *next;
+ bool progress = false;
+
+ LIST_FOR_EACH_ENTRY_SAFE(instr, next, &block->instrs, struct hlsl_ir_node, entry)
+ {
+ if (instr == state->stop)
+ {
+ state->stopped = true;
+ return progress;
+ }
+
+ switch (instr->type)
+ {
+ case HLSL_IR_LOAD:
+ progress |= copy_propagation_transform_load(ctx, hlsl_ir_load(instr), state);
+ break;
+
+ case HLSL_IR_RESOURCE_LOAD:
+ progress |= copy_propagation_transform_resource_load(ctx, hlsl_ir_resource_load(instr), state);
+ break;
+
+ case HLSL_IR_RESOURCE_STORE:
+ progress |= copy_propagation_transform_resource_store(ctx, hlsl_ir_resource_store(instr), state);
+ break;
+
+ case HLSL_IR_STORE:
+ copy_propagation_record_store(ctx, hlsl_ir_store(instr), state);
+ break;
+
+ case HLSL_IR_SWIZZLE:
+ progress |= copy_propagation_transform_swizzle(ctx, hlsl_ir_swizzle(instr), state);
+ break;
+
+ case HLSL_IR_IF:
+ progress |= copy_propagation_process_if(ctx, hlsl_ir_if(instr), state);
+ break;
+
+ case HLSL_IR_LOOP:
+ progress |= copy_propagation_process_loop(ctx, hlsl_ir_loop(instr), state);
+ break;
+
+ case HLSL_IR_SWITCH:
+ progress |= copy_propagation_process_switch(ctx, hlsl_ir_switch(instr), state);
+ break;
+
+ case HLSL_IR_INTERLOCKED:
+ progress |= copy_propagation_transform_interlocked(ctx, hlsl_ir_interlocked(instr), state);
+
+ default:
+ break;
+ }
+
+ if (state->stopped)
+ return progress;
+ }
+
+ return progress;
+}
+
+bool hlsl_copy_propagation_execute(struct hlsl_ctx *ctx, struct hlsl_block *block)
+{
+ struct copy_propagation_state state;
+ bool progress;
+
+ if (ctx->result)
+ return false;
+
+ index_instructions(block, 1);
+
+ copy_propagation_state_init(&state, ctx);
+
+ progress = copy_propagation_transform_block(ctx, block, &state);
+
+ copy_propagation_state_destroy(&state);
+
+ return progress;
+}
+
+enum validation_result
+{
+ DEREF_VALIDATION_OK,
+ DEREF_VALIDATION_OUT_OF_BOUNDS,
+ DEREF_VALIDATION_NOT_CONSTANT,
+};
+
+struct vectorize_exprs_state
+{
+ struct vectorizable_exprs_group
+ {
+ struct hlsl_block *block;
+ struct hlsl_ir_expr *exprs[4];
+ uint8_t expr_count, component_count;
+ } *groups;
+ size_t count, capacity;
+};
+
+static bool is_same_vectorizable_source(struct hlsl_ir_node *a, struct hlsl_ir_node *b)
+{
+ /* TODO: We can also vectorize different constants. */
+
+ if (a->type == HLSL_IR_SWIZZLE)
+ a = hlsl_ir_swizzle(a)->val.node;
+ if (b->type == HLSL_IR_SWIZZLE)
+ b = hlsl_ir_swizzle(b)->val.node;
+
+ return a == b;
+}
+
+static bool is_same_vectorizable_expr(struct hlsl_ir_expr *a, struct hlsl_ir_expr *b)
+{
+ if (a->op != b->op)
+ return false;
+
+ for (size_t j = 0; j < HLSL_MAX_OPERANDS; ++j)
+ {
+ if (!a->operands[j].node)
+ break;
+ if (!is_same_vectorizable_source(a->operands[j].node, b->operands[j].node))
+ return false;
+ }
+
+ return true;
+}
+
+static void record_vectorizable_expr(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_expr *expr, struct vectorize_exprs_state *state)
+{
+ if (expr->node.data_type->class > HLSL_CLASS_VECTOR)
+ return;
+
+ /* These are the only current ops that are not per-component. */
+ if (expr->op == HLSL_OP1_COS_REDUCED || expr->op == HLSL_OP1_SIN_REDUCED
+ || expr->op == HLSL_OP2_DOT || expr->op == HLSL_OP3_DP2ADD)
+ return;
+
+ for (size_t i = 0; i < state->count; ++i)
+ {
+ struct vectorizable_exprs_group *group = &state->groups[i];
+ struct hlsl_ir_expr *other = group->exprs[0];
+
+ /* These are SSA instructions, which means they have the same value
+ * regardless of what block they're in. However, being in different
+ * blocks may mean that one expression or the other is not always
+ * executed. */
+
+ if (expr->node.data_type->e.numeric.dimx + group->component_count <= 4
+ && group->block == block
+ && is_same_vectorizable_expr(expr, other))
+ {
+ group->exprs[group->expr_count++] = expr;
+ group->component_count += expr->node.data_type->e.numeric.dimx;
+ return;
+ }
+ }
+
+ if (!hlsl_array_reserve(ctx, (void **)&state->groups,
+ &state->capacity, state->count + 1, sizeof(*state->groups)))
+ return;
+ state->groups[state->count].block = block;
+ state->groups[state->count].exprs[0] = expr;
+ state->groups[state->count].expr_count = 1;
+ state->groups[state->count].component_count = expr->node.data_type->e.numeric.dimx;
+ ++state->count;
+}
+
+static void find_vectorizable_expr_groups(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct vectorize_exprs_state *state)
+{
+ struct hlsl_ir_node *instr;
+
+ LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry)
+ {
+ if (instr->type == HLSL_IR_EXPR)
+ {
+ record_vectorizable_expr(ctx, block, hlsl_ir_expr(instr), state);
+ }
+ else if (instr->type == HLSL_IR_IF)
+ {
+ struct hlsl_ir_if *iff = hlsl_ir_if(instr);
+
+ find_vectorizable_expr_groups(ctx, &iff->then_block, state);
+ find_vectorizable_expr_groups(ctx, &iff->else_block, state);
+ }
+ else if (instr->type == HLSL_IR_LOOP)
+ {
+ find_vectorizable_expr_groups(ctx, &hlsl_ir_loop(instr)->body, state);
+ }
+ else if (instr->type == HLSL_IR_SWITCH)
+ {
+ struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
+ struct hlsl_ir_switch_case *c;
+
+ LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
+ find_vectorizable_expr_groups(ctx, &c->body, state);
+ }
+ }
+}
+
+/* Combine sequences like
+ *
+ * 3: @1.x
+ * 4: @2.x
+ * 5: @3 * @4
+ * 6: @1.y
+ * 7: @2.x
+ * 8: @6 * @7
+ *
+ * into
+ *
+ * 5_1: @1.xy
+ * 5_2: @2.xx
+ * 5_3: @5_1 * @5_2
+ * 5: @5_3.x
+ * 8: @5_3.y
+ *
+ * Each operand to an expression needs to refer to the same ultimate source
+ * (in this case @1 and @2 respectively), but can be a swizzle thereof.
+ *
+ * In practice the swizzles @5 and @8 can generally then be vectorized again,
+ * either as part of another expression, or as part of a store.
+ */
+static bool vectorize_exprs(struct hlsl_ctx *ctx, struct hlsl_block *block)
+{
+ struct vectorize_exprs_state state = {0};
+ bool progress = false;
+
+ find_vectorizable_expr_groups(ctx, block, &state);
+
+ for (unsigned int i = 0; i < state.count; ++i)
+ {
+ struct vectorizable_exprs_group *group = &state.groups[i];
+ struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {0};
+ uint32_t swizzles[HLSL_MAX_OPERANDS] = {0};
+ struct hlsl_ir_node *arg, *combined;
+ unsigned int component_count = 0;
+ struct hlsl_type *combined_type;
+ struct hlsl_block new_block;
+ struct hlsl_ir_expr *expr;
+
+ if (group->expr_count == 1)
+ continue;
+
+ hlsl_block_init(&new_block);
+
+ for (unsigned int j = 0; j < group->expr_count; ++j)
+ {
+ expr = group->exprs[j];
+
+ for (unsigned int a = 0; a < HLSL_MAX_OPERANDS; ++a)
+ {
+ uint32_t arg_swizzle;
+
+ if (!(arg = expr->operands[a].node))
+ break;
+
+ if (arg->type == HLSL_IR_SWIZZLE)
+ arg_swizzle = hlsl_ir_swizzle(arg)->u.vector;
+ else
+ arg_swizzle = HLSL_SWIZZLE(X, Y, Z, W);
+
+ /* Mask out the invalid components. */
+ arg_swizzle &= (1u << VKD3D_SHADER_SWIZZLE_SHIFT(arg->data_type->e.numeric.dimx)) - 1;
+ swizzles[a] |= arg_swizzle << VKD3D_SHADER_SWIZZLE_SHIFT(component_count);
+ }
+
+ component_count += expr->node.data_type->e.numeric.dimx;
+ }
+
+ expr = group->exprs[0];
+ for (unsigned int a = 0; a < HLSL_MAX_OPERANDS; ++a)
+ {
+ if (!(arg = expr->operands[a].node))
+ break;
+ if (arg->type == HLSL_IR_SWIZZLE)
+ arg = hlsl_ir_swizzle(arg)->val.node;
+ args[a] = hlsl_block_add_swizzle(ctx, &new_block, swizzles[a], component_count, arg, &arg->loc);
+ }
+
+ combined_type = hlsl_get_vector_type(ctx, expr->node.data_type->e.numeric.type, component_count);
+ combined = hlsl_block_add_expr(ctx, &new_block, expr->op, args, combined_type, &expr->node.loc);
+
+ list_move_before(&expr->node.entry, &new_block.instrs);
+
+ TRACE("Combining %u %s instructions into %p.\n", group->expr_count,
+ debug_hlsl_expr_op(group->exprs[0]->op), combined);
+
+ component_count = 0;
+ for (unsigned int j = 0; j < group->expr_count; ++j)
+ {
+ struct hlsl_ir_node *replacement;
+
+ expr = group->exprs[j];
+
+ if (!(replacement = hlsl_new_swizzle(ctx,
+ HLSL_SWIZZLE(X, Y, Z, W) >> VKD3D_SHADER_SWIZZLE_SHIFT(component_count),
+ expr->node.data_type->e.numeric.dimx, combined, &expr->node.loc)))
+ goto out;
+ component_count += expr->node.data_type->e.numeric.dimx;
+ list_add_before(&expr->node.entry, &replacement->entry);
+ hlsl_replace_node(&expr->node, replacement);
}
+
+ progress = true;
}
+
+out:
+ vkd3d_free(state.groups);
+ return progress;
}
-static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_block *block,
- struct copy_propagation_state *state);
+struct vectorize_stores_state
+{
+ struct vectorizable_stores_group
+ {
+ struct hlsl_block *block;
+ /* We handle overlapping stores, because it's not really easier not to.
+ * In theory, then, we could collect an arbitrary number of stores here.
+ *
+ * In practice, overlapping stores are unlikely, and of course at most
+ * 4 stores can appear without overlap. Therefore, for simplicity, we
+ * just use a fixed array of 4.
+ *
+ * Since computing the writemask requires traversing the deref, and we
+ * need to do that anyway, we store it here for convenience. */
+ struct hlsl_ir_store *stores[4];
+ unsigned int path_len;
+ uint8_t writemasks[4];
+ uint8_t store_count;
+ bool dirty;
+ } *groups;
+ size_t count, capacity;
+};
-static bool copy_propagation_process_if(struct hlsl_ctx *ctx, struct hlsl_ir_if *iff,
- struct copy_propagation_state *state)
+/* This must be a store to a subsection of a vector.
+ * In theory we can also vectorize stores to packed struct fields,
+ * but this requires target-specific knowledge and is probably best left
+ * to a VSIR pass. */
+static bool can_vectorize_store(struct hlsl_ctx *ctx, struct hlsl_ir_store *store,
+ unsigned int *path_len, uint8_t *writemask)
{
- bool progress = false;
+ struct hlsl_type *type = store->lhs.var->data_type;
+ unsigned int i;
- copy_propagation_push_scope(state, ctx);
- progress |= copy_propagation_transform_block(ctx, &iff->then_block, state);
- if (state->stopped)
- return progress;
- copy_propagation_pop_scope(state);
+ if (store->rhs.node->data_type->class > HLSL_CLASS_VECTOR)
+ return false;
- copy_propagation_push_scope(state, ctx);
- progress |= copy_propagation_transform_block(ctx, &iff->else_block, state);
- if (state->stopped)
- return progress;
- copy_propagation_pop_scope(state);
+ if (type->class == HLSL_CLASS_SCALAR)
+ return false;
- /* Ideally we'd invalidate the outer state looking at what was
- * touched in the two inner states, but this doesn't work for
- * loops (because we need to know what is invalidated in advance),
- * so we need copy_propagation_invalidate_from_block() anyway. */
- copy_propagation_invalidate_from_block(ctx, state, &iff->then_block, iff->node.index);
- copy_propagation_invalidate_from_block(ctx, state, &iff->else_block, iff->node.index);
+ for (i = 0; type->class != HLSL_CLASS_VECTOR && i < store->lhs.path_len; ++i)
+ type = hlsl_get_element_type_from_path_index(ctx, type, store->lhs.path[i].node);
- return progress;
+ if (type->class != HLSL_CLASS_VECTOR)
+ return false;
+
+ *path_len = i;
+
+ if (i < store->lhs.path_len)
+ {
+ struct hlsl_ir_constant *c;
+
+ /* This is a store to a scalar component of a vector, achieved via
+ * indexing. */
+
+ if (store->lhs.path[i].node->type != HLSL_IR_CONSTANT)
+ return false;
+ c = hlsl_ir_constant(store->lhs.path[i].node);
+ *writemask = (1u << c->value.u[0].u);
+ }
+ else
+ {
+ *writemask = store->writemask;
+ }
+
+ return true;
}
-static bool copy_propagation_process_loop(struct hlsl_ctx *ctx, struct hlsl_ir_loop *loop,
- struct copy_propagation_state *state)
+static bool derefs_are_same_vector(struct hlsl_ctx *ctx, const struct hlsl_deref *a, const struct hlsl_deref *b)
{
- bool progress = false;
+ struct hlsl_type *type = a->var->data_type;
- copy_propagation_invalidate_from_block(ctx, state, &loop->body, loop->node.index);
- copy_propagation_invalidate_from_block(ctx, state, &loop->iter, loop->node.index);
+ if (a->var != b->var)
+ return false;
- copy_propagation_push_scope(state, ctx);
- progress |= copy_propagation_transform_block(ctx, &loop->body, state);
- if (state->stopped)
- return progress;
- copy_propagation_pop_scope(state);
+ for (unsigned int i = 0; type->class != HLSL_CLASS_VECTOR && i < a->path_len && i < b->path_len; ++i)
+ {
+ if (a->path[i].node != b->path[i].node)
+ return false;
+ type = hlsl_get_element_type_from_path_index(ctx, type, a->path[i].node);
+ }
- return progress;
+ return true;
}
-static bool copy_propagation_process_switch(struct hlsl_ctx *ctx, struct hlsl_ir_switch *s,
- struct copy_propagation_state *state)
+static void record_vectorizable_store(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct hlsl_ir_store *store, struct vectorize_stores_state *state)
{
- struct hlsl_ir_switch_case *c;
- bool progress = false;
+ unsigned int path_len;
+ uint8_t writemask;
- LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
+ if (!can_vectorize_store(ctx, store, &path_len, &writemask))
{
- copy_propagation_push_scope(state, ctx);
- progress |= copy_propagation_transform_block(ctx, &c->body, state);
- if (state->stopped)
- return progress;
- copy_propagation_pop_scope(state);
+ /* In the case of a dynamically indexed vector, we must invalidate
+ * any groups that statically index the same vector.
+ * For the sake of expediency, we go one step further and invalidate
+ * any groups that store to the same variable.
+ * (We also don't check that that was the reason why this store isn't
+ * vectorizable.)
+ * We could be more granular, but we'll defer that until it comes
+ * up in practice. */
+ for (size_t i = 0; i < state->count; ++i)
+ {
+ if (state->groups[i].stores[0]->lhs.var == store->lhs.var)
+ state->groups[i].dirty = true;
+ }
+ return;
}
- LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
+ for (size_t i = 0; i < state->count; ++i)
{
- copy_propagation_invalidate_from_block(ctx, state, &c->body, s->node.index);
+ struct vectorizable_stores_group *group = &state->groups[i];
+ struct hlsl_ir_store *other = group->stores[0];
+
+ if (group->dirty)
+ continue;
+
+ if (derefs_are_same_vector(ctx, &store->lhs, &other->lhs))
+ {
+ /* Stores must be in the same CFG block. If they're not,
+ * they're not executed in exactly the same flow, and
+ * therefore can't be vectorized. */
+ if (group->block == block
+ && is_same_vectorizable_source(store->rhs.node, other->rhs.node))
+ {
+ if (group->store_count < ARRAY_SIZE(group->stores))
+ {
+ group->stores[group->store_count] = store;
+ group->writemasks[group->store_count] = writemask;
+ ++group->store_count;
+ return;
+ }
+ }
+ else
+ {
+ /* A store to the same vector with a different source, or in
+ * a different CFG block, invalidates any earlier store.
+ *
+ * A store to a component which *contains* the vector in
+ * question would also invalidate, but we should have split all
+ * of those by the time we get here. */
+ group->dirty = true;
+
+ /* Note that we do exit this loop early if we find a store A we
+ * can vectorize with, but that's fine. If there was a store B
+ * also in the state that we can't vectorize with, it would
+ * already have invalidated A. */
+ }
+ }
+ else
+ {
+ /* This could still be a store to the same vector, if e.g. the
+ * vector is part of a dynamically indexed array, or the path has
+ * two equivalent instructions which refer to the same component.
+ * [CSE may help with the latter, but we don't have it yet,
+ * and we shouldn't depend on it anyway.]
+ * For the sake of expediency, we just invalidate it if it refers
+ * to the same variable at all.
+ * As above, we could be more granular, but we'll defer that until
+ * it comes up in practice. */
+ if (store->lhs.var == other->lhs.var)
+ group->dirty = true;
+
+ /* As above, we don't need to worry about exiting the loop early. */
+ }
}
- return progress;
+ if (!hlsl_array_reserve(ctx, (void **)&state->groups,
+ &state->capacity, state->count + 1, sizeof(*state->groups)))
+ return;
+ state->groups[state->count].block = block;
+ state->groups[state->count].stores[0] = store;
+ state->groups[state->count].path_len = path_len;
+ state->groups[state->count].writemasks[0] = writemask;
+ state->groups[state->count].store_count = 1;
+ state->groups[state->count].dirty = false;
+ ++state->count;
}
-static bool copy_propagation_transform_block(struct hlsl_ctx *ctx, struct hlsl_block *block,
- struct copy_propagation_state *state)
+static void find_vectorizable_store_groups(struct hlsl_ctx *ctx, struct hlsl_block *block,
+ struct vectorize_stores_state *state)
{
- struct hlsl_ir_node *instr, *next;
- bool progress = false;
+ struct hlsl_ir_node *instr;
- LIST_FOR_EACH_ENTRY_SAFE(instr, next, &block->instrs, struct hlsl_ir_node, entry)
+ LIST_FOR_EACH_ENTRY(instr, &block->instrs, struct hlsl_ir_node, entry)
{
- if (instr == state->stop)
+ if (instr->type == HLSL_IR_STORE)
{
- state->stopped = true;
- return progress;
+ record_vectorizable_store(ctx, block, hlsl_ir_store(instr), state);
}
+ else if (instr->type == HLSL_IR_LOAD)
+ {
+ struct hlsl_ir_var *var = hlsl_ir_load(instr)->src.var;
- switch (instr->type)
+ /* By vectorizing store A with store B, we are effectively moving
+ * store A down to happen at the same time as store B.
+ * If there was a load of the same variable between the two, this
+ * would be incorrect.
+ * Therefore invalidate all stores to this variable. As above, we
+ * could be more granular if necessary. */
+
+ for (unsigned int i = 0; i < state->count; ++i)
+ {
+ if (state->groups[i].stores[0]->lhs.var == var)
+ state->groups[i].dirty = true;
+ }
+ }
+ else if (instr->type == HLSL_IR_IF)
{
- case HLSL_IR_LOAD:
- progress |= copy_propagation_transform_load(ctx, hlsl_ir_load(instr), state);
- break;
+ struct hlsl_ir_if *iff = hlsl_ir_if(instr);
- case HLSL_IR_RESOURCE_LOAD:
- progress |= copy_propagation_transform_resource_load(ctx, hlsl_ir_resource_load(instr), state);
- break;
+ find_vectorizable_store_groups(ctx, &iff->then_block, state);
+ find_vectorizable_store_groups(ctx, &iff->else_block, state);
+ }
+ else if (instr->type == HLSL_IR_LOOP)
+ {
+ find_vectorizable_store_groups(ctx, &hlsl_ir_loop(instr)->body, state);
+ }
+ else if (instr->type == HLSL_IR_SWITCH)
+ {
+ struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
+ struct hlsl_ir_switch_case *c;
- case HLSL_IR_RESOURCE_STORE:
- progress |= copy_propagation_transform_resource_store(ctx, hlsl_ir_resource_store(instr), state);
- break;
+ LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
+ find_vectorizable_store_groups(ctx, &c->body, state);
+ }
+ }
+}
- case HLSL_IR_STORE:
- copy_propagation_record_store(ctx, hlsl_ir_store(instr), state);
- break;
+/* Combine sequences like
+ *
+ * 2: @1.yw
+ * 3: @1.zy
+ * 4: var.xy = @2
+ * 5: var.yw = @3
+ *
+ * to
+ *
+ * 2: @1.yzy
+ * 5: var.xyw = @2
+ *
+ * There are a lot of gotchas here. We need to make sure the two stores are to
+ * the same vector (which may be embedded in a complex variable), that they're
+ * always executed in the same control flow, and that there aren't any other
+ * stores or loads on the same vector in the middle. */
+static bool vectorize_stores(struct hlsl_ctx *ctx, struct hlsl_block *block)
+{
+ struct vectorize_stores_state state = {0};
+ bool progress = false;
- case HLSL_IR_SWIZZLE:
- progress |= copy_propagation_transform_swizzle(ctx, hlsl_ir_swizzle(instr), state);
- break;
+ find_vectorizable_store_groups(ctx, block, &state);
- case HLSL_IR_IF:
- progress |= copy_propagation_process_if(ctx, hlsl_ir_if(instr), state);
- break;
+ for (unsigned int i = 0; i < state.count; ++i)
+ {
+ struct vectorizable_stores_group *group = &state.groups[i];
+ uint32_t new_swizzle = 0, new_writemask = 0;
+ struct hlsl_ir_node *new_rhs, *value;
+ uint32_t swizzle_components[4];
+ unsigned int component_count;
+ struct hlsl_ir_store *store;
+ struct hlsl_block new_block;
- case HLSL_IR_LOOP:
- progress |= copy_propagation_process_loop(ctx, hlsl_ir_loop(instr), state);
- break;
+ if (group->store_count == 1)
+ continue;
- case HLSL_IR_SWITCH:
- progress |= copy_propagation_process_switch(ctx, hlsl_ir_switch(instr), state);
- break;
+ hlsl_block_init(&new_block);
- case HLSL_IR_INTERLOCKED:
- progress |= copy_propagation_transform_interlocked(ctx, hlsl_ir_interlocked(instr), state);
+ /* Compute the swizzle components. */
+ for (unsigned int j = 0; j < group->store_count; ++j)
+ {
+ unsigned int writemask = group->writemasks[j];
+ uint32_t rhs_swizzle;
- default:
- break;
+ store = group->stores[j];
+
+ if (store->rhs.node->type == HLSL_IR_SWIZZLE)
+ rhs_swizzle = hlsl_ir_swizzle(store->rhs.node)->u.vector;
+ else
+ rhs_swizzle = HLSL_SWIZZLE(X, Y, Z, W);
+
+ component_count = 0;
+ for (unsigned int k = 0; k < 4; ++k)
+ {
+ if (writemask & (1u << k))
+ swizzle_components[k] = hlsl_swizzle_get_component(rhs_swizzle, component_count++);
+ }
+
+ new_writemask |= writemask;
}
- if (state->stopped)
- return progress;
- }
+ /* Construct the new swizzle. */
+ component_count = 0;
+ for (unsigned int k = 0; k < 4; ++k)
+ {
+ if (new_writemask & (1u << k))
+ hlsl_swizzle_set_component(&new_swizzle, component_count++, swizzle_components[k]);
+ }
- return progress;
-}
+ store = group->stores[0];
+ value = store->rhs.node;
+ if (value->type == HLSL_IR_SWIZZLE)
+ value = hlsl_ir_swizzle(value)->val.node;
-bool hlsl_copy_propagation_execute(struct hlsl_ctx *ctx, struct hlsl_block *block)
-{
- struct copy_propagation_state state;
- bool progress;
+ new_rhs = hlsl_block_add_swizzle(ctx, &new_block, new_swizzle, component_count, value, &value->loc);
+ hlsl_block_add_store_parent(ctx, &new_block, &store->lhs,
+ group->path_len, new_rhs, new_writemask, &store->node.loc);
- index_instructions(block, 2);
+ TRACE("Combining %u stores to %s.\n", group->store_count, store->lhs.var->name);
- copy_propagation_state_init(&state, ctx);
+ list_move_before(&group->stores[group->store_count - 1]->node.entry, &new_block.instrs);
- progress = copy_propagation_transform_block(ctx, block, &state);
+ for (unsigned int j = 0; j < group->store_count; ++j)
+ {
+ list_remove(&group->stores[j]->node.entry);
+ hlsl_free_instr(&group->stores[j]->node);
+ }
- copy_propagation_state_destroy(&state);
+ progress = true;
+ }
+ vkd3d_free(state.groups);
return progress;
}
-enum validation_result
-{
- DEREF_VALIDATION_OK,
- DEREF_VALIDATION_OUT_OF_BOUNDS,
- DEREF_VALIDATION_NOT_CONSTANT,
-};
-
static enum validation_result validate_component_index_range_from_deref(struct hlsl_ctx *ctx,
const struct hlsl_deref *deref)
{
@@ -2244,8 +3055,7 @@ static enum validation_result validate_component_index_range_from_deref(struct h
return DEREF_VALIDATION_NOT_CONSTANT;
/* We should always have generated a cast to UINT. */
- VKD3D_ASSERT(path_node->data_type->class == HLSL_CLASS_SCALAR
- && path_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
+ VKD3D_ASSERT(hlsl_is_vec1(path_node->data_type) && path_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
idx = hlsl_ir_constant(path_node)->value.u[0].u;
@@ -2402,11 +3212,6 @@ static bool validate_dereferences(struct hlsl_ctx *ctx, struct hlsl_ir_node *ins
return false;
}
-static bool is_vec1(const struct hlsl_type *type)
-{
- return (type->class == HLSL_CLASS_SCALAR) || (type->class == HLSL_CLASS_VECTOR && type->e.numeric.dimx == 1);
-}
-
static bool fold_redundant_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
{
if (instr->type == HLSL_IR_EXPR)
@@ -2421,7 +3226,8 @@ static bool fold_redundant_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
src_type = expr->operands[0].node->data_type;
if (hlsl_types_are_equal(src_type, dst_type)
- || (src_type->e.numeric.type == dst_type->e.numeric.type && is_vec1(src_type) && is_vec1(dst_type)))
+ || (src_type->e.numeric.type == dst_type->e.numeric.type
+ && hlsl_is_vec1(src_type) && hlsl_is_vec1(dst_type)))
{
hlsl_replace_node(&expr->node, expr->operands[0].node);
return true;
@@ -2584,20 +3390,14 @@ static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *ins
if (src_type->class <= HLSL_CLASS_VECTOR && dst_type->class <= HLSL_CLASS_VECTOR
&& dst_type->e.numeric.dimx < src_type->e.numeric.dimx)
{
- struct hlsl_ir_node *new_cast, *swizzle;
+ struct hlsl_ir_node *new_cast;
dst_vector_type = hlsl_get_vector_type(ctx, dst_type->e.numeric.type, src_type->e.numeric.dimx);
/* We need to preserve the cast since it might be doing more than just
* narrowing the vector. */
- if (!(new_cast = hlsl_new_cast(ctx, cast->operands[0].node, dst_vector_type, &cast->node.loc)))
- return false;
- hlsl_block_add_instr(block, new_cast);
-
- if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, Y, Z, W),
- dst_type->e.numeric.dimx, new_cast, &cast->node.loc)))
- return false;
- hlsl_block_add_instr(block, swizzle);
-
+ new_cast = hlsl_block_add_cast(ctx, block, cast->operands[0].node, dst_vector_type, &cast->node.loc);
+ hlsl_block_add_swizzle(ctx, block, HLSL_SWIZZLE(X, Y, Z, W),
+ dst_type->e.numeric.dimx, new_cast, &cast->node.loc);
return true;
}
@@ -2768,16 +3568,9 @@ static bool normalize_switch_cases(struct hlsl_ctx *ctx, struct hlsl_ir_node *in
}
else
{
- struct hlsl_ir_node *jump;
-
if (!(def = hlsl_new_switch_case(ctx, 0, true, NULL, &s->node.loc)))
return true;
- if (!(jump = hlsl_new_jump(ctx, HLSL_IR_JUMP_BREAK, NULL, &s->node.loc)))
- {
- hlsl_free_ir_switch_case(def);
- return true;
- }
- hlsl_block_add_instr(&def->body, jump);
+ hlsl_block_add_jump(ctx, &def->body, HLSL_IR_JUMP_BREAK, NULL, &s->node.loc);
}
list_add_tail(&s->cases, &def->entry);
@@ -2808,7 +3601,7 @@ static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir
if (type->class == HLSL_CLASS_VECTOR && idx->type != HLSL_IR_CONSTANT)
{
- struct hlsl_ir_node *eq, *swizzle, *dot, *c, *operands[HLSL_MAX_OPERANDS] = {0};
+ struct hlsl_ir_node *eq, *swizzle, *c, *operands[HLSL_MAX_OPERANDS] = {0};
unsigned int width = type->e.numeric.dimx;
struct hlsl_constant_value value;
struct hlsl_ir_load *vector_load;
@@ -2818,9 +3611,7 @@ static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir
return false;
hlsl_block_add_instr(block, &vector_load->node);
- if (!(swizzle = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X), width, idx, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, swizzle);
+ swizzle = hlsl_block_add_swizzle(ctx, block, HLSL_SWIZZLE(X, X, X, X), width, idx, &instr->loc);
value.u[0].u = 0;
value.u[1].u = 1;
@@ -2832,14 +3623,9 @@ static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir
operands[0] = swizzle;
operands[1] = c;
- if (!(eq = hlsl_new_expr(ctx, HLSL_OP2_EQUAL, operands,
- hlsl_get_vector_type(ctx, HLSL_TYPE_BOOL, width), &instr->loc)))
- return false;
- hlsl_block_add_instr(block, eq);
-
- if (!(eq = hlsl_new_cast(ctx, eq, type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, eq);
+ eq = hlsl_block_add_expr(ctx, block, HLSL_OP2_EQUAL, operands,
+ hlsl_get_vector_type(ctx, HLSL_TYPE_BOOL, width), &instr->loc);
+ eq = hlsl_block_add_cast(ctx, block, eq, type, &instr->loc);
op = HLSL_OP2_DOT;
if (width == 1)
@@ -2849,10 +3635,7 @@ static bool lower_nonconstant_vector_derefs(struct hlsl_ctx *ctx, struct hlsl_ir
* LOGIC_OR + LOGIC_AND. */
operands[0] = &vector_load->node;
operands[1] = eq;
- if (!(dot = hlsl_new_expr(ctx, op, operands, instr->data_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, dot);
-
+ hlsl_block_add_expr(ctx, block, op, operands, instr->data_type, &instr->loc);
return true;
}
@@ -2891,6 +3674,11 @@ static bool validate_nonconstant_vector_store_derefs(struct hlsl_ctx *ctx, struc
return false;
}
+static bool deref_supports_sm1_indirect_addressing(struct hlsl_ctx *ctx, const struct hlsl_deref *deref)
+{
+ return ctx->profile->type == VKD3D_SHADER_TYPE_VERTEX && deref->var->is_uniform;
+}
+
/* This pass flattens array (and row_major matrix) loads that include the indexing of a non-constant
* index into multiple constant loads, where the value of only one of them ends up in the resulting
* node.
@@ -2901,7 +3689,7 @@ static bool lower_nonconstant_array_loads(struct hlsl_ctx *ctx, struct hlsl_ir_n
struct hlsl_block *block)
{
struct hlsl_constant_value zero_value = {0};
- struct hlsl_ir_node *cut_index, *zero, *store;
+ struct hlsl_ir_node *cut_index, *zero;
unsigned int i, i_cut, element_count;
const struct hlsl_deref *deref;
struct hlsl_type *cut_type;
@@ -2917,6 +3705,9 @@ static bool lower_nonconstant_array_loads(struct hlsl_ctx *ctx, struct hlsl_ir_n
if (deref->path_len == 0)
return false;
+ if (deref_supports_sm1_indirect_addressing(ctx, deref))
+ return false;
+
for (i = deref->path_len - 1; ; --i)
{
if (deref->path[i].node->type != HLSL_IR_CONSTANT)
@@ -2944,70 +3735,44 @@ static bool lower_nonconstant_array_loads(struct hlsl_ctx *ctx, struct hlsl_ir_n
return false;
hlsl_block_add_instr(block, zero);
- if (!(store = hlsl_new_simple_store(ctx, var, zero)))
- return false;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_simple_store(ctx, block, var, zero);
TRACE("Lowering non-constant %s load on variable '%s'.\n", row_major ? "row_major" : "array", deref->var->name);
element_count = hlsl_type_element_count(cut_type);
for (i = 0; i < element_count; ++i)
{
+ struct hlsl_ir_node *const_i, *equals, *ternary, *specific_load, *var_load;
struct hlsl_type *btype = hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL);
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
- struct hlsl_ir_node *const_i, *equals, *ternary, *var_store;
- struct hlsl_ir_load *var_load, *specific_load;
struct hlsl_deref deref_copy = {0};
- if (!(const_i = hlsl_new_uint_constant(ctx, i, &cut_index->loc)))
- return false;
- hlsl_block_add_instr(block, const_i);
+ const_i = hlsl_block_add_uint_constant(ctx, block, i, &cut_index->loc);
operands[0] = cut_index;
operands[1] = const_i;
- if (!(equals = hlsl_new_expr(ctx, HLSL_OP2_EQUAL, operands, btype, &cut_index->loc)))
- return false;
- hlsl_block_add_instr(block, equals);
-
- if (!(equals = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X),
- var->data_type->e.numeric.dimx, equals, &cut_index->loc)))
- return false;
- hlsl_block_add_instr(block, equals);
+ equals = hlsl_block_add_expr(ctx, block, HLSL_OP2_EQUAL, operands, btype, &cut_index->loc);
+ equals = hlsl_block_add_swizzle(ctx, block, HLSL_SWIZZLE(X, X, X, X),
+ var->data_type->e.numeric.dimx, equals, &cut_index->loc);
- if (!(var_load = hlsl_new_var_load(ctx, var, &cut_index->loc)))
- return false;
- hlsl_block_add_instr(block, &var_load->node);
+ var_load = hlsl_block_add_simple_load(ctx, block, var, &cut_index->loc);
if (!hlsl_copy_deref(ctx, &deref_copy, deref))
return false;
hlsl_src_remove(&deref_copy.path[i_cut]);
hlsl_src_from_node(&deref_copy.path[i_cut], const_i);
-
- if (!(specific_load = hlsl_new_load_index(ctx, &deref_copy, NULL, &cut_index->loc)))
- {
- hlsl_cleanup_deref(&deref_copy);
- return false;
- }
- hlsl_block_add_instr(block, &specific_load->node);
-
+ specific_load = hlsl_block_add_load_index(ctx, block, &deref_copy, NULL, &cut_index->loc);
hlsl_cleanup_deref(&deref_copy);
operands[0] = equals;
- operands[1] = &specific_load->node;
- operands[2] = &var_load->node;
- if (!(ternary = hlsl_new_expr(ctx, HLSL_OP3_TERNARY, operands, instr->data_type, &cut_index->loc)))
- return false;
- hlsl_block_add_instr(block, ternary);
+ operands[1] = specific_load;
+ operands[2] = var_load;
+ ternary = hlsl_block_add_expr(ctx, block, HLSL_OP3_TERNARY, operands, instr->data_type, &cut_index->loc);
- if (!(var_store = hlsl_new_simple_store(ctx, var, ternary)))
- return false;
- hlsl_block_add_instr(block, var_store);
+ hlsl_block_add_simple_store(ctx, block, var, ternary);
}
- if (!(load = hlsl_new_var_load(ctx, var, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
-
+ hlsl_block_add_simple_load(ctx, block, var, &instr->loc);
return true;
}
@@ -3278,9 +4043,37 @@ static bool sort_synthetic_separated_samplers_first(struct hlsl_ctx *ctx)
return false;
}
-/* Turn CAST to int or uint as follows:
+/* Turn CAST to int or uint into TRUNC + REINTERPRET */
+static bool lower_casts_to_int(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
+{
+ struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = { 0 };
+ struct hlsl_ir_node *arg, *trunc;
+ struct hlsl_ir_expr *expr;
+
+ if (instr->type != HLSL_IR_EXPR)
+ return false;
+ expr = hlsl_ir_expr(instr);
+ if (expr->op != HLSL_OP1_CAST)
+ return false;
+
+ arg = expr->operands[0].node;
+ if (!hlsl_type_is_integer(instr->data_type) || instr->data_type->e.numeric.type == HLSL_TYPE_BOOL)
+ return false;
+ if (!hlsl_type_is_floating_point(arg->data_type))
+ return false;
+
+ trunc = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_TRUNC, arg, &instr->loc);
+
+ memset(operands, 0, sizeof(operands));
+ operands[0] = trunc;
+ hlsl_block_add_expr(ctx, block, HLSL_OP1_REINTERPRET, operands, instr->data_type, &instr->loc);
+
+ return true;
+}
+
+/* Turn TRUNC into:
*
- * CAST(x) = x - FRACT(x) + extra
+ * TRUNC(x) = x - FRACT(x) + extra
*
* where
*
@@ -3288,27 +4081,19 @@ static bool sort_synthetic_separated_samplers_first(struct hlsl_ctx *ctx)
*
* where the comparisons in the extra term are performed using CMP or SLT
* depending on whether this is a pixel or vertex shader, respectively.
- *
- * A REINTERPET (which is written as a mere MOV) is also applied to the final
- * result for type consistency.
*/
-static bool lower_casts_to_int(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
+static bool lower_trunc(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = { 0 };
struct hlsl_ir_node *arg, *res;
struct hlsl_ir_expr *expr;
if (instr->type != HLSL_IR_EXPR)
return false;
expr = hlsl_ir_expr(instr);
- if (expr->op != HLSL_OP1_CAST)
+ if (expr->op != HLSL_OP1_TRUNC)
return false;
arg = expr->operands[0].node;
- if (instr->data_type->e.numeric.type != HLSL_TYPE_INT && instr->data_type->e.numeric.type != HLSL_TYPE_UINT)
- return false;
- if (arg->data_type->e.numeric.type != HLSL_TYPE_FLOAT && arg->data_type->e.numeric.type != HLSL_TYPE_HALF)
- return false;
if (ctx->profile->type == VKD3D_SHADER_TYPE_PIXEL)
{
@@ -3328,13 +4113,8 @@ static bool lower_casts_to_int(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
return false;
hlsl_block_add_instr(block, one);
- if (!(fract = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, arg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, fract);
-
- if (!(neg_fract = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, fract, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg_fract);
+ fract = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_FRACT, arg, &instr->loc);
+ neg_fract = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, fract, &instr->loc);
if (!(has_fract = hlsl_new_ternary_expr(ctx, HLSL_OP3_CMP, neg_fract, zero, one)))
return false;
@@ -3344,52 +4124,63 @@ static bool lower_casts_to_int(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
return false;
hlsl_block_add_instr(block, extra);
- if (!(floor = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg, neg_fract)))
- return false;
- hlsl_block_add_instr(block, floor);
-
- if (!(res = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, floor, extra)))
- return false;
- hlsl_block_add_instr(block, res);
+ floor = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, arg, neg_fract);
+ res = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, floor, extra);
}
else
{
struct hlsl_ir_node *neg_arg, *is_neg, *fract, *neg_fract, *has_fract, *floor;
- if (!(neg_arg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg_arg);
+ neg_arg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg, &instr->loc);
+ is_neg = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_SLT, arg, neg_arg);
+ fract = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_FRACT, arg, &instr->loc);
+ neg_fract = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, fract, &instr->loc);
+ has_fract = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_SLT, neg_fract, fract);
+ floor = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, arg, neg_fract);
- if (!(is_neg = hlsl_new_binary_expr(ctx, HLSL_OP2_SLT, arg, neg_arg)))
+ if (!(res = hlsl_new_ternary_expr(ctx, HLSL_OP3_MAD, is_neg, has_fract, floor)))
return false;
- hlsl_block_add_instr(block, is_neg);
+ hlsl_block_add_instr(block, res);
+ }
- if (!(fract = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, arg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, fract);
+ return true;
+}
- if (!(neg_fract = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, fract, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg_fract);
+/* Lower modulus using:
+ *
+ * mod(x, y) = x - trunc(x / y) * y;
+ *
+ */
+static bool lower_int_modulus_sm1(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
+{
+ struct hlsl_ir_node *div, *trunc, *mul, *neg, *operands[2], *ret;
+ struct hlsl_type *float_type;
+ struct hlsl_ir_expr *expr;
+ bool is_float;
- if (!(has_fract = hlsl_new_binary_expr(ctx, HLSL_OP2_SLT, neg_fract, fract)))
- return false;
- hlsl_block_add_instr(block, has_fract);
+ if (instr->type != HLSL_IR_EXPR)
+ return false;
+ expr = hlsl_ir_expr(instr);
+ if (expr->op != HLSL_OP2_MOD)
+ return false;
- if (!(floor = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg, neg_fract)))
- return false;
- hlsl_block_add_instr(block, floor);
+ is_float = instr->data_type->e.numeric.type == HLSL_TYPE_FLOAT
+ || instr->data_type->e.numeric.type == HLSL_TYPE_HALF;
+ if (is_float)
+ return false;
+ float_type = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, instr->data_type->e.numeric.dimx);
- if (!(res = hlsl_new_ternary_expr(ctx, HLSL_OP3_MAD, is_neg, has_fract, floor)))
- return false;
- hlsl_block_add_instr(block, res);
+ for (unsigned int i = 0; i < 2; ++i)
+ {
+ operands[i] = hlsl_block_add_cast(ctx, block, expr->operands[i].node, float_type, &instr->loc);
}
- memset(operands, 0, sizeof(operands));
- operands[0] = res;
- if (!(res = hlsl_new_expr(ctx, HLSL_OP1_REINTERPRET, operands, instr->data_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, res);
+ div = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_DIV, operands[0], operands[1]);
+ trunc = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_TRUNC, div, &instr->loc);
+ mul = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, trunc, operands[1]);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, mul, &instr->loc);
+ ret = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, operands[0], neg);
+ hlsl_block_add_cast(ctx, block, ret, instr->data_type, &instr->loc);
return true;
}
@@ -3397,8 +4188,10 @@ static bool lower_casts_to_int(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
/* Lower DIV to RCP + MUL. */
static bool lower_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *rcp, *mul;
+ struct hlsl_ir_node *rcp, *ret, *operands[2];
+ struct hlsl_type *float_type;
struct hlsl_ir_expr *expr;
+ bool is_float;
if (instr->type != HLSL_IR_EXPR)
return false;
@@ -3406,13 +4199,21 @@ static bool lower_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, str
if (expr->op != HLSL_OP2_DIV)
return false;
- if (!(rcp = hlsl_new_unary_expr(ctx, HLSL_OP1_RCP, expr->operands[1].node, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, rcp);
+ is_float = instr->data_type->e.numeric.type == HLSL_TYPE_FLOAT
+ || instr->data_type->e.numeric.type == HLSL_TYPE_HALF;
+ float_type = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, instr->data_type->e.numeric.dimx);
- if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, expr->operands[0].node, rcp)))
- return false;
- hlsl_block_add_instr(block, mul);
+ for (unsigned int i = 0; i < 2; ++i)
+ {
+ operands[i] = expr->operands[i].node;
+ if (!is_float)
+ operands[i] = hlsl_block_add_cast(ctx, block, operands[i], float_type, &instr->loc);
+ }
+
+ rcp = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_RCP, operands[1], &instr->loc);
+ ret = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, operands[0], rcp);
+ if (!is_float)
+ ret = hlsl_block_add_cast(ctx, block, ret, instr->data_type, &instr->loc);
return true;
}
@@ -3420,8 +4221,8 @@ static bool lower_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, str
/* Lower SQRT to RSQ + RCP. */
static bool lower_sqrt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *rsq, *rcp;
struct hlsl_ir_expr *expr;
+ struct hlsl_ir_node *rsq;
if (instr->type != HLSL_IR_EXPR)
return false;
@@ -3429,20 +4230,15 @@ static bool lower_sqrt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct
if (expr->op != HLSL_OP1_SQRT)
return false;
- if (!(rsq = hlsl_new_unary_expr(ctx, HLSL_OP1_RSQ, expr->operands[0].node, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, rsq);
-
- if (!(rcp = hlsl_new_unary_expr(ctx, HLSL_OP1_RCP, rsq, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, rcp);
+ rsq = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_RSQ, expr->operands[0].node, &instr->loc);
+ hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_RCP, rsq, &instr->loc);
return true;
}
/* Lower DP2 to MUL + ADD */
static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *arg1, *arg2, *mul, *replacement, *zero, *add_x, *add_y;
+ struct hlsl_ir_node *arg1, *arg2, *mul, *add_x, *add_y;
struct hlsl_ir_expr *expr;
if (instr->type != HLSL_IR_EXPR)
@@ -3459,37 +4255,22 @@ static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct h
{
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = { 0 };
- if (!(zero = hlsl_new_float_constant(ctx, 0.0f, &expr->node.loc)))
- return false;
- hlsl_block_add_instr(block, zero);
-
operands[0] = arg1;
operands[1] = arg2;
- operands[2] = zero;
+ operands[2] = hlsl_block_add_float_constant(ctx, block, 0.0f, &expr->node.loc);
- if (!(replacement = hlsl_new_expr(ctx, HLSL_OP3_DP2ADD, operands, instr->data_type, &expr->node.loc)))
- return false;
+ hlsl_block_add_expr(ctx, block, HLSL_OP3_DP2ADD, operands, instr->data_type, &expr->node.loc);
}
else
{
- if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, expr->operands[0].node, expr->operands[1].node)))
- return false;
- hlsl_block_add_instr(block, mul);
-
- if (!(add_x = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(X, X, X, X),
- instr->data_type->e.numeric.dimx, mul, &expr->node.loc)))
- return false;
- hlsl_block_add_instr(block, add_x);
-
- if (!(add_y = hlsl_new_swizzle(ctx, HLSL_SWIZZLE(Y, Y, Y, Y),
- instr->data_type->e.numeric.dimx, mul, &expr->node.loc)))
- return false;
- hlsl_block_add_instr(block, add_y);
+ mul = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, expr->operands[0].node, expr->operands[1].node);
- if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, add_x, add_y)))
- return false;
+ add_x = hlsl_block_add_swizzle(ctx, block, HLSL_SWIZZLE(X, X, X, X),
+ instr->data_type->e.numeric.dimx, mul, &expr->node.loc);
+ add_y = hlsl_block_add_swizzle(ctx, block, HLSL_SWIZZLE(Y, Y, Y, Y),
+ instr->data_type->e.numeric.dimx, mul, &expr->node.loc);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, add_x, add_y);
}
- hlsl_block_add_instr(block, replacement);
return true;
}
@@ -3497,7 +4278,7 @@ static bool lower_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct h
/* Lower ABS to MAX */
static bool lower_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *arg, *neg, *replacement;
+ struct hlsl_ir_node *arg, *neg;
struct hlsl_ir_expr *expr;
if (instr->type != HLSL_IR_EXPR)
@@ -3507,21 +4288,15 @@ static bool lower_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct h
if (expr->op != HLSL_OP1_ABS)
return false;
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
- if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_MAX, neg, arg)))
- return false;
- hlsl_block_add_instr(block, replacement);
-
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg, &instr->loc);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MAX, neg, arg);
return true;
}
/* Lower ROUND using FRC, ROUND(x) -> ((x + 0.5) - FRC(x + 0.5)). */
static bool lower_round(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *arg, *neg, *sum, *frc, *half, *replacement;
+ struct hlsl_ir_node *arg, *neg, *sum, *frc, *half;
struct hlsl_type *type = instr->data_type;
struct hlsl_constant_value half_value;
unsigned int i, component_count;
@@ -3542,29 +4317,17 @@ static bool lower_round(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct
return false;
hlsl_block_add_instr(block, half);
- if (!(sum = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg, half)))
- return false;
- hlsl_block_add_instr(block, sum);
-
- if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, sum, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, frc);
-
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, frc, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
- if (!(replacement = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, sum, neg)))
- return false;
- hlsl_block_add_instr(block, replacement);
-
+ sum = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, arg, half);
+ frc = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_FRACT, sum, &instr->loc);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, frc, &instr->loc);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, sum, neg);
return true;
}
/* Lower CEIL to FRC */
static bool lower_ceil(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *arg, *neg, *sum, *frc;
+ struct hlsl_ir_node *arg, *neg, *frc;
struct hlsl_ir_expr *expr;
if (instr->type != HLSL_IR_EXPR)
@@ -3575,25 +4338,16 @@ static bool lower_ceil(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct
if (expr->op != HLSL_OP1_CEIL)
return false;
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
- if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, neg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, frc);
-
- if (!(sum = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, frc, arg)))
- return false;
- hlsl_block_add_instr(block, sum);
-
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg, &instr->loc);
+ frc = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_FRACT, neg, &instr->loc);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, frc, arg);
return true;
}
/* Lower FLOOR to FRC */
static bool lower_floor(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *arg, *neg, *sum, *frc;
+ struct hlsl_ir_node *arg, *neg, *frc;
struct hlsl_ir_expr *expr;
if (instr->type != HLSL_IR_EXPR)
@@ -3604,18 +4358,9 @@ static bool lower_floor(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct
if (expr->op != HLSL_OP1_FLOOR)
return false;
- if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, arg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, frc);
-
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, frc, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
- if (!(sum = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, neg, arg)))
- return false;
- hlsl_block_add_instr(block, sum);
-
+ frc = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_FRACT, arg, &instr->loc);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, frc, &instr->loc);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, neg, arg);
return true;
}
@@ -3667,33 +4412,26 @@ static bool lower_trig(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct
if (!(mad = hlsl_new_ternary_expr(ctx, HLSL_OP3_MAD, arg, reciprocal_two_pi, half)))
return false;
hlsl_block_add_instr(block, mad);
- if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, mad, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, frc);
+ frc = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_FRACT, mad, &instr->loc);
if (!(reduced = hlsl_new_ternary_expr(ctx, HLSL_OP3_MAD, frc, two_pi, neg_pi)))
return false;
hlsl_block_add_instr(block, reduced);
if (type->e.numeric.dimx == 1)
{
- if (!(sincos = hlsl_new_unary_expr(ctx, op, reduced, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, sincos);
+ sincos = hlsl_block_add_unary_expr(ctx, block, op, reduced, &instr->loc);
}
else
{
struct hlsl_ir_node *comps[4] = {0};
struct hlsl_ir_var *var;
struct hlsl_deref var_deref;
- struct hlsl_ir_load *var_load;
for (i = 0; i < type->e.numeric.dimx; ++i)
{
uint32_t s = hlsl_swizzle_from_writemask(1 << i);
- if (!(comps[i] = hlsl_new_swizzle(ctx, s, 1, reduced, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, comps[i]);
+ comps[i] = hlsl_block_add_swizzle(ctx, block, s, 1, reduced, &instr->loc);
}
if (!(var = hlsl_new_synthetic_var(ctx, "sincos", type, &instr->loc)))
@@ -3702,20 +4440,11 @@ static bool lower_trig(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct
for (i = 0; i < type->e.numeric.dimx; ++i)
{
- struct hlsl_block store_block;
-
- if (!(sincos = hlsl_new_unary_expr(ctx, op, comps[i], &instr->loc)))
- return false;
- hlsl_block_add_instr(block, sincos);
-
- if (!hlsl_new_store_component(ctx, &store_block, &var_deref, i, sincos))
- return false;
- hlsl_block_add_block(block, &store_block);
+ sincos = hlsl_block_add_unary_expr(ctx, block, op, comps[i], &instr->loc);
+ hlsl_block_add_store_component(ctx, block, &var_deref, i, sincos);
}
- if (!(var_load = hlsl_new_load_index(ctx, &var_deref, NULL, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, &var_load->node);
+ hlsl_block_add_load_index(ctx, block, &var_deref, NULL, &instr->loc);
}
return true;
@@ -3723,8 +4452,8 @@ static bool lower_trig(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct
static bool lower_logic_not(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
+ struct hlsl_ir_node *arg, *arg_cast, *neg, *one, *sub;
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS];
- struct hlsl_ir_node *arg, *arg_cast, *neg, *one, *sub, *res;
struct hlsl_constant_value one_value;
struct hlsl_type *float_type;
struct hlsl_ir_expr *expr;
@@ -3741,13 +4470,9 @@ static bool lower_logic_not(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, st
/* If this is happens, it means we failed to cast the argument to boolean somewhere. */
VKD3D_ASSERT(arg->data_type->e.numeric.type == HLSL_TYPE_BOOL);
- if (!(arg_cast = hlsl_new_cast(ctx, arg, float_type, &arg->loc)))
- return false;
- hlsl_block_add_instr(block, arg_cast);
+ arg_cast = hlsl_block_add_cast(ctx, block, arg, float_type, &arg->loc);
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg_cast, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg_cast, &instr->loc);
one_value.u[0].f = 1.0;
one_value.u[1].f = 1.0;
@@ -3757,24 +4482,19 @@ static bool lower_logic_not(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, st
return false;
hlsl_block_add_instr(block, one);
- if (!(sub = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, one, neg)))
- return false;
- hlsl_block_add_instr(block, sub);
+ sub = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, one, neg);
memset(operands, 0, sizeof(operands));
operands[0] = sub;
- if (!(res = hlsl_new_expr(ctx, HLSL_OP1_REINTERPRET, operands, instr->data_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, res);
-
+ hlsl_block_add_expr(ctx, block, HLSL_OP1_REINTERPRET, operands, instr->data_type, &instr->loc);
return true;
}
/* Lower TERNARY to CMP for SM1. */
static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = { 0 }, *replacement;
struct hlsl_ir_node *cond, *first, *second, *float_cond, *neg;
+ struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
struct hlsl_ir_expr *expr;
struct hlsl_type *type;
@@ -3799,23 +4519,14 @@ static bool lower_ternary(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, stru
type = hlsl_get_numeric_type(ctx, instr->data_type->class, HLSL_TYPE_FLOAT,
instr->data_type->e.numeric.dimx, instr->data_type->e.numeric.dimy);
-
- if (!(float_cond = hlsl_new_cast(ctx, cond, type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, float_cond);
-
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, float_cond, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
+ float_cond = hlsl_block_add_cast(ctx, block, cond, type, &instr->loc);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, float_cond, &instr->loc);
memset(operands, 0, sizeof(operands));
operands[0] = neg;
operands[1] = second;
operands[2] = first;
- if (!(replacement = hlsl_new_expr(ctx, HLSL_OP3_CMP, operands, first->data_type, &instr->loc)))
- return false;
-
- hlsl_block_add_instr(block, replacement);
+ hlsl_block_add_expr(ctx, block, HLSL_OP3_CMP, operands, first->data_type, &instr->loc);
return true;
}
@@ -3867,7 +4578,7 @@ static bool lower_resource_load_bias(struct hlsl_ctx *ctx, struct hlsl_ir_node *
static bool lower_comparison_operators(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
struct hlsl_block *block)
{
- struct hlsl_ir_node *arg1, *arg1_cast, *arg2, *arg2_cast, *slt, *res, *ret;
+ struct hlsl_ir_node *arg1, *arg1_cast, *arg2, *arg2_cast, *slt, *res;
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS];
struct hlsl_type *float_type;
struct hlsl_ir_expr *expr;
@@ -3884,13 +4595,8 @@ static bool lower_comparison_operators(struct hlsl_ctx *ctx, struct hlsl_ir_node
arg2 = expr->operands[1].node;
float_type = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, instr->data_type->e.numeric.dimx);
- if (!(arg1_cast = hlsl_new_cast(ctx, arg1, float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, arg1_cast);
-
- if (!(arg2_cast = hlsl_new_cast(ctx, arg2, float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, arg2_cast);
+ arg1_cast = hlsl_block_add_cast(ctx, block, arg1, float_type, &instr->loc);
+ arg2_cast = hlsl_block_add_cast(ctx, block, arg2, float_type, &instr->loc);
switch (expr->op)
{
@@ -3899,36 +4605,21 @@ static bool lower_comparison_operators(struct hlsl_ctx *ctx, struct hlsl_ir_node
{
struct hlsl_ir_node *neg, *sub, *abs, *abs_neg;
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg2_cast, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
- if (!(sub = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg1_cast, neg)))
- return false;
- hlsl_block_add_instr(block, sub);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg2_cast, &instr->loc);
+ sub = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, arg1_cast, neg);
if (ctx->profile->major_version >= 3)
{
- if (!(abs = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, sub, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, abs);
+ abs = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_ABS, sub, &instr->loc);
}
else
{
/* Use MUL as a precarious ABS. */
- if (!(abs = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, sub, sub)))
- return false;
- hlsl_block_add_instr(block, abs);
+ abs = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, sub, sub);
}
- if (!(abs_neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, abs, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, abs_neg);
-
- if (!(slt = hlsl_new_binary_expr(ctx, HLSL_OP2_SLT, abs_neg, abs)))
- return false;
- hlsl_block_add_instr(block, slt);
-
+ abs_neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, abs, &instr->loc);
+ slt = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_SLT, abs_neg, abs);
negate = (expr->op == HLSL_OP2_EQUAL);
break;
}
@@ -3936,10 +4627,7 @@ static bool lower_comparison_operators(struct hlsl_ctx *ctx, struct hlsl_ir_node
case HLSL_OP2_GEQUAL:
case HLSL_OP2_LESS:
{
- if (!(slt = hlsl_new_binary_expr(ctx, HLSL_OP2_SLT, arg1_cast, arg2_cast)))
- return false;
- hlsl_block_add_instr(block, slt);
-
+ slt = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_SLT, arg1_cast, arg2_cast);
negate = (expr->op == HLSL_OP2_GEQUAL);
break;
}
@@ -3961,13 +4649,8 @@ static bool lower_comparison_operators(struct hlsl_ctx *ctx, struct hlsl_ir_node
return false;
hlsl_block_add_instr(block, one);
- if (!(slt_neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, slt, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, slt_neg);
-
- if (!(res = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, one, slt_neg)))
- return false;
- hlsl_block_add_instr(block, res);
+ slt_neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, slt, &instr->loc);
+ res = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, one, slt_neg);
}
else
{
@@ -3978,10 +4661,7 @@ static bool lower_comparison_operators(struct hlsl_ctx *ctx, struct hlsl_ir_node
* and casts to BOOL have already been lowered to "!= 0". */
memset(operands, 0, sizeof(operands));
operands[0] = res;
- if (!(ret = hlsl_new_expr(ctx, HLSL_OP1_REINTERPRET, operands, instr->data_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, ret);
-
+ hlsl_block_add_expr(ctx, block, HLSL_OP1_REINTERPRET, operands, instr->data_type, &instr->loc);
return true;
}
@@ -4010,21 +4690,10 @@ static bool lower_slt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct h
arg2 = expr->operands[1].node;
float_type = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, instr->data_type->e.numeric.dimx);
- if (!(arg1_cast = hlsl_new_cast(ctx, arg1, float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, arg1_cast);
-
- if (!(arg2_cast = hlsl_new_cast(ctx, arg2, float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, arg2_cast);
-
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg2_cast, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
- if (!(sub = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, arg1_cast, neg)))
- return false;
- hlsl_block_add_instr(block, sub);
+ arg1_cast = hlsl_block_add_cast(ctx, block, arg1, float_type, &instr->loc);
+ arg2_cast = hlsl_block_add_cast(ctx, block, arg2, float_type, &instr->loc);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg2_cast, &instr->loc);
+ sub = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, arg1_cast, neg);
memset(&zero_value, 0, sizeof(zero_value));
if (!(zero = hlsl_new_constant(ctx, float_type, &zero_value, &instr->loc)))
@@ -4056,7 +4725,7 @@ static bool lower_slt(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct h
*/
static bool lower_cmp(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *args[3], *args_cast[3], *slt, *neg_slt, *sub, *zero, *one, *mul1, *mul2, *add;
+ struct hlsl_ir_node *args[3], *args_cast[3], *slt, *neg_slt, *sub, *zero, *one, *mul1, *mul2;
struct hlsl_constant_value zero_value, one_value;
struct hlsl_type *float_type;
struct hlsl_ir_expr *expr;
@@ -4073,10 +4742,7 @@ static bool lower_cmp(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct h
for (i = 0; i < 3; ++i)
{
args[i] = expr->operands[i].node;
-
- if (!(args_cast[i] = hlsl_new_cast(ctx, args[i], float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, args_cast[i]);
+ args_cast[i] = hlsl_block_add_cast(ctx, block, args[i], float_type, &instr->loc);
}
memset(&zero_value, 0, sizeof(zero_value));
@@ -4092,30 +4758,12 @@ static bool lower_cmp(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct h
return false;
hlsl_block_add_instr(block, one);
- if (!(slt = hlsl_new_binary_expr(ctx, HLSL_OP2_SLT, args_cast[0], zero)))
- return false;
- hlsl_block_add_instr(block, slt);
-
- if (!(mul1 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, args_cast[2], slt)))
- return false;
- hlsl_block_add_instr(block, mul1);
-
- if (!(neg_slt = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, slt, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg_slt);
-
- if (!(sub = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, one, neg_slt)))
- return false;
- hlsl_block_add_instr(block, sub);
-
- if (!(mul2 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, args_cast[1], sub)))
- return false;
- hlsl_block_add_instr(block, mul2);
-
- if (!(add = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, mul1, mul2)))
- return false;
- hlsl_block_add_instr(block, add);
-
+ slt = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_SLT, args_cast[0], zero);
+ mul1 = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, args_cast[2], slt);
+ neg_slt = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, slt, &instr->loc);
+ sub = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, one, neg_slt);
+ mul2 = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, args_cast[1], sub);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_ADD, mul1, mul2);
return true;
}
@@ -4145,10 +4793,8 @@ static bool lower_casts_to_bool(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr
return false;
hlsl_block_add_instr(block, zero);
- if (!(neq = hlsl_new_binary_expr(ctx, HLSL_OP2_NEQUAL, expr->operands[0].node, zero)))
- return false;
+ neq = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_NEQUAL, expr->operands[0].node, zero);
neq->data_type = expr->node.data_type;
- hlsl_block_add_instr(block, neq);
return true;
}
@@ -4158,7 +4804,6 @@ struct hlsl_ir_node *hlsl_add_conditional(struct hlsl_ctx *ctx, struct hlsl_bloc
{
struct hlsl_type *cond_type = condition->data_type;
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS];
- struct hlsl_ir_node *cond;
VKD3D_ASSERT(hlsl_types_are_equal(if_true->data_type, if_false->data_type));
@@ -4166,23 +4811,16 @@ struct hlsl_ir_node *hlsl_add_conditional(struct hlsl_ctx *ctx, struct hlsl_bloc
{
cond_type = hlsl_get_numeric_type(ctx, cond_type->class, HLSL_TYPE_BOOL,
cond_type->e.numeric.dimx, cond_type->e.numeric.dimy);
-
- if (!(condition = hlsl_new_cast(ctx, condition, cond_type, &condition->loc)))
- return NULL;
- hlsl_block_add_instr(instrs, condition);
+ condition = hlsl_block_add_cast(ctx, instrs, condition, cond_type, &condition->loc);
}
operands[0] = condition;
operands[1] = if_true;
operands[2] = if_false;
- if (!(cond = hlsl_new_expr(ctx, HLSL_OP3_TERNARY, operands, if_true->data_type, &condition->loc)))
- return false;
- hlsl_block_add_instr(instrs, cond);
-
- return cond;
+ return hlsl_block_add_expr(ctx, instrs, HLSL_OP3_TERNARY, operands, if_true->data_type, &condition->loc);
}
-static bool lower_int_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
+static bool lower_int_division_sm4(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
struct hlsl_ir_node *arg1, *arg2, *xor, *and, *abs1, *abs2, *div, *neg, *cast1, *cast2, *cast3, *high_bit;
struct hlsl_type *type = instr->data_type, *utype;
@@ -4203,9 +4841,7 @@ static bool lower_int_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
return false;
utype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_UINT, type->e.numeric.dimx, type->e.numeric.dimy);
- if (!(xor = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_XOR, arg1, arg2)))
- return false;
- hlsl_block_add_instr(block, xor);
+ xor = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_BIT_XOR, arg1, arg2);
for (i = 0; i < type->e.numeric.dimx; ++i)
high_bit_value.u[i].u = 0x80000000;
@@ -4213,42 +4849,18 @@ static bool lower_int_division(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
return false;
hlsl_block_add_instr(block, high_bit);
- if (!(and = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_AND, xor, high_bit)))
- return false;
- hlsl_block_add_instr(block, and);
-
- if (!(abs1 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg1, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, abs1);
-
- if (!(cast1 = hlsl_new_cast(ctx, abs1, utype, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, cast1);
-
- if (!(abs2 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg2, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, abs2);
-
- if (!(cast2 = hlsl_new_cast(ctx, abs2, utype, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, cast2);
-
- if (!(div = hlsl_new_binary_expr(ctx, HLSL_OP2_DIV, cast1, cast2)))
- return false;
- hlsl_block_add_instr(block, div);
-
- if (!(cast3 = hlsl_new_cast(ctx, div, type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, cast3);
-
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, cast3, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
+ and = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_BIT_AND, xor, high_bit);
+ abs1 = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_ABS, arg1, &instr->loc);
+ cast1 = hlsl_block_add_cast(ctx, block, abs1, utype, &instr->loc);
+ abs2 = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_ABS, arg2, &instr->loc);
+ cast2 = hlsl_block_add_cast(ctx, block, abs2, utype, &instr->loc);
+ div = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_DIV, cast1, cast2);
+ cast3 = hlsl_block_add_cast(ctx, block, div, type, &instr->loc);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, cast3, &instr->loc);
return hlsl_add_conditional(ctx, block, and, neg, cast3);
}
-static bool lower_int_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
+static bool lower_int_modulus_sm4(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
struct hlsl_ir_node *arg1, *arg2, *and, *abs1, *abs2, *div, *neg, *cast1, *cast2, *cast3, *high_bit;
struct hlsl_type *type = instr->data_type, *utype;
@@ -4275,45 +4887,21 @@ static bool lower_int_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
return false;
hlsl_block_add_instr(block, high_bit);
- if (!(and = hlsl_new_binary_expr(ctx, HLSL_OP2_BIT_AND, arg1, high_bit)))
- return false;
- hlsl_block_add_instr(block, and);
-
- if (!(abs1 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg1, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, abs1);
-
- if (!(cast1 = hlsl_new_cast(ctx, abs1, utype, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, cast1);
-
- if (!(abs2 = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, arg2, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, abs2);
-
- if (!(cast2 = hlsl_new_cast(ctx, abs2, utype, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, cast2);
-
- if (!(div = hlsl_new_binary_expr(ctx, HLSL_OP2_MOD, cast1, cast2)))
- return false;
- hlsl_block_add_instr(block, div);
-
- if (!(cast3 = hlsl_new_cast(ctx, div, type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, cast3);
-
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, cast3, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
+ and = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_BIT_AND, arg1, high_bit);
+ abs1 = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_ABS, arg1, &instr->loc);
+ cast1 = hlsl_block_add_cast(ctx, block, abs1, utype, &instr->loc);
+ abs2 = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_ABS, arg2, &instr->loc);
+ cast2 = hlsl_block_add_cast(ctx, block, abs2, utype, &instr->loc);
+ div = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MOD, cast1, cast2);
+ cast3 = hlsl_block_add_cast(ctx, block, div, type, &instr->loc);
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, cast3, &instr->loc);
return hlsl_add_conditional(ctx, block, and, neg, cast3);
}
static bool lower_int_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
struct hlsl_type *type = instr->data_type;
- struct hlsl_ir_node *arg, *neg, *max;
+ struct hlsl_ir_node *arg, *neg;
struct hlsl_ir_expr *expr;
if (instr->type != HLSL_IR_EXPR)
@@ -4329,14 +4917,8 @@ static bool lower_int_abs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, stru
arg = expr->operands[0].node;
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg);
-
- if (!(max = hlsl_new_binary_expr(ctx, HLSL_OP2_MAX, arg, neg)))
- return false;
- hlsl_block_add_instr(block, max);
-
+ neg = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg, &instr->loc);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MAX, arg, neg);
return true;
}
@@ -4355,8 +4937,7 @@ static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, stru
if (expr->op != HLSL_OP2_DOT)
return false;
- if (type->e.numeric.type == HLSL_TYPE_INT || type->e.numeric.type == HLSL_TYPE_UINT
- || type->e.numeric.type == HLSL_TYPE_BOOL)
+ if (hlsl_type_is_integer(type))
{
arg1 = expr->operands[0].node;
arg2 = expr->operands[1].node;
@@ -4364,26 +4945,18 @@ static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, stru
dimx = arg1->data_type->e.numeric.dimx;
is_bool = type->e.numeric.type == HLSL_TYPE_BOOL;
- if (!(mult = hlsl_new_binary_expr(ctx, is_bool ? HLSL_OP2_LOGIC_AND : HLSL_OP2_MUL, arg1, arg2)))
- return false;
- hlsl_block_add_instr(block, mult);
+ mult = hlsl_block_add_binary_expr(ctx, block, is_bool ? HLSL_OP2_LOGIC_AND : HLSL_OP2_MUL, arg1, arg2);
for (i = 0; i < dimx; ++i)
{
uint32_t s = hlsl_swizzle_from_writemask(1 << i);
- if (!(comps[i] = hlsl_new_swizzle(ctx, s, 1, mult, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, comps[i]);
+ comps[i] = hlsl_block_add_swizzle(ctx, block, s, 1, mult, &instr->loc);
}
res = comps[0];
for (i = 1; i < dimx; ++i)
- {
- if (!(res = hlsl_new_binary_expr(ctx, is_bool ? HLSL_OP2_LOGIC_OR : HLSL_OP2_ADD, res, comps[i])))
- return false;
- hlsl_block_add_instr(block, res);
- }
+ res = hlsl_block_add_binary_expr(ctx, block, is_bool ? HLSL_OP2_LOGIC_OR : HLSL_OP2_ADD, res, comps[i]);
return true;
}
@@ -4393,125 +4966,45 @@ static bool lower_int_dot(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, stru
static bool lower_float_modulus(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
{
- struct hlsl_ir_node *arg1, *arg2, *mul1, *neg1, *ge, *neg2, *div, *mul2, *frc, *cond, *one, *mul3;
+ struct hlsl_ir_node *arg1, *arg2, *mul1, *neg1, *ge, *neg2, *div, *mul2, *frc, *cond, *one;
struct hlsl_type *type = instr->data_type, *btype;
struct hlsl_constant_value one_value;
- struct hlsl_ir_expr *expr;
- unsigned int i;
-
- if (instr->type != HLSL_IR_EXPR)
- return false;
- expr = hlsl_ir_expr(instr);
- arg1 = expr->operands[0].node;
- arg2 = expr->operands[1].node;
- if (expr->op != HLSL_OP2_MOD)
- return false;
- if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
- return false;
- if (type->e.numeric.type != HLSL_TYPE_FLOAT)
- return false;
- btype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_BOOL, type->e.numeric.dimx, type->e.numeric.dimy);
-
- if (!(mul1 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, arg2, arg1)))
- return false;
- hlsl_block_add_instr(block, mul1);
-
- if (!(neg1 = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, mul1, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg1);
-
- if (!(ge = hlsl_new_binary_expr(ctx, HLSL_OP2_GEQUAL, mul1, neg1)))
- return false;
- ge->data_type = btype;
- hlsl_block_add_instr(block, ge);
-
- if (!(neg2 = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, arg2, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, neg2);
-
- if (!(cond = hlsl_add_conditional(ctx, block, ge, arg2, neg2)))
- return false;
-
- for (i = 0; i < type->e.numeric.dimx; ++i)
- one_value.u[i].f = 1.0f;
- if (!(one = hlsl_new_constant(ctx, type, &one_value, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, one);
-
- if (!(div = hlsl_new_binary_expr(ctx, HLSL_OP2_DIV, one, cond)))
- return false;
- hlsl_block_add_instr(block, div);
-
- if (!(mul2 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, div, arg1)))
- return false;
- hlsl_block_add_instr(block, mul2);
-
- if (!(frc = hlsl_new_unary_expr(ctx, HLSL_OP1_FRACT, mul2, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, frc);
-
- if (!(mul3 = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, frc, cond)))
- return false;
- hlsl_block_add_instr(block, mul3);
-
- return true;
-}
-
-static bool lower_nonfloat_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, struct hlsl_block *block)
-{
- struct hlsl_ir_expr *expr;
-
- if (instr->type != HLSL_IR_EXPR)
- return false;
- expr = hlsl_ir_expr(instr);
- if (expr->op == HLSL_OP1_CAST || instr->data_type->e.numeric.type == HLSL_TYPE_FLOAT)
- return false;
-
- switch (expr->op)
- {
- case HLSL_OP1_ABS:
- case HLSL_OP1_NEG:
- case HLSL_OP2_ADD:
- case HLSL_OP2_DIV:
- case HLSL_OP2_LOGIC_AND:
- case HLSL_OP2_LOGIC_OR:
- case HLSL_OP2_MAX:
- case HLSL_OP2_MIN:
- case HLSL_OP2_MUL:
- {
- struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
- struct hlsl_ir_node *arg, *arg_cast, *float_expr, *ret;
- struct hlsl_type *float_type;
- unsigned int i;
+ struct hlsl_ir_expr *expr;
+ unsigned int i;
- for (i = 0; i < HLSL_MAX_OPERANDS; ++i)
- {
- arg = expr->operands[i].node;
- if (!arg)
- continue;
+ if (instr->type != HLSL_IR_EXPR)
+ return false;
+ expr = hlsl_ir_expr(instr);
+ arg1 = expr->operands[0].node;
+ arg2 = expr->operands[1].node;
+ if (expr->op != HLSL_OP2_MOD)
+ return false;
+ if (type->class != HLSL_CLASS_SCALAR && type->class != HLSL_CLASS_VECTOR)
+ return false;
+ if (type->e.numeric.type != HLSL_TYPE_FLOAT)
+ return false;
+ btype = hlsl_get_numeric_type(ctx, type->class, HLSL_TYPE_BOOL, type->e.numeric.dimx, type->e.numeric.dimy);
- float_type = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, arg->data_type->e.numeric.dimx);
- if (!(arg_cast = hlsl_new_cast(ctx, arg, float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, arg_cast);
+ mul1 = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, arg2, arg1);
+ neg1 = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, mul1, &instr->loc);
- operands[i] = arg_cast;
- }
+ ge = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_GEQUAL, mul1, neg1);
+ ge->data_type = btype;
- float_type = hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, instr->data_type->e.numeric.dimx);
- if (!(float_expr = hlsl_new_expr(ctx, expr->op, operands, float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, float_expr);
+ neg2 = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_NEG, arg2, &instr->loc);
+ cond = hlsl_add_conditional(ctx, block, ge, arg2, neg2);
- if (!(ret = hlsl_new_cast(ctx, float_expr, instr->data_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(block, ret);
+ for (i = 0; i < type->e.numeric.dimx; ++i)
+ one_value.u[i].f = 1.0f;
+ if (!(one = hlsl_new_constant(ctx, type, &one_value, &instr->loc)))
+ return false;
+ hlsl_block_add_instr(block, one);
- return true;
- }
- default:
- return false;
- }
+ div = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_DIV, one, cond);
+ mul2 = hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, div, arg1);
+ frc = hlsl_block_add_unary_expr(ctx, block, HLSL_OP1_FRACT, mul2, &instr->loc);
+ hlsl_block_add_binary_expr(ctx, block, HLSL_OP2_MUL, frc, cond);
+ return true;
}
static bool lower_discard_neg(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
@@ -4541,9 +5034,7 @@ static bool lower_discard_neg(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
operands[1] = zero;
cmp_type = hlsl_get_numeric_type(ctx, arg_type->class, HLSL_TYPE_BOOL,
arg_type->e.numeric.dimx, arg_type->e.numeric.dimy);
- if (!(cmp = hlsl_new_expr(ctx, HLSL_OP2_LESS, operands, cmp_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(&block, cmp);
+ cmp = hlsl_block_add_expr(ctx, &block, HLSL_OP2_LESS, operands, cmp_type, &instr->loc);
if (!(bool_false = hlsl_new_constant(ctx, hlsl_get_scalar_type(ctx, HLSL_TYPE_BOOL), &zero_value, &instr->loc)))
return false;
@@ -4554,12 +5045,8 @@ static bool lower_discard_neg(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr,
count = hlsl_type_component_count(cmp_type);
for (i = 0; i < count; ++i)
{
- if (!(load = hlsl_add_load_component(ctx, &block, cmp, i, &instr->loc)))
- return false;
-
- if (!(or = hlsl_new_binary_expr(ctx, HLSL_OP2_LOGIC_OR, or, load)))
- return NULL;
- hlsl_block_add_instr(&block, or);
+ load = hlsl_add_load_component(ctx, &block, cmp, i, &instr->loc);
+ or = hlsl_block_add_binary_expr(ctx, &block, HLSL_OP2_LOGIC_OR, or, load);
}
list_move_tail(&instr->entry, &block.instrs);
@@ -4588,17 +5075,9 @@ static bool lower_discard_nz(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, v
hlsl_block_init(&block);
- if (!(cond_cast = hlsl_new_cast(ctx, cond, float_type, &instr->loc)))
- return false;
- hlsl_block_add_instr(&block, cond_cast);
-
- if (!(abs = hlsl_new_unary_expr(ctx, HLSL_OP1_ABS, cond_cast, &instr->loc)))
- return false;
- hlsl_block_add_instr(&block, abs);
-
- if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, abs, &instr->loc)))
- return false;
- hlsl_block_add_instr(&block, neg);
+ cond_cast = hlsl_block_add_cast(ctx, &block, cond, float_type, &instr->loc);
+ abs = hlsl_block_add_unary_expr(ctx, &block, HLSL_OP1_ABS, cond_cast, &instr->loc);
+ neg = hlsl_block_add_unary_expr(ctx, &block, HLSL_OP1_NEG, abs, &instr->loc);
list_move_tail(&instr->entry, &block.instrs);
hlsl_src_remove(&jump->condition);
@@ -4634,6 +5113,9 @@ static bool dce(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
struct hlsl_ir_store *store = hlsl_ir_store(instr);
struct hlsl_ir_var *var = store->lhs.var;
+ if (var->is_output_semantic)
+ break;
+
if (var->last_read < instr->index)
{
list_remove(&instr->entry);
@@ -4938,20 +5420,15 @@ static void compute_liveness_recurse(struct hlsl_block *block, unsigned int loop
}
}
-static void init_var_liveness(struct hlsl_ir_var *var)
-{
- if (var->is_uniform || var->is_input_semantic)
- var->first_write = 1;
- else if (var->is_output_semantic)
- var->last_read = UINT_MAX;
-}
-
static void compute_liveness(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func)
{
struct hlsl_scope *scope;
struct hlsl_ir_var *var;
- index_instructions(&entry_func->body, 2);
+ if (ctx->result)
+ return;
+
+ index_instructions(&entry_func->body, 1);
LIST_FOR_EACH_ENTRY(scope, &ctx->scopes, struct hlsl_scope, entry)
{
@@ -4959,12 +5436,6 @@ static void compute_liveness(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl
var->first_write = var->last_read = 0;
}
- LIST_FOR_EACH_ENTRY(var, &ctx->extern_vars, struct hlsl_ir_var, extern_entry)
- init_var_liveness(var);
-
- LIST_FOR_EACH_ENTRY(var, &entry_func->extern_vars, struct hlsl_ir_var, extern_entry)
- init_var_liveness(var);
-
compute_liveness_recurse(&entry_func->body, 0, 0);
}
@@ -5001,7 +5472,7 @@ struct register_allocator
/* Indexable temps are allocated separately and always keep their index regardless of their
* lifetime. */
- size_t indexable_count;
+ uint32_t indexable_count;
/* Total number of registers allocated so far. Used to declare sm4 temp count. */
uint32_t reg_count;
@@ -5269,8 +5740,7 @@ static void register_deref_usage(struct hlsl_ctx *ctx, struct hlsl_deref *deref)
struct hlsl_type *type;
unsigned int index;
- if (!hlsl_regset_index_from_deref(ctx, deref, regset, &index))
- return;
+ hlsl_regset_index_from_deref(ctx, deref, regset, &index);
if (regset <= HLSL_REGSET_LAST_OBJECT)
{
@@ -5281,7 +5751,6 @@ static void register_deref_usage(struct hlsl_ctx *ctx, struct hlsl_deref *deref)
{
type = hlsl_deref_get_type(ctx, deref);
- hlsl_regset_index_from_deref(ctx, deref, regset, &index);
required_bind_count = align(index + type->reg_size[regset], 4) / 4;
var->bind_count[regset] = max(var->bind_count[regset], required_bind_count);
}
@@ -5481,6 +5950,33 @@ static void allocate_temp_registers_recurse(struct hlsl_ctx *ctx,
}
}
+static bool find_constant(struct hlsl_ctx *ctx, const float *f, unsigned int count, struct hlsl_reg *ret)
+{
+ struct hlsl_constant_defs *defs = &ctx->constant_defs;
+
+ for (size_t i = 0; i < defs->count; ++i)
+ {
+ const struct hlsl_constant_register *reg = &defs->regs[i];
+
+ for (size_t j = 0; j <= 4 - count; ++j)
+ {
+ unsigned int writemask = ((1u << count) - 1) << j;
+
+ if ((reg->allocated_mask & writemask) == writemask
+ && !memcmp(f, &reg->value.f[j], count * sizeof(float)))
+ {
+ ret->id = reg->index;
+ ret->allocation_size = 1;
+ ret->writemask = writemask;
+ ret->allocated = true;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static void record_constant(struct hlsl_ctx *ctx, unsigned int component_index, float f,
const struct vkd3d_shader_location *loc)
{
@@ -5494,6 +5990,7 @@ static void record_constant(struct hlsl_ctx *ctx, unsigned int component_index,
if (reg->index == (component_index / 4))
{
reg->value.f[component_index % 4] = f;
+ reg->allocated_mask |= (1u << (component_index % 4));
return;
}
}
@@ -5504,6 +6001,7 @@ static void record_constant(struct hlsl_ctx *ctx, unsigned int component_index,
memset(reg, 0, sizeof(*reg));
reg->index = component_index / 4;
reg->value.f[component_index % 4] = f;
+ reg->allocated_mask = (1u << (component_index % 4));
reg->loc = *loc;
}
@@ -5520,49 +6018,57 @@ static void allocate_const_registers_recurse(struct hlsl_ctx *ctx,
{
struct hlsl_ir_constant *constant = hlsl_ir_constant(instr);
const struct hlsl_type *type = instr->data_type;
- unsigned int x, i;
-
- constant->reg = allocate_numeric_registers_for_type(ctx, allocator, 1, UINT_MAX, type);
- TRACE("Allocated constant @%u to %s.\n", instr->index, debug_register('c', constant->reg, type));
+ float f[4] = {0};
VKD3D_ASSERT(hlsl_is_numeric_type(type));
VKD3D_ASSERT(type->e.numeric.dimy == 1);
- VKD3D_ASSERT(constant->reg.writemask);
- for (x = 0, i = 0; x < 4; ++x)
+ for (unsigned int i = 0; i < type->e.numeric.dimx; ++i)
{
const union hlsl_constant_value_component *value;
- float f = 0;
- if (!(constant->reg.writemask & (1u << x)))
- continue;
- value = &constant->value.u[i++];
+ value = &constant->value.u[i];
switch (type->e.numeric.type)
{
case HLSL_TYPE_BOOL:
- f = !!value->u;
+ f[i] = !!value->u;
break;
case HLSL_TYPE_FLOAT:
case HLSL_TYPE_HALF:
- f = value->f;
+ f[i] = value->f;
break;
case HLSL_TYPE_INT:
- f = value->i;
+ f[i] = value->i;
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
- f = value->u;
+ f[i] = value->u;
break;
case HLSL_TYPE_DOUBLE:
FIXME("Double constant.\n");
return;
}
+ }
+
+ if (find_constant(ctx, f, type->e.numeric.dimx, &constant->reg))
+ {
+ TRACE("Reusing already allocated constant %s for @%u.\n",
+ debug_register('c', constant->reg, type), instr->index);
+ break;
+ }
+
+ constant->reg = allocate_numeric_registers_for_type(ctx, allocator, 1, UINT_MAX, type);
+ TRACE("Allocated constant @%u to %s.\n", instr->index, debug_register('c', constant->reg, type));
- record_constant(ctx, constant->reg.id * 4 + x, f, &constant->node.loc);
+ for (unsigned int x = 0, i = 0; x < 4; ++x)
+ {
+ if ((constant->reg.writemask & (1u << x)))
+ record_constant(ctx, constant->reg.id * 4 + x, f[i++], &constant->node.loc);
}
break;
@@ -5765,15 +6271,12 @@ static uint32_t allocate_temp_registers(struct hlsl_ctx *ctx, struct hlsl_ir_fun
/* ps_1_* outputs are special and go in temp register 0. */
if (ctx->profile->major_version == 1 && ctx->profile->type == VKD3D_SHADER_TYPE_PIXEL)
{
- size_t i;
-
- for (i = 0; i < entry_func->parameters.count; ++i)
+ LIST_FOR_EACH_ENTRY(var, &entry_func->extern_vars, struct hlsl_ir_var, extern_entry)
{
- var = entry_func->parameters.vars[i];
if (var->is_output_semantic)
{
record_allocation(ctx, &allocator, 0, VKD3DSP_WRITEMASK_ALL,
- var->first_write, var->last_read, 0, false);
+ var->first_write, UINT_MAX, 0, false);
break;
}
}
@@ -5782,6 +6285,13 @@ static uint32_t allocate_temp_registers(struct hlsl_ctx *ctx, struct hlsl_ir_fun
allocate_temp_registers_recurse(ctx, &entry_func->body, &allocator);
vkd3d_free(allocator.allocations);
+ if (allocator.indexable_count)
+ TRACE("Declaration of function \"%s\" required %u temp registers, and %u indexable temps.\n",
+ entry_func->func->name, allocator.reg_count, allocator.indexable_count);
+ else
+ TRACE("Declaration of function \"%s\" required %u temp registers.\n",
+ entry_func->func->name, allocator.reg_count);
+
return allocator.reg_count;
}
@@ -5803,6 +6313,11 @@ static enum vkd3d_shader_interpolation_mode sm4_get_interpolation_mode(struct hl
{HLSL_STORAGE_CENTROID | HLSL_STORAGE_LINEAR, VKD3DSIM_LINEAR_CENTROID},
};
+ if (hlsl_type_is_primitive_array(type))
+ type = type->e.array.type;
+
+ VKD3D_ASSERT(hlsl_is_numeric_type(type));
+
if ((storage_modifiers & HLSL_STORAGE_NOINTERPOLATION)
|| base_type_get_semantic_equivalent(type->e.numeric.type) == HLSL_TYPE_UINT)
return VKD3DSIM_CONSTANT;
@@ -5829,7 +6344,7 @@ static void allocate_semantic_register(struct hlsl_ctx *ctx, struct hlsl_ir_var
[VKD3D_SHADER_TYPE_COMPUTE] = "Compute",
};
- bool is_patch = hlsl_type_is_patch_array(var->data_type);
+ bool is_primitive = hlsl_type_is_primitive_array(var->data_type);
enum vkd3d_shader_register_type type;
struct vkd3d_shader_version version;
bool special_interpolation = false;
@@ -5870,7 +6385,7 @@ static void allocate_semantic_register(struct hlsl_ctx *ctx, struct hlsl_ir_var
bool has_idx;
if (!sm4_sysval_semantic_from_semantic_name(&semantic, &version, ctx->semantic_compat_mapping, ctx->domain,
- var->semantic.name, var->semantic.index, output, ctx->is_patch_constant_func, is_patch))
+ var->semantic.name, var->semantic.index, output, ctx->is_patch_constant_func, is_primitive))
{
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SEMANTIC,
"Invalid semantic '%s'.", var->semantic.name);
@@ -5903,7 +6418,7 @@ static void allocate_semantic_register(struct hlsl_ctx *ctx, struct hlsl_ir_var
}
else
{
- unsigned int component_count = is_patch
+ unsigned int component_count = is_primitive
? var->data_type->e.array.type->e.numeric.dimx : var->data_type->e.numeric.dimx;
int mode = (ctx->profile->major_version < 4)
? 0 : sm4_get_interpolation_mode(var->data_type, var->storage_modifiers);
@@ -5922,7 +6437,7 @@ static void allocate_semantic_register(struct hlsl_ctx *ctx, struct hlsl_ir_var
static void allocate_semantic_registers(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func)
{
- struct register_allocator in_patch_allocator = {0}, patch_constant_out_patch_allocator = {0};
+ struct register_allocator in_prim_allocator = {0}, patch_constant_out_patch_allocator = {0};
struct register_allocator input_allocator = {0}, output_allocator = {0};
bool is_vertex_shader = ctx->profile->type == VKD3D_SHADER_TYPE_VERTEX;
bool is_pixel_shader = ctx->profile->type == VKD3D_SHADER_TYPE_PIXEL;
@@ -5935,7 +6450,7 @@ static void allocate_semantic_registers(struct hlsl_ctx *ctx, struct hlsl_ir_fun
{
if (var->is_input_semantic)
{
- if (hlsl_type_is_patch_array(var->data_type))
+ if (hlsl_type_is_primitive_array(var->data_type))
{
bool is_patch_constant_output_patch = ctx->is_patch_constant_func &&
var->data_type->e.array.array_type == HLSL_ARRAY_PATCH_OUTPUT;
@@ -5944,7 +6459,7 @@ static void allocate_semantic_registers(struct hlsl_ctx *ctx, struct hlsl_ir_fun
allocate_semantic_register(ctx, var, &patch_constant_out_patch_allocator, false,
!is_vertex_shader);
else
- allocate_semantic_register(ctx, var, &in_patch_allocator, false,
+ allocate_semantic_register(ctx, var, &in_prim_allocator, false,
!is_vertex_shader);
}
else
@@ -6360,7 +6875,7 @@ bool hlsl_component_index_range_from_deref(struct hlsl_ctx *ctx, const struct hl
unsigned int *start, unsigned int *count)
{
struct hlsl_type *type = deref->var->data_type;
- unsigned int i, k;
+ unsigned int i;
*start = 0;
*count = 0;
@@ -6368,49 +6883,18 @@ bool hlsl_component_index_range_from_deref(struct hlsl_ctx *ctx, const struct hl
for (i = 0; i < deref->path_len; ++i)
{
struct hlsl_ir_node *path_node = deref->path[i].node;
- unsigned int idx = 0;
+ unsigned int index;
VKD3D_ASSERT(path_node);
if (path_node->type != HLSL_IR_CONSTANT)
return false;
/* We should always have generated a cast to UINT. */
- VKD3D_ASSERT(path_node->data_type->class == HLSL_CLASS_SCALAR
- && path_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
-
- idx = hlsl_ir_constant(path_node)->value.u[0].u;
+ VKD3D_ASSERT(hlsl_is_vec1(path_node->data_type) && path_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
- switch (type->class)
- {
- case HLSL_CLASS_VECTOR:
- if (idx >= type->e.numeric.dimx)
- return false;
- *start += idx;
- break;
-
- case HLSL_CLASS_MATRIX:
- if (idx >= hlsl_type_major_size(type))
- return false;
- if (hlsl_type_is_row_major(type))
- *start += idx * type->e.numeric.dimx;
- else
- *start += idx * type->e.numeric.dimy;
- break;
-
- case HLSL_CLASS_ARRAY:
- if (idx >= type->e.array.elements_count)
- return false;
- *start += idx * hlsl_type_component_count(type->e.array.type);
- break;
-
- case HLSL_CLASS_STRUCT:
- for (k = 0; k < idx; ++k)
- *start += hlsl_type_component_count(type->e.record.fields[k].type);
- break;
-
- default:
- vkd3d_unreachable();
- }
+ if (!component_index_from_deref_path_node(path_node, type, &index))
+ return false;
+ *start += index;
type = hlsl_get_element_type_from_path_index(ctx, type, path_node);
}
@@ -6439,8 +6923,7 @@ bool hlsl_regset_index_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref
if (path_node->type == HLSL_IR_CONSTANT)
{
/* We should always have generated a cast to UINT. */
- VKD3D_ASSERT(path_node->data_type->class == HLSL_CLASS_SCALAR
- && path_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
+ VKD3D_ASSERT(hlsl_is_vec1(path_node->data_type) && path_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
idx = hlsl_ir_constant(path_node)->value.u[0].u;
@@ -6502,14 +6985,13 @@ bool hlsl_offset_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref *deref
*offset = deref->const_offset;
- if (hlsl_type_is_patch_array(deref->var->data_type))
+ if (hlsl_type_is_primitive_array(deref->var->data_type))
return false;
if (offset_node)
{
/* We should always have generated a cast to UINT. */
- VKD3D_ASSERT(offset_node->data_type->class == HLSL_CLASS_SCALAR
- && offset_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
+ VKD3D_ASSERT(hlsl_is_vec1(offset_node->data_type) && offset_node->data_type->e.numeric.type == HLSL_TYPE_UINT);
VKD3D_ASSERT(offset_node->type != HLSL_IR_CONSTANT);
return false;
}
@@ -6544,11 +7026,14 @@ struct hlsl_reg hlsl_reg_from_deref(struct hlsl_ctx *ctx, const struct hlsl_dere
{
const struct hlsl_ir_var *var = deref->var;
struct hlsl_reg ret = var->regs[HLSL_REGSET_NUMERIC];
- unsigned int offset = hlsl_offset_from_deref_safe(ctx, deref);
+ unsigned int offset = 0;
VKD3D_ASSERT(deref->data_type);
VKD3D_ASSERT(hlsl_is_numeric_type(deref->data_type));
+ if (!hlsl_type_is_primitive_array(deref->var->data_type))
+ offset = hlsl_offset_from_deref_safe(ctx, deref);
+
ret.index += offset / 4;
ret.id += offset / 4;
@@ -6559,6 +7044,36 @@ struct hlsl_reg hlsl_reg_from_deref(struct hlsl_ctx *ctx, const struct hlsl_dere
return ret;
}
+static bool get_integral_argument_value(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr,
+ unsigned int i, enum hlsl_base_type *base_type, int *value)
+{
+ const struct hlsl_ir_node *instr = attr->args[i].node;
+ const struct hlsl_type *type = instr->data_type;
+
+ if (type->class != HLSL_CLASS_SCALAR
+ || (type->e.numeric.type != HLSL_TYPE_INT && type->e.numeric.type != HLSL_TYPE_UINT))
+ {
+ struct vkd3d_string_buffer *string;
+
+ if ((string = hlsl_type_to_string(ctx, type)))
+ hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
+ "Unexpected type for argument %u of [%s]: expected int or uint, but got %s.",
+ i, attr->name, string->buffer);
+ hlsl_release_string_buffer(ctx, string);
+ return false;
+ }
+
+ if (instr->type != HLSL_IR_CONSTANT)
+ {
+ hlsl_fixme(ctx, &instr->loc, "Non-constant expression in [%s] initializer.", attr->name);
+ return false;
+ }
+
+ *base_type = type->e.numeric.type;
+ *value = hlsl_ir_constant(instr)->value.u[0].i;
+ return true;
+}
+
static const char *get_string_argument_value(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr, unsigned int i)
{
const struct hlsl_ir_node *instr = attr->args[i].node;
@@ -6594,36 +7109,17 @@ static void parse_numthreads_attribute(struct hlsl_ctx *ctx, const struct hlsl_a
for (i = 0; i < attr->args_count; ++i)
{
- const struct hlsl_ir_node *instr = attr->args[i].node;
- const struct hlsl_type *type = instr->data_type;
- const struct hlsl_ir_constant *constant;
-
- if (type->class != HLSL_CLASS_SCALAR
- || (type->e.numeric.type != HLSL_TYPE_INT && type->e.numeric.type != HLSL_TYPE_UINT))
- {
- struct vkd3d_string_buffer *string;
-
- if ((string = hlsl_type_to_string(ctx, type)))
- hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
- "Wrong type for argument %u of [numthreads]: expected int or uint, but got %s.",
- i, string->buffer);
- hlsl_release_string_buffer(ctx, string);
- break;
- }
+ enum hlsl_base_type base_type;
+ int value;
- if (instr->type != HLSL_IR_CONSTANT)
- {
- hlsl_fixme(ctx, &instr->loc, "Non-constant expression in [numthreads] initializer.");
- break;
- }
- constant = hlsl_ir_constant(instr);
+ if (!get_integral_argument_value(ctx, attr, i, &base_type, &value))
+ return;
- if ((type->e.numeric.type == HLSL_TYPE_INT && constant->value.u[0].i <= 0)
- || (type->e.numeric.type == HLSL_TYPE_UINT && !constant->value.u[0].u))
- hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_THREAD_COUNT,
+ if ((base_type == HLSL_TYPE_INT && value <= 0) || (base_type == HLSL_TYPE_UINT && !value))
+ hlsl_error(ctx, &attr->args[i].node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_THREAD_COUNT,
"Thread count must be a positive integer.");
- ctx->thread_count[i] = constant->value.u[0].u;
+ ctx->thread_count[i] = value;
}
}
@@ -6655,9 +7151,8 @@ static void parse_domain_attribute(struct hlsl_ctx *ctx, const struct hlsl_attri
static void parse_outputcontrolpoints_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr)
{
- const struct hlsl_ir_node *instr;
- const struct hlsl_type *type;
- const struct hlsl_ir_constant *constant;
+ enum hlsl_base_type base_type;
+ int value;
if (attr->args_count != 1)
{
@@ -6666,35 +7161,14 @@ static void parse_outputcontrolpoints_attribute(struct hlsl_ctx *ctx, const stru
return;
}
- instr = attr->args[0].node;
- type = instr->data_type;
-
- if (type->class != HLSL_CLASS_SCALAR
- || (type->e.numeric.type != HLSL_TYPE_INT && type->e.numeric.type != HLSL_TYPE_UINT))
- {
- struct vkd3d_string_buffer *string;
-
- if ((string = hlsl_type_to_string(ctx, type)))
- hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
- "Wrong type for argument 0 of [outputcontrolpoints]: expected int or uint, but got %s.",
- string->buffer);
- hlsl_release_string_buffer(ctx, string);
- return;
- }
-
- if (instr->type != HLSL_IR_CONSTANT)
- {
- hlsl_fixme(ctx, &instr->loc, "Non-constant expression in [outputcontrolpoints] initializer.");
+ if (!get_integral_argument_value(ctx, attr, 0, &base_type, &value))
return;
- }
- constant = hlsl_ir_constant(instr);
- if ((type->e.numeric.type == HLSL_TYPE_INT && constant->value.u[0].i < 0)
- || constant->value.u[0].u > 32)
- hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT,
+ if (value < 0 || value > 32)
+ hlsl_error(ctx, &attr->args[0].node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT,
"Output control point count must be between 0 and 32.");
- ctx->output_control_point_count = constant->value.u[0].u;
+ ctx->output_control_point_count = value;
}
static void parse_outputtopology_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr)
@@ -6788,6 +7262,28 @@ static void parse_patchconstantfunc_attribute(struct hlsl_ctx *ctx, const struct
"Patch constant function \"%s\" is not defined.", name);
}
+static void parse_maxvertexcount_attribute(struct hlsl_ctx *ctx, const struct hlsl_attribute *attr)
+{
+ enum hlsl_base_type base_type;
+ int value;
+
+ if (attr->args_count != 1)
+ {
+ hlsl_error(ctx, &attr->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
+ "Expected 1 parameter for [maxvertexcount] attribute, but got %u.", attr->args_count);
+ return;
+ }
+
+ if (!get_integral_argument_value(ctx, attr, 0, &base_type, &value))
+ return;
+
+ if (value < 1 || value > 1024)
+ hlsl_error(ctx, &attr->args[0].node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_MAX_VERTEX_COUNT,
+ "Max vertex count must be between 1 and 1024.");
+
+ ctx->max_vertex_count = value;
+}
+
static void parse_entry_function_attributes(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func)
{
const struct hlsl_profile_info *profile = ctx->profile;
@@ -6812,6 +7308,8 @@ static void parse_entry_function_attributes(struct hlsl_ctx *ctx, struct hlsl_ir
parse_patchconstantfunc_attribute(ctx, attr);
else if (!strcmp(attr->name, "earlydepthstencil") && profile->type == VKD3D_SHADER_TYPE_PIXEL)
entry_func->early_depth_test = true;
+ else if (!strcmp(attr->name, "maxvertexcount") && profile->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ parse_maxvertexcount_attribute(ctx, attr);
else
hlsl_warning(ctx, &entry_func->attrs[i]->loc, VKD3D_SHADER_WARNING_HLSL_UNKNOWN_ATTRIBUTE,
"Ignoring unknown attribute \"%s\".", entry_func->attrs[i]->name);
@@ -6884,7 +7382,71 @@ static void validate_hull_shader_attributes(struct hlsl_ctx *ctx, const struct h
}
}
-static void validate_and_record_patch_type(struct hlsl_ctx *ctx, struct hlsl_ir_var *var)
+static enum vkd3d_primitive_type get_primitive_type(struct hlsl_ctx *ctx, struct hlsl_ir_var *var)
+{
+ uint32_t prim_modifier = var->data_type->modifiers & HLSL_PRIMITIVE_MODIFIERS_MASK;
+ enum vkd3d_primitive_type prim_type = VKD3D_PT_UNDEFINED;
+
+ if (prim_modifier)
+ {
+ unsigned int count = var->data_type->e.array.elements_count;
+ unsigned int expected_count;
+
+ VKD3D_ASSERT(!(prim_modifier & (prim_modifier - 1)));
+
+ switch (prim_modifier)
+ {
+ case HLSL_PRIMITIVE_POINT:
+ prim_type = VKD3D_PT_POINTLIST;
+ expected_count = 1;
+ break;
+
+ case HLSL_PRIMITIVE_LINE:
+ prim_type = VKD3D_PT_LINELIST;
+ expected_count = 2;
+ break;
+
+ case HLSL_PRIMITIVE_TRIANGLE:
+ prim_type = VKD3D_PT_TRIANGLELIST;
+ expected_count = 3;
+ break;
+
+ case HLSL_PRIMITIVE_LINEADJ:
+ prim_type = VKD3D_PT_LINELIST_ADJ;
+ expected_count = 4;
+ break;
+
+ case HLSL_PRIMITIVE_TRIANGLEADJ:
+ prim_type = VKD3D_PT_TRIANGLELIST_ADJ;
+ expected_count = 6;
+ break;
+
+ default:
+ vkd3d_unreachable();
+ }
+
+ if (count != expected_count)
+ {
+ struct vkd3d_string_buffer *string;
+
+ if ((string = hlsl_modifiers_to_string(ctx, prim_modifier)))
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT,
+ "Control point count %u does not match the expect count %u for the %s input primitive type.",
+ count, expected_count, string->buffer);
+ hlsl_release_string_buffer(ctx, string);
+ }
+ }
+
+ /* Patch types take precedence over primitive modifiers. */
+ if (hlsl_type_is_patch_array(var->data_type))
+ prim_type = VKD3D_PT_PATCH;
+
+ VKD3D_ASSERT(prim_type != VKD3D_PT_UNDEFINED);
+ return prim_type;
+}
+
+
+static void validate_and_record_prim_type(struct hlsl_ctx *ctx, struct hlsl_ir_var *var)
{
unsigned int control_point_count = var->data_type->e.array.elements_count;
enum hlsl_array_type array_type = var->data_type->e.array.array_type;
@@ -6902,7 +7464,7 @@ static void validate_and_record_patch_type(struct hlsl_ctx *ctx, struct hlsl_ir_
return;
}
}
- else
+ else if (array_type == HLSL_ARRAY_PATCH_OUTPUT)
{
if (!ctx->is_patch_constant_func && profile->type != VKD3D_SHADER_TYPE_DOMAIN)
{
@@ -6913,6 +7475,30 @@ static void validate_and_record_patch_type(struct hlsl_ctx *ctx, struct hlsl_ir_
}
}
+ if ((var->data_type->modifiers & HLSL_PRIMITIVE_MODIFIERS_MASK) && profile->type != VKD3D_SHADER_TYPE_GEOMETRY)
+ {
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INCOMPATIBLE_PROFILE,
+ "Input primitive parameters can only be used in geometry shaders.");
+ return;
+ }
+
+ if (profile->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ {
+ enum vkd3d_primitive_type prim_type = get_primitive_type(ctx, var);
+
+ if (ctx->input_primitive_type == VKD3D_PT_UNDEFINED)
+ {
+ ctx->input_primitive_type = prim_type;
+ }
+ else if (ctx->input_primitive_type != prim_type)
+ {
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
+ "Input primitive type does not match the previously declared type.");
+ hlsl_note(ctx, &ctx->input_primitive_param->loc, VKD3D_SHADER_LOG_ERROR,
+ "The input primitive was previously declared here.");
+ }
+ }
+
if (control_point_count > 32)
{
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT,
@@ -6925,7 +7511,7 @@ static void validate_and_record_patch_type(struct hlsl_ctx *ctx, struct hlsl_ir_
{
if (control_point_count != ctx->output_control_point_count)
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT,
- "Output control point count %u does not match the count %u specified in the control point function.",
+ "Output control point count %u does not match the count %u declared in the control point function.",
control_point_count, ctx->output_control_point_count);
if (!hlsl_types_are_equal(control_point_type, ctx->output_control_point_type))
@@ -6937,22 +7523,32 @@ static void validate_and_record_patch_type(struct hlsl_ctx *ctx, struct hlsl_ir_
if (ctx->input_control_point_count != UINT_MAX)
{
- VKD3D_ASSERT(ctx->is_patch_constant_func);
+ VKD3D_ASSERT(profile->type == VKD3D_SHADER_TYPE_GEOMETRY || ctx->is_patch_constant_func);
if (control_point_count != ctx->input_control_point_count)
+ {
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_CONTROL_POINT_COUNT,
- "Input control point count %u does not match the count %u specified in the control point function.",
+ "Input control point count %u does not match the count %u declared previously.",
control_point_count, ctx->input_control_point_count);
+ hlsl_note(ctx, &ctx->input_primitive_param->loc, VKD3D_SHADER_LOG_ERROR,
+ "The input primitive was previously declared here.");
+ }
- if (!hlsl_types_are_equal(control_point_type, ctx->input_control_point_type))
+ if (profile->type != VKD3D_SHADER_TYPE_GEOMETRY
+ && !hlsl_types_are_equal(control_point_type, ctx->input_control_point_type))
+ {
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
- "Input control point type does not match the input type specified in the control point function.");
+ "Input control point type does not match the input type declared previously.");
+ hlsl_note(ctx, &ctx->input_primitive_param->loc, VKD3D_SHADER_LOG_ERROR,
+ "The input primitive was previously declared here.");
+ }
return;
}
ctx->input_control_point_count = control_point_count;
ctx->input_control_point_type = control_point_type;
+ ctx->input_primitive_param = var;
}
static void remove_unreachable_code(struct hlsl_ctx *ctx, struct hlsl_block *body)
@@ -7007,13 +7603,47 @@ static void remove_unreachable_code(struct hlsl_ctx *ctx, struct hlsl_block *bod
list_move_slice_tail(&block.instrs, start, list_tail(&body->instrs));
hlsl_block_cleanup(&block);
- break;
- }
+ break;
+ }
+}
+
+void hlsl_lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_block *body)
+{
+ lower_ir(ctx, lower_index_loads, body);
+}
+
+
+static bool simplify_exprs(struct hlsl_ctx *ctx, struct hlsl_block *block)
+{
+ bool progress, any_progress = false;
+
+ do
+ {
+ progress = hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, block, NULL);
+ progress |= hlsl_transform_ir(ctx, hlsl_normalize_binary_exprs, block, NULL);
+ progress |= hlsl_transform_ir(ctx, hlsl_fold_constant_identities, block, NULL);
+ progress |= hlsl_transform_ir(ctx, hlsl_fold_constant_swizzles, block, NULL);
+
+ any_progress |= progress;
+ } while (progress);
+
+ return any_progress;
}
-void hlsl_lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_block *body)
+static void hlsl_run_folding_passes(struct hlsl_ctx *ctx, struct hlsl_block *body)
{
- lower_ir(ctx, lower_index_loads, body);
+ bool progress;
+
+ hlsl_transform_ir(ctx, fold_redundant_casts, body, NULL);
+ do
+ {
+ progress = simplify_exprs(ctx, body);
+ progress |= hlsl_copy_propagation_execute(ctx, body);
+ progress |= hlsl_transform_ir(ctx, fold_swizzle_chains, body, NULL);
+ progress |= hlsl_transform_ir(ctx, remove_trivial_swizzles, body, NULL);
+ progress |= hlsl_transform_ir(ctx, remove_trivial_conditional_branches, body, NULL);
+ } while (progress);
+ hlsl_transform_ir(ctx, fold_redundant_casts, body, NULL);
}
void hlsl_run_const_passes(struct hlsl_ctx *ctx, struct hlsl_block *body)
@@ -7035,32 +7665,24 @@ void hlsl_run_const_passes(struct hlsl_ctx *ctx, struct hlsl_block *body)
lower_ir(ctx, lower_narrowing_casts, body);
lower_ir(ctx, lower_int_dot, body);
- lower_ir(ctx, lower_int_division, body);
- lower_ir(ctx, lower_int_modulus, body);
+ if (hlsl_version_ge(ctx, 4, 0))
+ {
+ lower_ir(ctx, lower_int_modulus_sm4, body);
+ lower_ir(ctx, lower_int_division_sm4, body);
+ }
lower_ir(ctx, lower_int_abs, body);
lower_ir(ctx, lower_casts_to_bool, body);
lower_ir(ctx, lower_float_modulus, body);
- hlsl_transform_ir(ctx, fold_redundant_casts, body, NULL);
- do
- {
- progress = hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, body, NULL);
- progress |= hlsl_transform_ir(ctx, hlsl_fold_constant_identities, body, NULL);
- progress |= hlsl_transform_ir(ctx, hlsl_normalize_binary_exprs, body, NULL);
- progress |= hlsl_transform_ir(ctx, hlsl_fold_constant_swizzles, body, NULL);
- progress |= hlsl_copy_propagation_execute(ctx, body);
- progress |= hlsl_transform_ir(ctx, fold_swizzle_chains, body, NULL);
- progress |= hlsl_transform_ir(ctx, remove_trivial_swizzles, body, NULL);
- progress |= hlsl_transform_ir(ctx, remove_trivial_conditional_branches, body, NULL);
- } while (progress);
+ hlsl_run_folding_passes(ctx, body);
}
static void generate_vsir_signature_entry(struct hlsl_ctx *ctx, struct vsir_program *program,
struct shader_signature *signature, bool output, struct hlsl_ir_var *var)
{
enum vkd3d_shader_component_type component_type = VKD3D_SHADER_COMPONENT_VOID;
+ bool is_primitive = hlsl_type_is_primitive_array(var->data_type);
enum vkd3d_shader_sysval_semantic sysval = VKD3D_SHADER_SV_NONE;
- bool is_patch = hlsl_type_is_patch_array(var->data_type);
unsigned int register_index, mask, use_mask;
const char *name = var->semantic.name;
enum vkd3d_shader_register_type type;
@@ -7073,7 +7695,7 @@ static void generate_vsir_signature_entry(struct hlsl_ctx *ctx, struct vsir_prog
bool has_idx, ret;
ret = sm4_sysval_semantic_from_semantic_name(&sysval, &program->shader_version, ctx->semantic_compat_mapping,
- ctx->domain, var->semantic.name, var->semantic.index, output, ctx->is_patch_constant_func, is_patch);
+ ctx->domain, var->semantic.name, var->semantic.index, output, ctx->is_patch_constant_func, is_primitive);
VKD3D_ASSERT(ret);
if (sysval == ~0u)
return;
@@ -7109,6 +7731,7 @@ static void generate_vsir_signature_entry(struct hlsl_ctx *ctx, struct vsir_prog
break;
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
component_type = VKD3D_SHADER_COMPONENT_UINT;
break;
@@ -7198,6 +7821,22 @@ static void generate_vsir_signature_entry(struct hlsl_ctx *ctx, struct vsir_prog
element->used_mask = use_mask;
if (program->shader_version.type == VKD3D_SHADER_TYPE_PIXEL && !output)
element->interpolation_mode = VKD3DSIM_LINEAR;
+
+ switch (var->data_type->e.numeric.type)
+ {
+ case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_DOUBLE:
+ case HLSL_TYPE_FLOAT:
+ case HLSL_TYPE_HALF:
+ case HLSL_TYPE_INT:
+ case HLSL_TYPE_UINT:
+ element->min_precision = VKD3D_SHADER_MINIMUM_PRECISION_NONE;
+ break;
+
+ case HLSL_TYPE_MIN16UINT:
+ element->min_precision = VKD3D_SHADER_MINIMUM_PRECISION_UINT_16;
+ break;
+ }
}
static void generate_vsir_signature(struct hlsl_ctx *ctx,
@@ -7265,6 +7904,7 @@ static enum vkd3d_data_type vsir_data_type_from_hlsl_type(struct hlsl_ctx *ctx,
return VKD3D_DATA_INT;
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
return VKD3D_DATA_UINT;
}
}
@@ -7416,7 +8056,7 @@ static void sm1_generate_vsir_sampler_dcls(struct hlsl_ctx *ctx,
static enum vkd3d_shader_register_type sm4_get_semantic_register_type(enum vkd3d_shader_type shader_type,
bool is_patch_constant_func, const struct hlsl_ir_var *var)
{
- if (hlsl_type_is_patch_array(var->data_type))
+ if (hlsl_type_is_primitive_array(var->data_type))
{
VKD3D_ASSERT(var->is_input_semantic);
@@ -7636,7 +8276,7 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
}
else
{
- unsigned int offset = hlsl_offset_from_deref_safe(ctx, deref) + var->buffer_offset;
+ unsigned int offset = deref->const_offset + var->buffer_offset;
VKD3D_ASSERT(data_type->class <= HLSL_CLASS_VECTOR);
reg->type = VKD3DSPR_CONSTBUFFER;
@@ -7654,19 +8294,27 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
reg->idx[1].offset = offset / 4;
reg->idx_count = 2;
}
+
+ if (deref->rel_offset.node)
+ {
+ if (!(reg->idx[reg->idx_count - 1].rel_addr = sm4_generate_vsir_new_idx_src(ctx,
+ program, deref->rel_offset.node)))
+ return false;
+ }
+
*writemask = ((1u << data_type->e.numeric.dimx) - 1) << (offset & 3);
}
}
else if (var->is_input_semantic)
{
- bool is_patch = hlsl_type_is_patch_array(var->data_type);
+ bool is_primitive = hlsl_type_is_primitive_array(var->data_type);
bool has_idx;
if (sm4_register_from_semantic_name(version, var->semantic.name, false, &reg->type, &has_idx))
{
unsigned int offset = hlsl_offset_from_deref_safe(ctx, deref);
- VKD3D_ASSERT(!is_patch);
+ VKD3D_ASSERT(!is_primitive);
if (has_idx)
{
@@ -7688,12 +8336,12 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
reg->type = sm4_get_semantic_register_type(version->type, ctx->is_patch_constant_func, var);
reg->dimension = VSIR_DIMENSION_VEC4;
- reg->idx[is_patch ? 1 : 0].offset = hlsl_reg.id;
- reg->idx_count = is_patch ? 2 : 1;
+ reg->idx[is_primitive ? 1 : 0].offset = hlsl_reg.id;
+ reg->idx_count = is_primitive ? 2 : 1;
*writemask = hlsl_reg.writemask;
}
- if (is_patch)
+ if (is_primitive)
{
reg->idx[0].offset = deref->const_offset / 4;
if (deref->rel_offset.node)
@@ -7750,7 +8398,8 @@ static bool sm4_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx, st
if (!sm4_generate_vsir_reg_from_deref(ctx, program, &src_param->reg, &writemask, deref))
return false;
- src_param->swizzle = generate_vsir_get_src_swizzle(writemask, dst_writemask);
+ if (src_param->reg.dimension != VSIR_DIMENSION_NONE)
+ src_param->swizzle = generate_vsir_get_src_swizzle(writemask, dst_writemask);
return true;
}
@@ -7780,7 +8429,6 @@ static void sm1_generate_vsir_instr_constant(struct hlsl_ctx *ctx,
struct vsir_program *program, struct hlsl_ir_constant *constant)
{
struct hlsl_ir_node *instr = &constant->node;
- struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
@@ -7792,13 +8440,11 @@ static void sm1_generate_vsir_instr_constant(struct hlsl_ctx *ctx,
src_param = &ins->src[0];
vsir_register_init(&src_param->reg, VKD3DSPR_CONST, VKD3D_DATA_FLOAT, 1);
+ src_param->reg.dimension = VSIR_DIMENSION_VEC4;
src_param->reg.idx[0].offset = constant->reg.id;
src_param->swizzle = generate_vsir_get_src_swizzle(constant->reg.writemask, instr->reg.writemask);
- dst_param = &ins->dst[0];
- vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
- dst_param->reg.idx[0].offset = instr->reg.id;
- dst_param->write_mask = instr->reg.writemask;
+ vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
}
static void sm4_generate_vsir_rasterizer_sample_count(struct hlsl_ctx *ctx,
@@ -7885,11 +8531,13 @@ static void sm1_generate_vsir_instr_expr_per_component_instr_op(struct hlsl_ctx
dst_param = &ins->dst[0];
vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
dst_param->reg.idx[0].offset = instr->reg.id;
+ dst_param->reg.dimension = VSIR_DIMENSION_VEC4;
dst_param->write_mask = 1u << i;
src_param = &ins->src[0];
vsir_register_init(&src_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
src_param->reg.idx[0].offset = operand->reg.id;
+ src_param->reg.dimension = VSIR_DIMENSION_VEC4;
c = vsir_swizzle_get_component(src_swizzle, i);
src_param->swizzle = vsir_swizzle_from_writemask(1u << c);
}
@@ -7901,7 +8549,6 @@ static void sm1_generate_vsir_instr_expr_sincos(struct hlsl_ctx *ctx, struct vsi
{
struct hlsl_ir_node *operand = expr->operands[0].node;
struct hlsl_ir_node *instr = &expr->node;
- struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
unsigned int src_count = 0;
@@ -7912,25 +8559,20 @@ static void sm1_generate_vsir_instr_expr_sincos(struct hlsl_ctx *ctx, struct vsi
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_SINCOS, 1, src_count)))
return;
- dst_param = &ins->dst[0];
- vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
- dst_param->reg.idx[0].offset = instr->reg.id;
- dst_param->write_mask = instr->reg.writemask;
-
- src_param = &ins->src[0];
- vsir_register_init(&src_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
- src_param->reg.idx[0].offset = operand->reg.id;
- src_param->swizzle = generate_vsir_get_src_swizzle(operand->reg.writemask, VKD3DSP_WRITEMASK_ALL);
+ vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
+ vsir_src_from_hlsl_node(&ins->src[0], ctx, operand, VKD3DSP_WRITEMASK_ALL);
if (ctx->profile->major_version < 3)
{
src_param = &ins->src[1];
vsir_register_init(&src_param->reg, VKD3DSPR_CONST, VKD3D_DATA_FLOAT, 1);
+ src_param->reg.dimension = VSIR_DIMENSION_VEC4;
src_param->reg.idx[0].offset = ctx->d3dsincosconst1.id;
src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
- src_param = &ins->src[1];
+ src_param = &ins->src[2];
vsir_register_init(&src_param->reg, VKD3DSPR_CONST, VKD3D_DATA_FLOAT, 1);
+ src_param->reg.dimension = VSIR_DIMENSION_VEC4;
src_param->reg.idx[0].offset = ctx->d3dsincosconst2.id;
src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
}
@@ -7957,6 +8599,7 @@ static bool sm1_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
/* Integrals are internally represented as floats, so no change is necessary.*/
@@ -7978,8 +8621,9 @@ static bool sm1_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
break;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
- switch(src_type->e.numeric.type)
+ switch (src_type->e.numeric.type)
{
case HLSL_TYPE_HALF:
case HLSL_TYPE_FLOAT:
@@ -7989,6 +8633,7 @@ static bool sm1_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
break;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
@@ -8004,13 +8649,8 @@ static bool sm1_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
switch (src_type->e.numeric.type)
{
case HLSL_TYPE_FLOAT:
- if (ctx->double_as_float_alias)
- {
- generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
- return true;
- }
- hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
- "The 'double' type is not supported for the %s profile.", ctx->profile->name);
+ generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
+ return true;
break;
default:
@@ -8033,12 +8673,15 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
struct hlsl_ir_expr *expr)
{
struct hlsl_ir_node *instr = &expr->node;
+ struct hlsl_type *type = instr->data_type;
- if (expr->op != HLSL_OP1_REINTERPRET && expr->op != HLSL_OP1_CAST
- && instr->data_type->e.numeric.type != HLSL_TYPE_FLOAT)
+ if (!hlsl_is_numeric_type(type))
+ goto err;
+
+ if (type->e.numeric.type == HLSL_TYPE_DOUBLE && !ctx->double_as_float_alias)
{
- /* These need to be lowered. */
- hlsl_fixme(ctx, &instr->loc, "SM1 non-float expression.");
+ hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
+ "The 'double' type is not supported for the %s profile.", ctx->profile->name);
return false;
}
@@ -8053,30 +8696,44 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
case HLSL_OP1_COS_REDUCED:
VKD3D_ASSERT(expr->node.reg.writemask == VKD3DSP_WRITEMASK_0);
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
sm1_generate_vsir_instr_expr_sincos(ctx, program, expr);
break;
case HLSL_OP1_DSX:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSX, 0, 0, true);
break;
case HLSL_OP1_DSY:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DSY, 0, 0, true);
break;
case HLSL_OP1_EXP2:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_EXP);
break;
case HLSL_OP1_LOG2:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_LOG);
break;
case HLSL_OP1_NEG:
+ if (type->e.numeric.type == HLSL_TYPE_BOOL)
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, VKD3DSPSM_NEG, 0, true);
break;
case HLSL_OP1_RCP:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_RCP);
break;
@@ -8085,23 +8742,33 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
break;
case HLSL_OP1_RSQ:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
sm1_generate_vsir_instr_expr_per_component_instr_op(ctx, program, expr, VKD3DSIH_RSQ);
break;
case HLSL_OP1_SAT:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, VKD3DSPDM_SATURATE, true);
break;
case HLSL_OP1_SIN_REDUCED:
VKD3D_ASSERT(expr->node.reg.writemask == VKD3DSP_WRITEMASK_1);
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
sm1_generate_vsir_instr_expr_sincos(ctx, program, expr);
break;
case HLSL_OP2_ADD:
+ if (type->e.numeric.type == HLSL_TYPE_BOOL)
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ADD, 0, 0, true);
break;
case HLSL_OP2_DOT:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
switch (expr->operands[0].node->data_type->e.numeric.dimx)
{
case 3:
@@ -8135,35 +8802,49 @@ static bool sm1_generate_vsir_instr_expr(struct hlsl_ctx *ctx, struct vsir_progr
break;
case HLSL_OP2_LOGIC_AND:
+ if (type->e.numeric.type != HLSL_TYPE_BOOL)
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MIN, 0, 0, true);
break;
case HLSL_OP2_LOGIC_OR:
+ if (type->e.numeric.type != HLSL_TYPE_BOOL)
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MAX, 0, 0, true);
break;
case HLSL_OP2_SLT:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_SLT, 0, 0, true);
break;
case HLSL_OP3_CMP:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_CMP, 0, 0, true);
break;
case HLSL_OP3_DP2ADD:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DP2ADD, 0, 0, false);
break;
case HLSL_OP3_MAD:
+ if (!hlsl_type_is_floating_point(type))
+ goto err;
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MAD, 0, 0, true);
break;
default:
- hlsl_fixme(ctx, &instr->loc, "SM1 \"%s\" expression.", debug_hlsl_expr_op(expr->op));
- return false;
+ goto err;
}
-
return true;
+
+err:
+ hlsl_fixme(ctx, &instr->loc, "SM1 %s expression of type %s.", debug_hlsl_expr_op(expr->op), instr->data_type->name);
+ return false;
}
static void sm1_generate_vsir_init_dst_param_from_deref(struct hlsl_ctx *ctx,
@@ -8213,19 +8894,68 @@ static void sm1_generate_vsir_init_dst_param_from_deref(struct hlsl_ctx *ctx,
else
VKD3D_ASSERT(reg.allocated);
- vsir_register_init(&dst_param->reg, type, VKD3D_DATA_FLOAT, 1);
+ if (type == VKD3DSPR_DEPTHOUT)
+ {
+ vsir_register_init(&dst_param->reg, type, VKD3D_DATA_FLOAT, 0);
+ dst_param->reg.dimension = VSIR_DIMENSION_SCALAR;
+ }
+ else
+ {
+ vsir_register_init(&dst_param->reg, type, VKD3D_DATA_FLOAT, 1);
+ dst_param->reg.idx[0].offset = register_index;
+ dst_param->reg.dimension = VSIR_DIMENSION_VEC4;
+ }
dst_param->write_mask = writemask;
- dst_param->reg.idx[0].offset = register_index;
if (deref->rel_offset.node)
hlsl_fixme(ctx, loc, "Translate relative addressing on dst register for vsir.");
}
+static void sm1_generate_vsir_instr_mova(struct hlsl_ctx *ctx,
+ struct vsir_program *program, struct hlsl_ir_node *instr)
+{
+ enum vkd3d_shader_opcode opcode = hlsl_version_ge(ctx, 2, 0) ? VKD3DSIH_MOVA : VKD3DSIH_MOV;
+ struct vkd3d_shader_dst_param *dst_param;
+ struct vkd3d_shader_instruction *ins;
+
+ VKD3D_ASSERT(instr->reg.allocated);
+
+ if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode, 1, 1)))
+ return;
+
+ dst_param = &ins->dst[0];
+ vsir_register_init(&dst_param->reg, VKD3DSPR_ADDR, VKD3D_DATA_FLOAT, 0);
+ dst_param->write_mask = VKD3DSP_WRITEMASK_0;
+
+ VKD3D_ASSERT(instr->data_type->class <= HLSL_CLASS_VECTOR);
+ VKD3D_ASSERT(instr->data_type->e.numeric.dimx == 1);
+ vsir_src_from_hlsl_node(&ins->src[0], ctx, instr, VKD3DSP_WRITEMASK_ALL);
+}
+
+static struct vkd3d_shader_src_param *sm1_generate_vsir_new_address_src(struct hlsl_ctx *ctx,
+ struct vsir_program *program)
+{
+ struct vkd3d_shader_src_param *idx_src;
+
+ if (!(idx_src = vsir_program_get_src_params(program, 1)))
+ {
+ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ memset(idx_src, 0, sizeof(*idx_src));
+ vsir_register_init(&idx_src->reg, VKD3DSPR_ADDR, VKD3D_DATA_FLOAT, 0);
+ idx_src->reg.dimension = VSIR_DIMENSION_VEC4;
+ idx_src->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
+ return idx_src;
+}
+
static void sm1_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx,
- struct vkd3d_shader_src_param *src_param, struct hlsl_deref *deref,
- unsigned int dst_writemask, const struct vkd3d_shader_location *loc)
+ struct vsir_program *program, struct vkd3d_shader_src_param *src_param,
+ struct hlsl_deref *deref, uint32_t dst_writemask, const struct vkd3d_shader_location *loc)
{
enum vkd3d_shader_register_type type = VKD3DSPR_TEMP;
+ struct vkd3d_shader_src_param *src_rel_addr = NULL;
struct vkd3d_shader_version version;
uint32_t register_index;
unsigned int writemask;
@@ -8243,12 +8973,26 @@ static void sm1_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx,
}
else if (deref->var->is_uniform)
{
+ unsigned int offset = deref->const_offset;
+
type = VKD3DSPR_CONST;
+ register_index = deref->var->regs[HLSL_REGSET_NUMERIC].id + offset / 4;
- reg = hlsl_reg_from_deref(ctx, deref);
- register_index = reg.id;
- writemask = reg.writemask;
- VKD3D_ASSERT(reg.allocated);
+ writemask = 0xf & (0xf << (offset % 4));
+ if (deref->var->regs[HLSL_REGSET_NUMERIC].writemask)
+ writemask = hlsl_combine_writemasks(deref->var->regs[HLSL_REGSET_NUMERIC].writemask, writemask);
+
+ if (deref->rel_offset.node)
+ {
+ VKD3D_ASSERT(deref_supports_sm1_indirect_addressing(ctx, deref));
+
+ if (!(src_rel_addr = sm1_generate_vsir_new_address_src(ctx, program)))
+ {
+ ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+ }
+ VKD3D_ASSERT(deref->var->regs[HLSL_REGSET_NUMERIC].allocated);
}
else if (deref->var->is_input_semantic)
{
@@ -8280,32 +9024,30 @@ static void sm1_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx,
}
vsir_register_init(&src_param->reg, type, VKD3D_DATA_FLOAT, 1);
+ src_param->reg.dimension = VSIR_DIMENSION_VEC4;
src_param->reg.idx[0].offset = register_index;
+ src_param->reg.idx[0].rel_addr = src_rel_addr;
src_param->swizzle = generate_vsir_get_src_swizzle(writemask, dst_writemask);
-
- if (deref->rel_offset.node)
- hlsl_fixme(ctx, loc, "Translate relative addressing on src register for vsir.");
}
static void sm1_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_program *program,
struct hlsl_ir_load *load)
{
struct hlsl_ir_node *instr = &load->node;
- struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_instruction *ins;
VKD3D_ASSERT(instr->reg.allocated);
+ if (load->src.rel_offset.node)
+ sm1_generate_vsir_instr_mova(ctx, program, load->src.rel_offset.node);
+
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
return;
- dst_param = &ins->dst[0];
- vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
- dst_param->reg.idx[0].offset = instr->reg.id;
- dst_param->write_mask = instr->reg.writemask;
+ vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
- sm1_generate_vsir_init_src_param_from_deref(ctx, &ins->src[0], &load->src, dst_param->write_mask,
- &ins->location);
+ sm1_generate_vsir_init_src_param_from_deref(ctx, program, &ins->src[0],
+ &load->src, ins->dst[0].write_mask, &ins->location);
}
static void sm1_generate_vsir_instr_resource_load(struct hlsl_ctx *ctx,
@@ -8315,7 +9057,6 @@ static void sm1_generate_vsir_instr_resource_load(struct hlsl_ctx *ctx,
struct hlsl_ir_node *ddx = load->ddx.node;
struct hlsl_ir_node *ddy = load->ddy.node;
struct hlsl_ir_node *instr = &load->node;
- struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
enum vkd3d_shader_opcode opcode;
@@ -8354,15 +9095,12 @@ static void sm1_generate_vsir_instr_resource_load(struct hlsl_ctx *ctx,
return;
ins->flags = flags;
- dst_param = &ins->dst[0];
- vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
- dst_param->reg.idx[0].offset = instr->reg.id;
- dst_param->write_mask = instr->reg.writemask;
+ vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
src_param = &ins->src[0];
vsir_src_from_hlsl_node(src_param, ctx, coords, VKD3DSP_WRITEMASK_ALL);
- sm1_generate_vsir_init_src_param_from_deref(ctx, &ins->src[1], &load->resource,
+ sm1_generate_vsir_init_src_param_from_deref(ctx, program, &ins->src[1], &load->resource,
VKD3DSP_WRITEMASK_ALL, &ins->location);
if (load->load_type == HLSL_RESOURCE_SAMPLE_GRAD)
@@ -8379,7 +9117,6 @@ static void generate_vsir_instr_swizzle(struct hlsl_ctx *ctx,
struct vsir_program *program, struct hlsl_ir_swizzle *swizzle_instr)
{
struct hlsl_ir_node *instr = &swizzle_instr->node, *val = swizzle_instr->val.node;
- struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_src_param *src_param;
struct vkd3d_shader_instruction *ins;
uint32_t swizzle;
@@ -8389,11 +9126,7 @@ static void generate_vsir_instr_swizzle(struct hlsl_ctx *ctx,
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_MOV, 1, 1)))
return;
- dst_param = &ins->dst[0];
- vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, vsir_data_type_from_hlsl_instruction(ctx, instr), 1);
- dst_param->reg.idx[0].offset = instr->reg.id;
- dst_param->reg.dimension = VSIR_DIMENSION_VEC4;
- dst_param->write_mask = instr->reg.writemask;
+ vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
swizzle = hlsl_swizzle_from_writemask(val->reg.writemask);
swizzle = hlsl_combine_swizzles(swizzle, swizzle_instr->u.vector, instr->data_type->e.numeric.dimx);
@@ -8429,7 +9162,6 @@ static void sm1_generate_vsir_instr_jump(struct hlsl_ctx *ctx,
{
struct hlsl_ir_node *condition = jump->condition.node;
struct hlsl_ir_node *instr = &jump->node;
- struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_instruction *ins;
if (jump->type == HLSL_IR_JUMP_DISCARD_NEG)
@@ -8437,10 +9169,7 @@ static void sm1_generate_vsir_instr_jump(struct hlsl_ctx *ctx,
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VKD3DSIH_TEXKILL, 1, 0)))
return;
- dst_param = &ins->dst[0];
- vsir_register_init(&dst_param->reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
- dst_param->reg.idx[0].offset = condition->reg.id;
- dst_param->write_mask = condition->reg.writemask;
+ vsir_dst_from_hlsl_node(&ins->dst[0], ctx, condition);
}
else
{
@@ -8561,6 +9290,10 @@ static void sm1_generate_vsir(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl
return;
}
+ program->temp_count = allocate_temp_registers(ctx, entry_func);
+ if (ctx->result)
+ return;
+
generate_vsir_signature(ctx, program, entry_func);
hlsl_block_init(&block);
@@ -8650,6 +9383,10 @@ D3DXPARAMETER_TYPE hlsl_sm1_base_type(const struct hlsl_type *type, bool is_comb
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
return D3DXPT_INT;
+ /* Minimum-precision types are not supported until 46, but at
+ * that point they do the same thing, and return sm4 types. */
+ case HLSL_TYPE_MIN16UINT:
+ return 0x39;
}
break;
@@ -8934,6 +9671,7 @@ static void write_sm1_uniforms(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffe
uni.f = var->default_values[k].number.i;
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
uni.f = var->default_values[k].number.u;
@@ -8977,7 +9715,7 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
const struct hlsl_ir_var *var, struct hlsl_block *block, const struct vkd3d_shader_location *loc)
{
const struct vkd3d_shader_version *version = &program->shader_version;
- const bool is_patch = hlsl_type_is_patch_array(var->data_type);
+ const bool is_primitive = hlsl_type_is_primitive_array(var->data_type);
const bool output = var->is_output_semantic;
enum vkd3d_shader_sysval_semantic semantic;
struct vkd3d_shader_dst_param *dst_param;
@@ -8989,7 +9727,7 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
bool has_idx;
sm4_sysval_semantic_from_semantic_name(&semantic, version, ctx->semantic_compat_mapping, ctx->domain,
- var->semantic.name, var->semantic.index, output, ctx->is_patch_constant_func, is_patch);
+ var->semantic.name, var->semantic.index, output, ctx->is_patch_constant_func, is_primitive);
if (semantic == ~0u)
semantic = VKD3D_SHADER_SV_NONE;
@@ -9002,9 +9740,17 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
? VKD3DSIH_DCL_INPUT_PS : VKD3DSIH_DCL_INPUT;
break;
+ case VKD3D_SHADER_SV_PRIMITIVE_ID:
+ if (version->type == VKD3D_SHADER_TYPE_PIXEL)
+ opcode = VKD3DSIH_DCL_INPUT_PS_SGV;
+ else if (version->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ opcode = VKD3DSIH_DCL_INPUT;
+ else
+ opcode = VKD3DSIH_DCL_INPUT_SGV;
+ break;
+
case VKD3D_SHADER_SV_INSTANCE_ID:
case VKD3D_SHADER_SV_IS_FRONT_FACE:
- case VKD3D_SHADER_SV_PRIMITIVE_ID:
case VKD3D_SHADER_SV_SAMPLE_INDEX:
case VKD3D_SHADER_SV_VERTEX_ID:
opcode = (version->type == VKD3D_SHADER_TYPE_PIXEL)
@@ -9014,7 +9760,7 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
default:
if (version->type == VKD3D_SHADER_TYPE_PIXEL)
opcode = VKD3DSIH_DCL_INPUT_PS_SIV;
- else if (is_patch)
+ else if (is_primitive && version->type != VKD3D_SHADER_TYPE_GEOMETRY)
opcode = VKD3DSIH_DCL_INPUT;
else
opcode = VKD3DSIH_DCL_INPUT_SIV;
@@ -9055,7 +9801,7 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
}
else if (opcode == VKD3DSIH_DCL_INPUT || opcode == VKD3DSIH_DCL_INPUT_PS)
{
- VKD3D_ASSERT(semantic == VKD3D_SHADER_SV_NONE || is_patch);
+ VKD3D_ASSERT(semantic == VKD3D_SHADER_SV_NONE || is_primitive || version->type == VKD3D_SHADER_TYPE_GEOMETRY);
dst_param = &ins->declaration.dst;
}
else
@@ -9066,7 +9812,7 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
dst_param = &ins->declaration.register_semantic.reg;
}
- if (is_patch)
+ if (is_primitive)
{
VKD3D_ASSERT(has_idx);
vsir_register_init(&dst_param->reg, type, VKD3D_DATA_FLOAT, 2);
@@ -9127,13 +9873,6 @@ static bool type_is_float(const struct hlsl_type *type)
return type->e.numeric.type == HLSL_TYPE_FLOAT || type->e.numeric.type == HLSL_TYPE_HALF;
}
-static bool type_is_integer(const struct hlsl_type *type)
-{
- return type->e.numeric.type == HLSL_TYPE_BOOL
- || type->e.numeric.type == HLSL_TYPE_INT
- || type->e.numeric.type == HLSL_TYPE_UINT;
-}
-
static void sm4_generate_vsir_cast_from_bool(struct hlsl_ctx *ctx, struct vsir_program *program,
const struct hlsl_ir_expr *expr, uint32_t bits)
{
@@ -9188,6 +9927,7 @@ static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ITOF, 0, 0, true);
return true;
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UTOF, 0, 0, true);
return true;
@@ -9211,6 +9951,7 @@ static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
return true;
@@ -9225,6 +9966,7 @@ static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
}
break;
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
switch (src_type->e.numeric.type)
{
@@ -9234,6 +9976,7 @@ static bool sm4_generate_vsir_instr_expr_cast(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_MOV, 0, 0, true);
return true;
@@ -9339,7 +10082,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_OP1_BIT_NOT:
- VKD3D_ASSERT(type_is_integer(dst_type));
+ VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_NOT, 0, 0, true);
return true;
@@ -9431,6 +10174,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_INEG, 0, 0, true);
return true;
@@ -9498,6 +10242,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IADD, 0, 0, true);
return true;
@@ -9508,17 +10253,17 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
}
case HLSL_OP2_BIT_AND:
- VKD3D_ASSERT(type_is_integer(dst_type));
+ VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_AND, 0, 0, true);
return true;
case HLSL_OP2_BIT_OR:
- VKD3D_ASSERT(type_is_integer(dst_type));
+ VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_OR, 0, 0, true);
return true;
case HLSL_OP2_BIT_XOR:
- VKD3D_ASSERT(type_is_integer(dst_type));
+ VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_XOR, 0, 0, true);
return true;
@@ -9529,6 +10274,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_DIV, 0, 0, true);
return true;
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_UDIV, expr, 0);
return true;
@@ -9577,6 +10323,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_TYPE_BOOL:
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IEQ, 0, 0, true);
return true;
@@ -9601,6 +10348,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UGE, 0, 0, true);
return true;
@@ -9625,6 +10373,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ULT, 0, 0, true);
return true;
@@ -9646,7 +10395,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_OP2_LSHIFT:
- VKD3D_ASSERT(type_is_integer(dst_type));
+ VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
VKD3D_ASSERT(dst_type->e.numeric.type != HLSL_TYPE_BOOL);
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_ISHL, 0, 0, true);
return true;
@@ -9659,6 +10408,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IMAD, 0, 0, true);
return true;
@@ -9679,6 +10429,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IMAX, 0, 0, true);
return true;
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UMAX, 0, 0, true);
return true;
@@ -9699,6 +10450,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_IMIN, 0, 0, true);
return true;
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_UMIN, 0, 0, true);
return true;
@@ -9711,6 +10463,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_OP2_MOD:
switch (dst_type->e.numeric.type)
{
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
sm4_generate_vsir_expr_with_two_destinations(ctx, program, VKD3DSIH_UDIV, expr, 1);
return true;
@@ -9728,6 +10481,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
return true;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
/* Using IMUL instead of UMUL because we're taking the low
* bits, and the native compiler generates IMUL. */
@@ -9750,6 +10504,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
case HLSL_TYPE_BOOL:
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT: /* FIXME: Needs minimum-precision annotations. */
case HLSL_TYPE_UINT:
generate_vsir_instr_expr_single_instr_op(ctx, program, expr, VKD3DSIH_INE, 0, 0, true);
return true;
@@ -9761,7 +10516,7 @@ static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
}
case HLSL_OP2_RSHIFT:
- VKD3D_ASSERT(type_is_integer(dst_type));
+ VKD3D_ASSERT(hlsl_type_is_integer(dst_type));
VKD3D_ASSERT(dst_type->e.numeric.type != HLSL_TYPE_BOOL);
generate_vsir_instr_expr_single_instr_op(ctx, program, expr,
dst_type->e.numeric.type == HLSL_TYPE_INT ? VKD3DSIH_ISHR : VKD3DSIH_USHR, 0, 0, true);
@@ -10840,12 +11595,32 @@ static void generate_vsir_scan_required_features(struct hlsl_ctx *ctx, struct vs
* STENCIL_REF, and TYPED_UAV_LOAD_ADDITIONAL_FORMATS. */
}
+static bool is_minimum_precision(enum hlsl_base_type type)
+{
+ switch (type)
+ {
+ case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_DOUBLE:
+ case HLSL_TYPE_FLOAT:
+ case HLSL_TYPE_HALF:
+ case HLSL_TYPE_INT:
+ case HLSL_TYPE_UINT:
+ return false;
+
+ case HLSL_TYPE_MIN16UINT:
+ return true;
+ }
+
+ vkd3d_unreachable();
+}
+
static void generate_vsir_scan_global_flags(struct hlsl_ctx *ctx,
struct vsir_program *program, const struct hlsl_ir_function_decl *entry_func)
{
const struct vkd3d_shader_version *version = &program->shader_version;
struct extern_resource *extern_resources;
unsigned int extern_resources_count, i;
+ struct hlsl_ir_var *var;
extern_resources = sm4_get_extern_resources(ctx, &extern_resources_count);
@@ -10866,6 +11641,25 @@ static void generate_vsir_scan_global_flags(struct hlsl_ctx *ctx,
sm4_free_extern_resources(extern_resources, extern_resources_count);
+ LIST_FOR_EACH_ENTRY(var, &entry_func->extern_vars, struct hlsl_ir_var, extern_entry)
+ {
+ const struct hlsl_type *type = var->data_type;
+
+ if (hlsl_type_is_primitive_array(type))
+ type = var->data_type->e.array.type;
+
+ /* Note that it doesn't matter if the semantic is unused or doesn't
+ * generate a signature element (e.g. SV_DispatchThreadID). */
+ if ((var->is_input_semantic || var->is_output_semantic)
+ && (type->is_minimum_precision || is_minimum_precision(type->e.numeric.type)))
+ {
+ program->global_flags |= VKD3DSGF_ENABLE_MINIMUM_PRECISION;
+ break;
+ }
+ }
+ /* FIXME: We also need to check for minimum-precision uniforms and local
+ * variable arithmetic. */
+
if (entry_func->early_depth_test && vkd3d_shader_ver_ge(version, 5, 0))
program->global_flags |= VKD3DSGF_FORCE_EARLY_DEPTH_STENCIL;
}
@@ -10994,6 +11788,7 @@ static enum vkd3d_data_type sm4_generate_vsir_get_format_type(const struct hlsl_
return VKD3D_DATA_INT;
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
return VKD3D_DATA_UINT;
}
@@ -11153,6 +11948,13 @@ static void sm4_generate_vsir(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl
? 0 : ctx->input_control_point_count;
program->tess_domain = ctx->domain;
}
+ else if (version.type == VKD3D_SHADER_TYPE_GEOMETRY)
+ {
+ program->input_control_point_count = ctx->input_control_point_count;
+ program->input_primitive = ctx->input_primitive_type;
+ program->output_topology = VKD3D_PT_UNDEFINED; /* TODO: obtain from stream output parameters. */
+ program->vertices_out_count = ctx->max_vertex_count;
+ }
LIST_FOR_EACH_ENTRY(cbuffer, &ctx->buffers, struct hlsl_buffer, entry)
{
@@ -11288,6 +12090,7 @@ static enum D3D_RESOURCE_RETURN_TYPE sm4_data_type(const struct hlsl_type *type)
break;
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
return D3D_RETURN_TYPE_UINT;
}
@@ -11373,6 +12176,8 @@ static D3D_SHADER_VARIABLE_TYPE sm4_base_type(const struct hlsl_type *type)
return D3D_SVT_INT;
case HLSL_TYPE_UINT:
return D3D_SVT_UINT;
+ case HLSL_TYPE_MIN16UINT:
+ return D3D_SVT_MIN16UINT;
}
vkd3d_unreachable();
@@ -11696,16 +12501,13 @@ static void sm4_generate_rdef(struct hlsl_ctx *ctx, struct vkd3d_shader_code *rd
static bool loop_unrolling_generate_const_bool_store(struct hlsl_ctx *ctx, struct hlsl_ir_var *var,
bool val, struct hlsl_block *block, struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *const_node, *store;
+ struct hlsl_ir_node *const_node;
if (!(const_node = hlsl_new_bool_constant(ctx, val, loc)))
return false;
hlsl_block_add_instr(block, const_node);
- if (!(store = hlsl_new_simple_store(ctx, var, const_node)))
- return false;
- hlsl_block_add_instr(block, store);
-
+ hlsl_block_add_simple_store(ctx, block, var, const_node);
return true;
}
@@ -11759,19 +12561,13 @@ static bool loop_unrolling_remove_jumps_visit(struct hlsl_ctx *ctx, struct hlsl_
static struct hlsl_ir_if *loop_unrolling_generate_var_check(struct hlsl_ctx *ctx,
struct hlsl_block *dst, struct hlsl_ir_var *var, struct vkd3d_shader_location *loc)
{
- struct hlsl_ir_node *cond, *iff;
+ struct hlsl_ir_node *cond, *load, *iff;
struct hlsl_block then_block;
- struct hlsl_ir_load *load;
hlsl_block_init(&then_block);
- if (!(load = hlsl_new_var_load(ctx, var, loc)))
- return NULL;
- hlsl_block_add_instr(dst, &load->node);
-
- if (!(cond = hlsl_new_unary_expr(ctx, HLSL_OP1_LOGIC_NOT, &load->node, loc)))
- return NULL;
- hlsl_block_add_instr(dst, cond);
+ load = hlsl_block_add_simple_load(ctx, dst, var, loc);
+ cond = hlsl_block_add_unary_expr(ctx, dst, HLSL_OP1_LOGIC_NOT, load, loc);
if (!(iff = hlsl_new_if(ctx, cond, &then_block, NULL, loc)))
return NULL;
@@ -11850,9 +12646,7 @@ static void loop_unrolling_simplify(struct hlsl_ctx *ctx, struct hlsl_block *blo
copy_propagation_pop_scope(state);
copy_propagation_push_scope(state, ctx);
- progress = hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, block, NULL);
- progress |= hlsl_transform_ir(ctx, hlsl_fold_constant_identities, block, NULL);
- progress |= hlsl_transform_ir(ctx, hlsl_fold_constant_swizzles, block, NULL);
+ progress = simplify_exprs(ctx, block);
current_index = index_instructions(block, *index);
progress |= copy_propagation_transform_block(ctx, block, state);
@@ -12113,10 +12907,9 @@ static void loop_unrolling_execute(struct hlsl_ctx *ctx, struct hlsl_block *bloc
static bool lower_f16tof32(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, struct hlsl_block *block)
{
- struct hlsl_ir_node *call, *rhs, *store;
struct hlsl_ir_function_decl *func;
+ struct hlsl_ir_node *call, *rhs;
unsigned int component_count;
- struct hlsl_ir_load *load;
struct hlsl_ir_expr *expr;
struct hlsl_ir_var *lhs;
char *body;
@@ -12179,28 +12972,21 @@ static bool lower_f16tof32(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, stru
return false;
lhs = func->parameters.vars[0];
-
- if (!(store = hlsl_new_simple_store(ctx, lhs, rhs)))
- return false;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_simple_store(ctx, block, lhs, rhs);
if (!(call = hlsl_new_call(ctx, func, &node->loc)))
return false;
hlsl_block_add_instr(block, call);
- if (!(load = hlsl_new_var_load(ctx, func->return_var, &node->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
-
+ hlsl_block_add_simple_load(ctx, block, func->return_var, &node->loc);
return true;
}
static bool lower_f32tof16(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, struct hlsl_block *block)
{
- struct hlsl_ir_node *call, *rhs, *store;
struct hlsl_ir_function_decl *func;
+ struct hlsl_ir_node *call, *rhs;
unsigned int component_count;
- struct hlsl_ir_load *load;
struct hlsl_ir_expr *expr;
struct hlsl_ir_var *lhs;
char *body;
@@ -12251,30 +13037,22 @@ static bool lower_f32tof16(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, stru
return false;
lhs = func->parameters.vars[0];
-
- if (!(store = hlsl_new_simple_store(ctx, lhs, rhs)))
- return false;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_simple_store(ctx, block, lhs, rhs);
if (!(call = hlsl_new_call(ctx, func, &node->loc)))
return false;
hlsl_block_add_instr(block, call);
- if (!(load = hlsl_new_var_load(ctx, func->return_var, &node->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
-
+ hlsl_block_add_simple_load(ctx, block, func->return_var, &node->loc);
return true;
}
static bool lower_isinf(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, struct hlsl_block *block)
{
- struct hlsl_ir_node *call, *rhs, *store;
struct hlsl_ir_function_decl *func;
+ struct hlsl_ir_node *call, *rhs;
unsigned int component_count;
- struct hlsl_ir_load *load;
struct hlsl_ir_expr *expr;
- struct hlsl_ir_var *lhs;
const char *template;
char *body;
@@ -12327,7 +13105,7 @@ static bool lower_isinf(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, struct
template = template_sm2;
else if (hlsl_version_lt(ctx, 4, 0))
template = template_sm3;
- else if (type_is_integer(rhs->data_type))
+ else if (hlsl_type_is_integer(rhs->data_type))
template = template_int;
else
template = template_sm4;
@@ -12339,20 +13117,13 @@ static bool lower_isinf(struct hlsl_ctx *ctx, struct hlsl_ir_node *node, struct
if (!(func = hlsl_compile_internal_function(ctx, "isinf", body)))
return false;
- lhs = func->parameters.vars[0];
-
- if (!(store = hlsl_new_simple_store(ctx, lhs, rhs)))
- return false;
- hlsl_block_add_instr(block, store);
+ hlsl_block_add_simple_store(ctx, block, func->parameters.vars[0], rhs);
if (!(call = hlsl_new_call(ctx, func, &node->loc)))
return false;
hlsl_block_add_instr(block, call);
- if (!(load = hlsl_new_var_load(ctx, func->return_var, &node->loc)))
- return false;
- hlsl_block_add_instr(block, &load->node);
-
+ hlsl_block_add_simple_load(ctx, block, func->return_var, &node->loc);
return true;
}
@@ -12366,6 +13137,7 @@ static void process_entry_function(struct hlsl_ctx *ctx,
struct recursive_call_ctx recursive_call_ctx;
struct hlsl_ir_var *var;
unsigned int i;
+ bool progress;
ctx->is_patch_constant_func = entry_func == ctx->patch_constant_func;
@@ -12418,41 +13190,61 @@ static void process_entry_function(struct hlsl_ctx *ctx,
else
prepend_uniform_copy(ctx, body, var);
}
- else if (hlsl_type_is_patch_array(var->data_type))
+ else if (hlsl_type_is_primitive_array(var->data_type))
{
- if (var->data_type->e.array.array_type == HLSL_ARRAY_PATCH_INPUT)
+ if (var->storage_modifiers & HLSL_STORAGE_OUT)
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
+ "Input primitive parameter \"%s\" is declared as \"out\".", var->name);
+
+ if (profile->type != VKD3D_SHADER_TYPE_GEOMETRY)
{
- if (input_patch)
+ enum hlsl_array_type array_type = var->data_type->e.array.array_type;
+
+ if (array_type == HLSL_ARRAY_PATCH_INPUT)
{
- hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_DUPLICATE_PATCH,
- "Found multiple InputPatch parameters.");
- hlsl_note(ctx, &input_patch->loc, VKD3D_SHADER_LOG_ERROR,
- "The InputPatch parameter was previously declared here.");
- continue;
+ if (input_patch)
+ {
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_DUPLICATE_PATCH,
+ "Found multiple InputPatch parameters.");
+ hlsl_note(ctx, &input_patch->loc, VKD3D_SHADER_LOG_ERROR,
+ "The InputPatch parameter was previously declared here.");
+ continue;
+ }
+ input_patch = var;
}
- input_patch = var;
- }
- else
- {
- if (output_patch)
+ else if (array_type == HLSL_ARRAY_PATCH_OUTPUT)
{
- hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_DUPLICATE_PATCH,
- "Found multiple OutputPatch parameters.");
- hlsl_note(ctx, &output_patch->loc, VKD3D_SHADER_LOG_ERROR,
- "The OutputPatch parameter was previously declared here.");
- continue;
+ if (output_patch)
+ {
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_DUPLICATE_PATCH,
+ "Found multiple OutputPatch parameters.");
+ hlsl_note(ctx, &output_patch->loc, VKD3D_SHADER_LOG_ERROR,
+ "The OutputPatch parameter was previously declared here.");
+ continue;
+ }
+ output_patch = var;
}
- output_patch = var;
}
- validate_and_record_patch_type(ctx, var);
- if (profile->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ validate_and_record_prim_type(ctx, var);
+ prepend_input_var_copy(ctx, entry_func, var);
+ }
+ else if (hlsl_get_stream_output_type(var->data_type))
+ {
+ if (profile->type != VKD3D_SHADER_TYPE_GEOMETRY)
{
- hlsl_fixme(ctx, &var->loc, "InputPatch/OutputPatch parameters in geometry shaders.");
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INCOMPATIBLE_PROFILE,
+ "Stream output parameters can only be used in geometry shaders.");
continue;
}
- prepend_input_var_copy(ctx, entry_func, var);
+ if (!(var->storage_modifiers & HLSL_STORAGE_IN) || !(var->storage_modifiers & HLSL_STORAGE_OUT))
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
+ "Stream output parameter \"%s\" must be declared as \"inout\".", var->name);
+
+ /* TODO: check that maxvertexcount * component_count(element_type) <= 1024. */
+
+ continue;
}
else
{
@@ -12465,12 +13257,24 @@ static void process_entry_function(struct hlsl_ctx *ctx,
}
if (var->storage_modifiers & HLSL_STORAGE_IN)
+ {
+ if (profile->type == VKD3D_SHADER_TYPE_GEOMETRY && !var->semantic.name)
+ {
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_PRIMITIVE_TYPE,
+ "Input parameter \"%s\" is missing a primitive type.", var->name);
+ continue;
+ }
+
prepend_input_var_copy(ctx, entry_func, var);
+ }
if (var->storage_modifiers & HLSL_STORAGE_OUT)
{
if (profile->type == VKD3D_SHADER_TYPE_HULL && !ctx->is_patch_constant_func)
hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
"Output parameters are not supported in hull shader control point functions.");
+ else if (profile->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_MODIFIER,
+ "Output parameters are not allowed in geometry shaders.");
else
append_output_var_copy(ctx, entry_func, var);
}
@@ -12478,7 +13282,11 @@ static void process_entry_function(struct hlsl_ctx *ctx,
}
if (entry_func->return_var)
{
- if (entry_func->return_var->data_type->class != HLSL_CLASS_STRUCT && !entry_func->return_var->semantic.name)
+ if (profile->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_INCOMPATIBLE_PROFILE,
+ "Geometry shaders cannot return values.");
+ else if (entry_func->return_var->data_type->class != HLSL_CLASS_STRUCT
+ && !entry_func->return_var->semantic.name)
hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_SEMANTIC,
"Entry point \"%s\" is missing a return value semantic.", entry_func->func->name);
@@ -12493,6 +13301,10 @@ static void process_entry_function(struct hlsl_ctx *ctx,
hlsl_fixme(ctx, &entry_func->loc, "Passthrough hull shader control point function.");
}
+ if (profile->type == VKD3D_SHADER_TYPE_GEOMETRY && ctx->input_primitive_type == VKD3D_PT_UNDEFINED)
+ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_PRIMITIVE_TYPE,
+ "Entry point \"%s\" is missing an input primitive parameter.", entry_func->func->name);
+
if (hlsl_version_ge(ctx, 4, 0))
{
hlsl_transform_ir(ctx, lower_discard_neg, body, NULL);
@@ -12503,6 +13315,9 @@ static void process_entry_function(struct hlsl_ctx *ctx,
hlsl_transform_ir(ctx, lower_resource_load_bias, body, NULL);
}
+ compute_liveness(ctx, entry_func);
+ transform_derefs(ctx, divert_written_uniform_derefs_to_temp, &entry_func->body);
+
loop_unrolling_execute(ctx, body);
hlsl_run_const_passes(ctx, body);
@@ -12517,6 +13332,17 @@ static void process_entry_function(struct hlsl_ctx *ctx,
hlsl_transform_ir(ctx, lower_separate_samples, body, NULL);
hlsl_transform_ir(ctx, validate_dereferences, body, NULL);
+
+ do
+ {
+ progress = vectorize_exprs(ctx, body);
+ compute_liveness(ctx, entry_func);
+ progress |= hlsl_transform_ir(ctx, dce, body, NULL);
+ progress |= hlsl_transform_ir(ctx, fold_swizzle_chains, body, NULL);
+ progress |= hlsl_transform_ir(ctx, remove_trivial_swizzles, body, NULL);
+ progress |= vectorize_stores(ctx, body);
+ } while (progress);
+
hlsl_transform_ir(ctx, track_object_components_sampler_dim, body, NULL);
if (hlsl_version_ge(ctx, 4, 0))
@@ -12537,14 +13363,14 @@ static void process_entry_function(struct hlsl_ctx *ctx,
while (lower_ir(ctx, lower_nonconstant_array_loads, body));
lower_ir(ctx, lower_ternary, body);
-
- lower_ir(ctx, lower_nonfloat_exprs, body);
+ lower_ir(ctx, lower_int_modulus_sm1, body);
+ lower_ir(ctx, lower_division, body);
/* Constants casted to float must be folded, and new casts to bool also need to be lowered. */
hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, body, NULL);
lower_ir(ctx, lower_casts_to_bool, body);
lower_ir(ctx, lower_casts_to_int, body);
- lower_ir(ctx, lower_division, body);
+ lower_ir(ctx, lower_trunc, body);
lower_ir(ctx, lower_sqrt, body);
lower_ir(ctx, lower_dot, body);
lower_ir(ctx, lower_round, body);
@@ -12566,13 +13392,15 @@ static void process_entry_function(struct hlsl_ctx *ctx,
lower_ir(ctx, validate_nonconstant_vector_store_derefs, body);
+ hlsl_run_folding_passes(ctx, body);
+
do
compute_liveness(ctx, entry_func);
while (hlsl_transform_ir(ctx, dce, body, NULL));
/* TODO: move forward, remove when no longer needed */
transform_derefs(ctx, replace_deref_path_with_offset, body);
- while (hlsl_transform_ir(ctx, hlsl_fold_constant_exprs, body, NULL));
+ simplify_exprs(ctx, body);
transform_derefs(ctx, clean_constant_deref_offset_srcs, body);
do
@@ -12608,6 +13436,9 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
else if (profile->type == VKD3D_SHADER_TYPE_DOMAIN && ctx->domain == VKD3D_TESSELLATOR_DOMAIN_INVALID)
hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE,
"Entry point \"%s\" is missing a [domain] attribute.", entry_func->func->name);
+ else if (profile->type == VKD3D_SHADER_TYPE_GEOMETRY && !ctx->max_vertex_count)
+ hlsl_error(ctx, &entry_func->loc, VKD3D_SHADER_ERROR_HLSL_MISSING_ATTRIBUTE,
+ "Entry point \"%s\" is missing a [maxvertexcount] attribute.", entry_func->func->name);
hlsl_block_init(&global_uniform_block);
@@ -12633,7 +13464,6 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
if (profile->major_version < 4)
{
mark_indexable_vars(ctx, entry_func);
- allocate_temp_registers(ctx, entry_func);
allocate_const_registers(ctx, entry_func);
sort_uniforms_by_bind_count(ctx, HLSL_REGSET_SAMPLERS);
allocate_objects(ctx, entry_func, HLSL_REGSET_SAMPLERS);
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c
index 538f0f46854..f74ecffcd4b 100644
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_constant_ops.c
@@ -51,6 +51,7 @@ static bool fold_abs(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst,
dst->u[k].i = abs(src->value.u[k].i);
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = src->value.u[k].u;
break;
@@ -126,6 +127,7 @@ static bool fold_bit_not(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst,
switch (type)
{
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
dst->u[k].u = ~src->value.u[k].u;
@@ -175,6 +177,7 @@ static bool fold_cast(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst,
break;
case HLSL_TYPE_UINT:
+ case HLSL_TYPE_MIN16UINT:
u = src->value.u[k].u;
i = src->value.u[k].u;
f = src->value.u[k].u;
@@ -205,6 +208,7 @@ static bool fold_cast(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst,
break;
case HLSL_TYPE_UINT:
+ case HLSL_TYPE_MIN16UINT:
dst->u[k].u = u;
break;
@@ -395,6 +399,7 @@ static bool fold_neg(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst,
break;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = -src->value.u[k].u;
break;
@@ -612,6 +617,7 @@ static bool fold_add(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, cons
/* Handling HLSL_TYPE_INT through the unsigned field to avoid
* undefined behavior with signed integers in C. */
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = src1->value.u[k].u + src2->value.u[k].u;
break;
@@ -638,6 +644,7 @@ static bool fold_and(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, cons
switch (type)
{
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
dst->u[k].u = src1->value.u[k].u & src2->value.u[k].u;
@@ -665,6 +672,7 @@ static bool fold_or(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, const
switch (type)
{
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
dst->u[k].u = src1->value.u[k].u | src2->value.u[k].u;
@@ -692,6 +700,7 @@ static bool fold_bit_xor(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst,
switch (type)
{
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = src1->value.u[k].u ^ src2->value.u[k].u;
break;
@@ -813,6 +822,7 @@ static bool fold_div(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, cons
dst->u[k].i = src1->value.u[k].i / src2->value.u[k].i;
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
if (src2->value.u[k].u == 0)
{
@@ -855,6 +865,7 @@ static bool fold_equal(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, co
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
dst->u[k].u = src1->value.u[k].u == src2->value.u[k].u;
break;
}
@@ -891,6 +902,7 @@ static bool fold_gequal(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, c
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
dst->u[k].u = src1->value.u[k].u >= src2->value.u[k].u;
break;
}
@@ -927,6 +939,7 @@ static bool fold_less(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, con
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
dst->u[k].u = src1->value.u[k].u < src2->value.u[k].u;
break;
}
@@ -951,6 +964,7 @@ static bool fold_lshift(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, c
switch (src1->node.data_type->e.numeric.type)
{
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = src1->value.u[k].u << shift;
break;
@@ -989,6 +1003,7 @@ static bool fold_max(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, cons
dst->u[k].i = max(src1->value.u[k].i, src2->value.u[k].i);
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = max(src1->value.u[k].u, src2->value.u[k].u);
break;
@@ -1027,6 +1042,7 @@ static bool fold_min(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, cons
dst->u[k].i = min(src1->value.u[k].i, src2->value.u[k].i);
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = min(src1->value.u[k].u, src2->value.u[k].u);
break;
@@ -1065,6 +1081,7 @@ static bool fold_mod(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, cons
dst->u[k].i = src1->value.u[k].i % src2->value.u[k].i;
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
if (src2->value.u[k].u == 0)
{
@@ -1105,6 +1122,7 @@ static bool fold_mul(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, cons
break;
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = src1->value.u[k].u * src2->value.u[k].u;
break;
@@ -1141,6 +1159,7 @@ static bool fold_nequal(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, c
case HLSL_TYPE_INT:
case HLSL_TYPE_UINT:
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
dst->u[k].u = src1->value.u[k].u != src2->value.u[k].u;
break;
}
@@ -1183,6 +1202,7 @@ static bool fold_rshift(struct hlsl_ctx *ctx, struct hlsl_constant_value *dst, c
dst->u[k].i = src1->value.u[k].i >> shift;
break;
+ case HLSL_TYPE_MIN16UINT:
case HLSL_TYPE_UINT:
dst->u[k].u = src1->value.u[k].u >> shift;
break;
@@ -1399,6 +1419,7 @@ static bool constant_is_zero(struct hlsl_ir_constant *const_arg)
case HLSL_TYPE_UINT:
case HLSL_TYPE_INT:
case HLSL_TYPE_BOOL:
+ case HLSL_TYPE_MIN16UINT:
if (const_arg->value.u[k].u != 0)
return false;
break;
@@ -1429,6 +1450,7 @@ static bool constant_is_one(struct hlsl_ir_constant *const_arg)
case HLSL_TYPE_UINT:
case HLSL_TYPE_INT:
+ case HLSL_TYPE_MIN16UINT:
if (const_arg->value.u[k].u != 1)
return false;
break;
@@ -1524,7 +1546,7 @@ static bool is_op_associative(enum hlsl_ir_expr_op op, enum hlsl_base_type type)
{
case HLSL_OP2_ADD:
case HLSL_OP2_MUL:
- return type == HLSL_TYPE_INT || type == HLSL_TYPE_UINT;
+ return hlsl_base_type_is_integer(type);
case HLSL_OP2_BIT_AND:
case HLSL_OP2_BIT_OR:
@@ -1574,7 +1596,7 @@ static bool is_op_left_distributive(enum hlsl_ir_expr_op opl, enum hlsl_ir_expr_
case HLSL_OP2_DOT:
case HLSL_OP2_MUL:
- return opr == HLSL_OP2_ADD && (type == HLSL_TYPE_INT || type == HLSL_TYPE_UINT);
+ return opr == HLSL_OP2_ADD && hlsl_base_type_is_integer(type);
case HLSL_OP2_MAX:
return opr == HLSL_OP2_MIN;
@@ -1593,7 +1615,6 @@ static struct hlsl_ir_node *collect_exprs(struct hlsl_ctx *ctx, struct hlsl_bloc
{
enum hlsl_base_type type = instr->data_type->e.numeric.type;
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {0};
- struct hlsl_ir_node *ab, *res;
struct hlsl_ir_expr *e1, *e2;
enum hlsl_ir_expr_op opl;
@@ -1610,17 +1631,9 @@ static struct hlsl_ir_node *collect_exprs(struct hlsl_ctx *ctx, struct hlsl_bloc
if (e1->operands[1].node->type != HLSL_IR_CONSTANT || e2->operands[1].node->type != HLSL_IR_CONSTANT)
return NULL;
- if (!(ab = hlsl_new_binary_expr(ctx, opr, e1->operands[1].node, e2->operands[1].node)))
- return NULL;
- hlsl_block_add_instr(block, ab);
-
operands[0] = e1->operands[0].node;
- operands[1] = ab;
-
- if (!(res = hlsl_new_expr(ctx, opl, operands, instr->data_type, &instr->loc)))
- return NULL;
- hlsl_block_add_instr(block, res);
- return res;
+ operands[1] = hlsl_block_add_binary_expr(ctx, block, opr, e1->operands[1].node, e2->operands[1].node);
+ return hlsl_block_add_expr(ctx, block, opl, operands, instr->data_type, &instr->loc);
}
bool hlsl_normalize_binary_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
@@ -1677,26 +1690,14 @@ bool hlsl_normalize_binary_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
if (arg2->type == HLSL_IR_CONSTANT)
{
/* (x OP a) OP b -> x OP (a OP b) */
- struct hlsl_ir_node *ab;
-
- if (!(ab = hlsl_new_binary_expr(ctx, op, e1->operands[1].node, arg2)))
- goto fail;
- hlsl_block_add_instr(&block, ab);
-
arg1 = e1->operands[0].node;
- arg2 = ab;
+ arg2 = hlsl_block_add_binary_expr(ctx, &block, op, e1->operands[1].node, arg2);
progress = true;
}
else if (is_op_commutative(op))
{
/* (x OP a) OP y -> (x OP y) OP a */
- struct hlsl_ir_node *xy;
-
- if (!(xy = hlsl_new_binary_expr(ctx, op, e1->operands[0].node, arg2)))
- goto fail;
- hlsl_block_add_instr(&block, xy);
-
- arg1 = xy;
+ arg1 = hlsl_block_add_binary_expr(ctx, &block, op, e1->operands[0].node, arg2);
arg2 = e1->operands[1].node;
progress = true;
}
@@ -1706,18 +1707,13 @@ bool hlsl_normalize_binary_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
&& e2->operands[0].node->type != HLSL_IR_CONSTANT && e2->operands[1].node->type == HLSL_IR_CONSTANT)
{
/* x OP (y OP a) -> (x OP y) OP a */
- struct hlsl_ir_node *xy;
-
- if (!(xy = hlsl_new_binary_expr(ctx, op, arg1, e2->operands[0].node)))
- goto fail;
- hlsl_block_add_instr(&block, xy);
-
- arg1 = xy;
+ arg1 = hlsl_block_add_binary_expr(ctx, &block, op, arg1, e2->operands[0].node);
arg2 = e2->operands[1].node;
progress = true;
}
- if (!progress && e1 && (tmp = collect_exprs(ctx, &block, instr, op, e1->operands[1].node, arg2)))
+ if (!progress && e1 && e1->op == op
+ && (tmp = collect_exprs(ctx, &block, instr, op, e1->operands[1].node, arg2)))
{
/* (y OPR (x OPL a)) OPR (x OPL b) -> y OPR (x OPL (a OPR b)) */
arg1 = e1->operands[0].node;
@@ -1725,7 +1721,7 @@ bool hlsl_normalize_binary_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
progress = true;
}
- if (!progress && is_op_commutative(op) && e1
+ if (!progress && is_op_commutative(op) && e1 && e1->op == op
&& (tmp = collect_exprs(ctx, &block, instr, op, e1->operands[0].node, arg2)))
{
/* ((x OPL a) OPR y) OPR (x OPL b) -> (x OPL (a OPR b)) OPR y */
@@ -1734,7 +1730,8 @@ bool hlsl_normalize_binary_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
progress = true;
}
- if (!progress && e2 && (tmp = collect_exprs(ctx, &block, instr, op, arg1, e2->operands[0].node)))
+ if (!progress && e2 && e2->op == op
+ && (tmp = collect_exprs(ctx, &block, instr, op, arg1, e2->operands[0].node)))
{
/* (x OPL a) OPR ((x OPL b) OPR y) -> (x OPL (a OPR b)) OPR y */
arg1 = tmp;
@@ -1742,7 +1739,7 @@ bool hlsl_normalize_binary_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
progress = true;
}
- if (!progress && is_op_commutative(op) && e2
+ if (!progress && is_op_commutative(op) && e2 && e2->op == op
&& (tmp = collect_exprs(ctx, &block, instr, op, arg1, e2->operands[1].node)))
{
/* (x OPL a) OPR (y OPR (x OPL b)) -> (x OPL (a OPR b)) OPR y */
@@ -1757,19 +1754,13 @@ bool hlsl_normalize_binary_exprs(struct hlsl_ctx *ctx, struct hlsl_ir_node *inst
struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS] = {arg1, arg2};
struct hlsl_ir_node *res;
- if (!(res = hlsl_new_expr(ctx, op, operands, instr->data_type, &instr->loc)))
- goto fail;
- hlsl_block_add_instr(&block, res);
+ res = hlsl_block_add_expr(ctx, &block, op, operands, instr->data_type, &instr->loc);
list_move_before(&instr->entry, &block.instrs);
hlsl_replace_node(instr, res);
}
return progress;
-
-fail:
- hlsl_block_cleanup(&block);
- return false;
}
bool hlsl_fold_constant_swizzles(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c
index b608fae21ac..72cf53761e4 100644
--- a/libs/vkd3d/libs/vkd3d-shader/ir.c
+++ b/libs/vkd3d/libs/vkd3d-shader/ir.c
@@ -116,6 +116,7 @@ void vsir_program_cleanup(struct vsir_program *program)
shader_signature_cleanup(&program->input_signature);
shader_signature_cleanup(&program->output_signature);
shader_signature_cleanup(&program->patch_constant_signature);
+ vkd3d_shader_free_scan_descriptor_info1(&program->descriptors);
}
const struct vkd3d_shader_parameter1 *vsir_program_get_parameter(
@@ -469,6 +470,80 @@ static bool get_opcode_from_rel_op(enum vkd3d_shader_rel_op rel_op, enum vkd3d_d
return false;
}
+static enum vkd3d_result vsir_program_normalize_addr(struct vsir_program *program,
+ struct vsir_transformation_context *ctx)
+{
+ struct vkd3d_shader_instruction *ins, *ins2;
+ unsigned int tmp_idx = ~0u;
+ unsigned int i, k, r;
+
+ for (i = 0; i < program->instructions.count; ++i)
+ {
+ ins = &program->instructions.elements[i];
+
+ if (ins->opcode == VKD3DSIH_MOV && ins->dst[0].reg.type == VKD3DSPR_ADDR)
+ {
+ if (tmp_idx == ~0u)
+ tmp_idx = program->temp_count++;
+
+ ins->opcode = VKD3DSIH_FTOU;
+ vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
+ ins->dst[0].reg.idx[0].offset = tmp_idx;
+ ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
+ }
+ else if (ins->opcode == VKD3DSIH_MOVA)
+ {
+ if (tmp_idx == ~0u)
+ tmp_idx = program->temp_count++;
+
+ if (!shader_instruction_array_insert_at(&program->instructions, i + 1, 1))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ ins = &program->instructions.elements[i];
+ ins2 = &program->instructions.elements[i + 1];
+
+ ins->opcode = VKD3DSIH_ROUND_NE;
+ vsir_register_init(&ins->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
+ ins->dst[0].reg.idx[0].offset = tmp_idx;
+ ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
+
+ if (!vsir_instruction_init_with_params(program, ins2, &ins->location, VKD3DSIH_FTOU, 1, 1))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+
+ vsir_register_init(&ins2->dst[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
+ ins2->dst[0].reg.idx[0].offset = tmp_idx;
+ ins2->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
+ ins2->dst[0].write_mask = ins->dst[0].write_mask;
+
+ vsir_register_init(&ins2->src[0].reg, VKD3DSPR_TEMP, VKD3D_DATA_FLOAT, 1);
+ ins2->src[0].reg.idx[0].offset = tmp_idx;
+ ins2->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
+ ins2->src[0].swizzle = vsir_swizzle_from_writemask(ins2->dst[0].write_mask);
+ }
+
+ for (k = 0; k < ins->src_count; ++k)
+ {
+ struct vkd3d_shader_src_param *src = &ins->src[k];
+
+ for (r = 0; r < src->reg.idx_count; ++r)
+ {
+ struct vkd3d_shader_src_param *rel = src->reg.idx[r].rel_addr;
+
+ if (rel && rel->reg.type == VKD3DSPR_ADDR)
+ {
+ if (tmp_idx == ~0u)
+ tmp_idx = program->temp_count++;
+
+ vsir_register_init(&rel->reg, VKD3DSPR_TEMP, VKD3D_DATA_UINT, 1);
+ rel->reg.idx[0].offset = tmp_idx;
+ rel->reg.dimension = VSIR_DIMENSION_VEC4;
+ }
+ }
+ }
+ }
+
+ return VKD3D_OK;
+}
+
static enum vkd3d_result vsir_program_lower_ifc(struct vsir_program *program,
struct vkd3d_shader_instruction *ifc, unsigned int *tmp_idx,
struct vkd3d_shader_message_context *message_context)
@@ -481,6 +556,7 @@ static enum vkd3d_result vsir_program_lower_ifc(struct vsir_program *program,
if (!shader_instruction_array_insert_at(instructions, pos + 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ ifc = &instructions->elements[pos];
if (*tmp_idx == ~0u)
*tmp_idx = program->temp_count++;
@@ -534,6 +610,7 @@ static enum vkd3d_result vsir_program_lower_texkill(struct vsir_program *program
if (!shader_instruction_array_insert_at(instructions, pos + 1, components_read + 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ texkill = &instructions->elements[pos];
if (*tmp_idx == ~0u)
*tmp_idx = program->temp_count++;
@@ -620,6 +697,7 @@ static enum vkd3d_result vsir_program_lower_precise_mad(struct vsir_program *pro
if (!shader_instruction_array_insert_at(instructions, pos + 1, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ mad = &instructions->elements[pos];
if (*tmp_idx == ~0u)
*tmp_idx = program->temp_count++;
@@ -664,6 +742,7 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog
if (!shader_instruction_array_insert_at(instructions, pos + 1, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ sincos = &instructions->elements[pos];
ins = &instructions->elements[pos + 1];
@@ -716,6 +795,7 @@ static enum vkd3d_result vsir_program_lower_texldp(struct vsir_program *program,
if (!shader_instruction_array_insert_at(instructions, pos + 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ tex = &instructions->elements[pos];
if (*tmp_idx == ~0u)
*tmp_idx = program->temp_count++;
@@ -1127,6 +1207,7 @@ static enum vkd3d_result vsir_program_ensure_diffuse(struct vsir_program *progra
if (!shader_instruction_array_insert_at(&program->instructions, i, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
ins = &program->instructions.elements[i];
+
vsir_instruction_init_with_params(program, ins, &no_loc, VKD3DSIH_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_ATTROUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = 0;
@@ -1345,7 +1426,6 @@ static enum vkd3d_result vsir_program_remap_output_signature(struct vsir_program
loc = ins->location;
if (!shader_instruction_array_insert_at(&program->instructions, i, uninit_varying_count))
return VKD3D_ERROR_OUT_OF_MEMORY;
-
ins = &program->instructions.elements[i];
for (unsigned int j = signature->element_count - uninit_varying_count; j < signature->element_count; ++j)
@@ -1732,8 +1812,20 @@ static enum vkd3d_result instruction_array_normalise_hull_shader_control_point_i
return VKD3D_OK;
}
+struct io_normaliser_register_data
+{
+ struct
+ {
+ uint8_t register_count;
+ uint32_t mask;
+ uint32_t used_mask;
+ } component[VKD3D_VEC4_SIZE];
+};
+
+
struct io_normaliser
{
+ struct vkd3d_shader_message_context *message_context;
struct vkd3d_shader_instruction_array instructions;
enum vkd3d_shader_type shader_type;
uint8_t major;
@@ -1751,9 +1843,9 @@ struct io_normaliser
struct vkd3d_shader_dst_param *input_dcl_params[MAX_REG_OUTPUT];
struct vkd3d_shader_dst_param *output_dcl_params[MAX_REG_OUTPUT];
struct vkd3d_shader_dst_param *pc_dcl_params[MAX_REG_OUTPUT];
- uint8_t input_range_map[MAX_REG_OUTPUT][VKD3D_VEC4_SIZE];
- uint8_t output_range_map[MAX_REG_OUTPUT][VKD3D_VEC4_SIZE];
- uint8_t pc_range_map[MAX_REG_OUTPUT][VKD3D_VEC4_SIZE];
+ struct io_normaliser_register_data input_range_map[MAX_REG_OUTPUT];
+ struct io_normaliser_register_data output_range_map[MAX_REG_OUTPUT];
+ struct io_normaliser_register_data pc_range_map[MAX_REG_OUTPUT];
bool use_vocp;
};
@@ -1794,36 +1886,44 @@ struct signature_element *vsir_signature_find_element_for_reg(const struct shade
return NULL;
}
-static unsigned int range_map_get_register_count(uint8_t range_map[][VKD3D_VEC4_SIZE],
+static unsigned int range_map_get_register_count(struct io_normaliser_register_data range_map[],
unsigned int register_idx, uint32_t write_mask)
{
- return range_map[register_idx][vsir_write_mask_get_component_idx(write_mask)];
+ return range_map[register_idx].component[vsir_write_mask_get_component_idx(write_mask)].register_count;
}
-static void range_map_set_register_range(uint8_t range_map[][VKD3D_VEC4_SIZE], unsigned int register_idx,
- unsigned int register_count, uint32_t write_mask, bool is_dcl_indexrange)
+static enum vkd3d_result range_map_set_register_range(struct io_normaliser *normaliser,
+ struct io_normaliser_register_data range_map[], unsigned int register_idx,
+ unsigned int register_count, uint32_t mask, uint32_t used_mask, bool is_dcl_indexrange)
{
unsigned int i, j, r, c, component_idx, component_count;
- VKD3D_ASSERT(write_mask <= VKD3DSP_WRITEMASK_ALL);
- component_idx = vsir_write_mask_get_component_idx(write_mask);
- component_count = vsir_write_mask_component_count(write_mask);
+ VKD3D_ASSERT(mask <= VKD3DSP_WRITEMASK_ALL);
+ component_idx = vsir_write_mask_get_component_idx(mask);
+ component_count = vsir_write_mask_component_count(mask);
VKD3D_ASSERT(register_idx < MAX_REG_OUTPUT && MAX_REG_OUTPUT - register_idx >= register_count);
- if (range_map[register_idx][component_idx] > register_count && is_dcl_indexrange)
+ if (range_map[register_idx].component[component_idx].register_count > register_count && is_dcl_indexrange)
{
- /* Validated in the TPF reader. */
- VKD3D_ASSERT(range_map[register_idx][component_idx] != UINT8_MAX);
- return;
+ if (range_map[register_idx].component[component_idx].register_count == UINT8_MAX)
+ {
+ WARN("Conflicting index ranges.\n");
+ vkd3d_shader_error(normaliser->message_context, NULL,
+ VKD3D_SHADER_ERROR_VSIR_INVALID_SIGNATURE, "Conflicting index ranges.");
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+ return VKD3D_OK;
}
- if (range_map[register_idx][component_idx] == register_count)
+ if (range_map[register_idx].component[component_idx].register_count == register_count)
{
/* Already done. This happens when fxc splits a register declaration by
* component(s). The dcl_indexrange instructions are split too. */
- return;
+ return VKD3D_OK;
}
- range_map[register_idx][component_idx] = register_count;
+ range_map[register_idx].component[component_idx].register_count = register_count;
+ range_map[register_idx].component[component_idx].mask = mask;
+ range_map[register_idx].component[component_idx].used_mask = used_mask;
for (i = 0; i < register_count; ++i)
{
@@ -1834,21 +1934,31 @@ static void range_map_set_register_range(uint8_t range_map[][VKD3D_VEC4_SIZE], u
/* A synthetic patch constant range which overlaps an existing range can start upstream of it
* for fork/join phase instancing, but ranges declared by dcl_indexrange should not overlap.
* The latter is validated in the TPF reader. */
- VKD3D_ASSERT(!range_map[r][c] || !is_dcl_indexrange);
- range_map[r][c] = UINT8_MAX;
+ if (range_map[r].component[c].register_count && is_dcl_indexrange)
+ {
+ WARN("Conflicting index ranges.\n");
+ vkd3d_shader_error(normaliser->message_context, NULL,
+ VKD3D_SHADER_ERROR_VSIR_INVALID_SIGNATURE, "Conflicting index ranges.");
+ return VKD3D_ERROR_INVALID_SHADER;
+ }
+ range_map[r].component[c].register_count = UINT8_MAX;
+ range_map[r].component[c].mask = mask;
+ range_map[r].component[c].used_mask = used_mask;
}
}
+
+ return VKD3D_OK;
}
-static void io_normaliser_add_index_range(struct io_normaliser *normaliser,
+static enum vkd3d_result io_normaliser_add_index_range(struct io_normaliser *normaliser,
const struct vkd3d_shader_instruction *ins)
{
const struct vkd3d_shader_index_range *range = &ins->declaration.index_range;
const struct vkd3d_shader_register *reg = &range->dst.reg;
+ struct io_normaliser_register_data *range_map;
const struct shader_signature *signature;
- uint8_t (*range_map)[VKD3D_VEC4_SIZE];
- struct signature_element *element;
- unsigned int reg_idx, write_mask;
+ uint32_t mask, used_mask;
+ unsigned int reg_idx, i;
switch (reg->type)
{
@@ -1879,9 +1989,21 @@ static void io_normaliser_add_index_range(struct io_normaliser *normaliser,
}
reg_idx = reg->idx[reg->idx_count - 1].offset;
- write_mask = range->dst.write_mask;
- element = vsir_signature_find_element_for_reg(signature, reg_idx, write_mask);
- range_map_set_register_range(range_map, reg_idx, range->register_count, element->mask, true);
+ mask = range->dst.write_mask;
+ used_mask = 0;
+
+ for (i = 0; i < range->register_count; ++i)
+ {
+ struct signature_element *element;
+
+ if ((element = vsir_signature_find_element_for_reg(signature, reg_idx + i, mask)))
+ {
+ mask |= element->mask;
+ used_mask |= element->used_mask;
+ }
+ }
+
+ return range_map_set_register_range(normaliser, range_map, reg_idx, range->register_count, mask, used_mask, true);
}
static int signature_element_mask_compare(const void *a, const void *b)
@@ -1908,11 +2030,12 @@ static bool sysval_semantics_should_merge(const struct signature_element *e, con
}
/* Merge tess factor sysvals because they are an array in SPIR-V. */
-static void shader_signature_map_patch_constant_index_ranges(struct shader_signature *s,
- uint8_t range_map[][VKD3D_VEC4_SIZE])
+static enum vkd3d_result shader_signature_map_patch_constant_index_ranges(struct io_normaliser *normaliser,
+ struct shader_signature *s, struct io_normaliser_register_data range_map[])
{
- struct signature_element *e, *f;
unsigned int i, j, register_count;
+ struct signature_element *e, *f;
+ enum vkd3d_result ret;
qsort(s->elements, s->element_count, sizeof(s->elements[0]), signature_element_mask_compare);
@@ -1933,8 +2056,12 @@ static void shader_signature_map_patch_constant_index_ranges(struct shader_signa
if (register_count < 2)
continue;
- range_map_set_register_range(range_map, e->register_index, register_count, e->mask, false);
+ if ((ret = range_map_set_register_range(normaliser, range_map,
+ e->register_index, register_count, e->mask, e->used_mask, false) < 0))
+ return ret;
}
+
+ return VKD3D_OK;
}
static int signature_element_register_compare(const void *a, const void *b)
@@ -1977,62 +2104,19 @@ static int signature_element_index_compare(const void *a, const void *b)
return vkd3d_u32_compare(e->sort_index, f->sort_index);
}
-static unsigned int signature_element_range_expand_mask(struct signature_element *e, unsigned int register_count,
- uint8_t range_map[][VKD3D_VEC4_SIZE])
-{
- unsigned int i, j, component_idx, component_count, merged_write_mask = e->mask;
-
- /* dcl_indexrange instructions can declare a subset of the full mask, and the masks of
- * the elements within the range may differ. TPF's handling of arrayed inputs with
- * dcl_indexrange is really just a hack. Here we create a mask which covers all element
- * masks, and check for collisions with other ranges. */
-
- for (i = 1; i < register_count; ++i)
- merged_write_mask |= e[i].mask;
-
- if (merged_write_mask == e->mask)
- return merged_write_mask;
-
- /* Reaching this point is very rare to begin with, and collisions are even rarer or
- * impossible. If the latter shows up, the fallback in shader_signature_find_element_for_reg()
- * may be sufficient. */
-
- component_idx = vsir_write_mask_get_component_idx(e->mask);
- component_count = vsir_write_mask_component_count(e->mask);
-
- for (i = e->register_index; i < e->register_index + register_count; ++i)
- {
- for (j = 0; j < component_idx; ++j)
- if (range_map[i][j])
- break;
- for (j = component_idx + component_count; j < VKD3D_VEC4_SIZE; ++j)
- if (range_map[i][j])
- break;
- }
-
- if (i == register_count)
- {
- WARN("Expanding mask %#x to %#x for %s, base reg %u, count %u.\n", e->mask, merged_write_mask,
- e->semantic_name, e->register_index, register_count);
- return merged_write_mask;
- }
-
- WARN("Cannot expand mask %#x to %#x for %s, base reg %u, count %u.\n", e->mask, merged_write_mask,
- e->semantic_name, e->register_index, register_count);
- return e->mask;
-}
-
-static bool shader_signature_merge(struct shader_signature *s, uint8_t range_map[][VKD3D_VEC4_SIZE],
+static enum vkd3d_result shader_signature_merge(struct io_normaliser *normaliser,
+ struct shader_signature *s, struct io_normaliser_register_data range_map[],
bool is_patch_constant)
{
unsigned int i, j, element_count, new_count, register_count;
struct signature_element *elements;
+ enum vkd3d_result ret = VKD3D_OK;
struct signature_element *e, *f;
bool used;
element_count = s->element_count;
if (!(elements = vkd3d_malloc(element_count * sizeof(*elements))))
- return false;
+ return VKD3D_ERROR_OUT_OF_MEMORY;
if (element_count)
memcpy(elements, s->elements, element_count * sizeof(*elements));
@@ -2091,42 +2175,49 @@ static bool shader_signature_merge(struct shader_signature *s, uint8_t range_map
s->elements = elements;
s->element_count = element_count;
- if (is_patch_constant)
- shader_signature_map_patch_constant_index_ranges(s, range_map);
+ if (is_patch_constant
+ && (ret = shader_signature_map_patch_constant_index_ranges(normaliser, s, range_map)) < 0)
+ goto out;
- for (i = 0, new_count = 0; i < element_count; i += register_count, elements[new_count++] = *e)
+ for (i = 0, new_count = 0; i < element_count; ++i)
{
e = &elements[i];
register_count = 1;
if (e->register_index >= MAX_REG_OUTPUT)
+ {
+ elements[new_count++] = *e;
continue;
+ }
register_count = range_map_get_register_count(range_map, e->register_index, e->mask);
- VKD3D_ASSERT(register_count != UINT8_MAX);
- register_count += !register_count;
- if (register_count > 1)
+ if (register_count == UINT8_MAX)
{
- TRACE("Merging %s, base reg %u, count %u.\n", e->semantic_name, e->register_index, register_count);
- e->register_count = register_count;
- e->mask = signature_element_range_expand_mask(e, register_count, range_map);
+ TRACE("Register %u mask %#x semantic %s%u has already been merged, dropping it.\n",
+ e->register_index, e->mask, e->semantic_name, e->semantic_index);
+ vkd3d_free((void *)e->semantic_name);
+ continue;
+ }
- for (j = 1; j < register_count; ++j)
- {
- f = &elements[i + j];
- vkd3d_free((void *)f->semantic_name);
- }
+ if (register_count > 0)
+ {
+ TRACE("Register %u mask %#x semantic %s%u is used as merge destination.\n",
+ e->register_index, e->mask, e->semantic_name, e->semantic_index);
+ e->register_count = register_count;
+ e->mask = range_map[e->register_index].component[vsir_write_mask_get_component_idx(e->mask)].mask;
+ e->used_mask = range_map[e->register_index].component[vsir_write_mask_get_component_idx(e->mask)].used_mask;
}
+
+ elements[new_count++] = *e;
}
- element_count = new_count;
+ s->element_count = new_count;
+out:
/* Restoring the original order is required for sensible trace output. */
- qsort(elements, element_count, sizeof(elements[0]), signature_element_index_compare);
-
- s->element_count = element_count;
+ qsort(s->elements, s->element_count, sizeof(elements[0]), signature_element_index_compare);
- return true;
+ return ret;
}
static unsigned int shader_register_normalise_arrayed_addressing(struct vkd3d_shader_register *reg,
@@ -2342,8 +2433,9 @@ static void shader_instruction_normalise_io_params(struct vkd3d_shader_instructi
static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program *program,
struct vsir_transformation_context *ctx)
{
- struct io_normaliser normaliser = {program->instructions};
+ struct io_normaliser normaliser = {ctx->message_context, program->instructions};
struct vkd3d_shader_instruction *ins;
+ enum vkd3d_result ret;
unsigned int i;
VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_HULL_CONTROL_POINT_IO);
@@ -2365,7 +2457,8 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program
normaliser.output_control_point_count = ins->declaration.count;
break;
case VKD3DSIH_DCL_INDEX_RANGE:
- io_normaliser_add_index_range(&normaliser, ins);
+ if ((ret = io_normaliser_add_index_range(&normaliser, ins)) < 0)
+ return ret;
vkd3d_shader_instruction_make_nop(ins);
break;
case VKD3DSIH_HS_CONTROL_POINT_PHASE:
@@ -2378,12 +2471,14 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program
}
}
- if (!shader_signature_merge(&program->input_signature, normaliser.input_range_map, false)
- || !shader_signature_merge(&program->output_signature, normaliser.output_range_map, false)
- || !shader_signature_merge(&program->patch_constant_signature, normaliser.pc_range_map, true))
+ if ((ret = shader_signature_merge(&normaliser, &program->input_signature, normaliser.input_range_map, false)) < 0
+ || (ret = shader_signature_merge(&normaliser, &program->output_signature,
+ normaliser.output_range_map, false)) < 0
+ || (ret = shader_signature_merge(&normaliser, &program->patch_constant_signature,
+ normaliser.pc_range_map, true)) < 0)
{
program->instructions = normaliser.instructions;
- return VKD3D_ERROR_OUT_OF_MEMORY;
+ return ret;
}
normaliser.phase = VKD3DSIH_INVALID;
@@ -2410,7 +2505,8 @@ struct flat_constants_normaliser
};
static bool get_flat_constant_register_type(const struct vkd3d_shader_register *reg,
- enum vkd3d_shader_d3dbc_constant_register *set, uint32_t *index)
+ enum vkd3d_shader_d3dbc_constant_register *set, uint32_t *index,
+ struct vkd3d_shader_src_param **rel_addr)
{
static const struct
{
@@ -2430,12 +2526,8 @@ static bool get_flat_constant_register_type(const struct vkd3d_shader_register *
{
if (reg->type == regs[i].type)
{
- if (reg->idx[0].rel_addr)
- {
- FIXME("Unhandled relative address.\n");
- return false;
- }
-
+ if (rel_addr)
+ *rel_addr = reg->idx[0].rel_addr;
*set = regs[i].set;
*index = reg->idx[0].offset;
return true;
@@ -2449,10 +2541,11 @@ static void shader_register_normalise_flat_constants(struct vkd3d_shader_src_par
const struct flat_constants_normaliser *normaliser)
{
enum vkd3d_shader_d3dbc_constant_register set;
+ struct vkd3d_shader_src_param *rel_addr;
uint32_t index;
size_t i, j;
- if (!get_flat_constant_register_type(&param->reg, &set, &index))
+ if (!get_flat_constant_register_type(&param->reg, &set, &index, &rel_addr))
return;
for (i = 0; i < normaliser->def_count; ++i)
@@ -2470,8 +2563,11 @@ static void shader_register_normalise_flat_constants(struct vkd3d_shader_src_par
param->reg.type = VKD3DSPR_CONSTBUFFER;
param->reg.idx[0].offset = set; /* register ID */
+ param->reg.idx[0].rel_addr = NULL;
param->reg.idx[1].offset = set; /* register index */
+ param->reg.idx[1].rel_addr = NULL;
param->reg.idx[2].offset = index; /* buffer index */
+ param->reg.idx[2].rel_addr = rel_addr;
param->reg.idx_count = 3;
}
@@ -2498,7 +2594,7 @@ static enum vkd3d_result vsir_program_normalise_flat_constants(struct vsir_progr
def = &normaliser.defs[normaliser.def_count++];
- get_flat_constant_register_type((struct vkd3d_shader_register *)&ins->dst[0].reg, &def->set, &def->index);
+ get_flat_constant_register_type(&ins->dst[0].reg, &def->set, &def->index, NULL);
for (j = 0; j < 4; ++j)
def->value[j] = ins->src[0].reg.u.immconst_u32[j];
@@ -6021,6 +6117,7 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
uint32_t colour_temp, size_t *ret_pos, struct vkd3d_shader_message_context *message_context)
{
struct vkd3d_shader_instruction_array *instructions = &program->instructions;
+ const struct vkd3d_shader_location loc = ret->location;
static const struct vkd3d_shader_location no_loc;
size_t pos = ret - instructions->elements;
struct vkd3d_shader_instruction *ins;
@@ -6045,9 +6142,10 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
{
if (!shader_instruction_array_insert_at(&program->instructions, pos, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ ret = NULL;
ins = &program->instructions.elements[pos];
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_DISCARD, 0, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_DISCARD, 0, 1);
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_Z;
src_param_init_const_uint(&ins->src[0], 0);
@@ -6057,20 +6155,20 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
if (!shader_instruction_array_insert_at(&program->instructions, pos, 3))
return VKD3D_ERROR_OUT_OF_MEMORY;
-
+ ret = NULL;
ins = &program->instructions.elements[pos];
switch (ref->data_type)
{
case VKD3D_SHADER_PARAMETER_DATA_TYPE_FLOAT32:
- vsir_instruction_init_with_params(program, ins, &ret->location, opcodes[compare_func].float_opcode, 1, 2);
+ vsir_instruction_init_with_params(program, ins, &loc, opcodes[compare_func].float_opcode, 1, 2);
src_param_init_temp_float(&ins->src[opcodes[compare_func].swap ? 1 : 0], colour_temp);
src_param_init_parameter(&ins->src[opcodes[compare_func].swap ? 0 : 1],
VKD3D_SHADER_PARAMETER_NAME_ALPHA_TEST_REF, VKD3D_DATA_FLOAT);
break;
case VKD3D_SHADER_PARAMETER_DATA_TYPE_UINT32:
- vsir_instruction_init_with_params(program, ins, &ret->location, opcodes[compare_func].uint_opcode, 1, 2);
+ vsir_instruction_init_with_params(program, ins, &loc, opcodes[compare_func].uint_opcode, 1, 2);
src_param_init_temp_uint(&ins->src[opcodes[compare_func].swap ? 1 : 0], colour_temp);
src_param_init_parameter(&ins->src[opcodes[compare_func].swap ? 0 : 1],
VKD3D_SHADER_PARAMETER_NAME_ALPHA_TEST_REF, VKD3D_DATA_UINT);
@@ -6091,14 +6189,14 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
ins->src[opcodes[compare_func].swap ? 1 : 0].swizzle = VKD3D_SHADER_SWIZZLE(W, W, W, W);
++ins;
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_DISCARD, 0, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_DISCARD, 0, 1);
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_Z;
src_param_init_ssa_bool(&ins->src[0], program->ssa_count);
++program->ssa_count;
++ins;
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_OUTPUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = colour_signature_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
@@ -6199,13 +6297,14 @@ static enum vkd3d_result insert_clip_planes_before_ret(struct vsir_program *prog
uint32_t position_temp, uint32_t low_signature_idx, uint32_t high_signature_idx, size_t *ret_pos)
{
struct vkd3d_shader_instruction_array *instructions = &program->instructions;
+ const struct vkd3d_shader_location loc = ret->location;
size_t pos = ret - instructions->elements;
struct vkd3d_shader_instruction *ins;
unsigned int output_idx = 0;
if (!shader_instruction_array_insert_at(&program->instructions, pos, vkd3d_popcount(mask) + 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
-
+ ret = NULL;
ins = &program->instructions.elements[pos];
for (unsigned int i = 0; i < 8; ++i)
@@ -6213,7 +6312,7 @@ static enum vkd3d_result insert_clip_planes_before_ret(struct vsir_program *prog
if (!(mask & (1u << i)))
continue;
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_DP4, 1, 2);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_DP4, 1, 2);
src_param_init_temp_float4(&ins->src[0], position_temp);
src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_CLIP_PLANE_0 + i, VKD3D_DATA_FLOAT);
ins->src[1].swizzle = VKD3D_SHADER_NO_SWIZZLE;
@@ -6231,7 +6330,7 @@ static enum vkd3d_result insert_clip_planes_before_ret(struct vsir_program *prog
++ins;
}
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_OUTPUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = position_signature_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
@@ -6388,15 +6487,16 @@ static enum vkd3d_result insert_point_size_before_ret(struct vsir_program *progr
const struct vkd3d_shader_instruction *ret, size_t *ret_pos)
{
struct vkd3d_shader_instruction_array *instructions = &program->instructions;
+ const struct vkd3d_shader_location loc = ret->location;
size_t pos = ret - instructions->elements;
struct vkd3d_shader_instruction *ins;
if (!shader_instruction_array_insert_at(&program->instructions, pos, 1))
return VKD3D_ERROR_OUT_OF_MEMORY;
-
+ ret = NULL;
ins = &program->instructions.elements[pos];
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
vsir_dst_param_init(&ins->dst[0], VKD3DSPR_RASTOUT, VKD3D_DATA_FLOAT, 1);
ins->dst[0].reg.idx[0].offset = VSIR_RASTOUT_POINT_SIZE;
src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE, VKD3D_DATA_FLOAT);
@@ -6525,9 +6625,9 @@ static enum vkd3d_result vsir_program_insert_point_size_clamp(struct vsir_progra
if (!shader_instruction_array_insert_at(&program->instructions, i + 1, !!min_parameter + !!max_parameter))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ ins = &program->instructions.elements[i + 1];
loc = &program->instructions.elements[i].location;
- ins = &program->instructions.elements[i + 1];
if (min_parameter)
{
@@ -6725,7 +6825,6 @@ static enum vkd3d_result vsir_program_insert_point_coord(struct vsir_program *pr
{
if (!shader_instruction_array_insert_at(&program->instructions, insert_pos, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
-
ins = &program->instructions.elements[insert_pos];
vsir_instruction_init_with_params(program, ins, &no_loc, VKD3DSIH_MOV, 1, 1);
@@ -6799,6 +6898,8 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
*/
if (!shader_instruction_array_insert_at(&program->instructions, pos, 4))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ ret = NULL;
+
*ret_pos = pos + 4;
ssa_temp = program->ssa_count++;
@@ -6829,6 +6930,8 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
*/
if (!shader_instruction_array_insert_at(&program->instructions, pos, 4))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ ret = NULL;
+
*ret_pos = pos + 4;
ssa_temp = program->ssa_count++;
@@ -6859,6 +6962,8 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
*/
if (!shader_instruction_array_insert_at(&program->instructions, pos, 5))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ ret = NULL;
+
*ret_pos = pos + 5;
ssa_temp = program->ssa_count++;
@@ -7037,16 +7142,18 @@ static enum vkd3d_result insert_vertex_fog_before_ret(struct vsir_program *progr
{
const struct signature_element *e = &program->output_signature.elements[source_signature_idx];
struct vkd3d_shader_instruction_array *instructions = &program->instructions;
+ const struct vkd3d_shader_location loc = ret->location;
size_t pos = ret - instructions->elements;
struct vkd3d_shader_instruction *ins;
if (!shader_instruction_array_insert_at(&program->instructions, pos, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
+ ret = NULL;
ins = &program->instructions.elements[pos];
/* Write the fog output. */
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
dst_param_init_output(&ins->dst[0], VKD3D_DATA_FLOAT, fog_signature_idx, 0x1);
src_param_init_temp_float4(&ins->src[0], temp);
if (source == VKD3D_SHADER_FOG_SOURCE_Z)
@@ -7056,7 +7163,7 @@ static enum vkd3d_result insert_vertex_fog_before_ret(struct vsir_program *progr
++ins;
/* Write the position or specular output. */
- vsir_instruction_init_with_params(program, ins, &ret->location, VKD3DSIH_MOV, 1, 1);
+ vsir_instruction_init_with_params(program, ins, &loc, VKD3DSIH_MOV, 1, 1);
dst_param_init_output(&ins->dst[0], vkd3d_data_type_from_component_type(e->component_type),
source_signature_idx, e->mask);
src_param_init_temp_float4(&ins->src[0], temp);
@@ -7691,6 +7798,54 @@ static void vsir_validate_label_register(struct validation_context *ctx,
reg->idx[0].offset, ctx->program->block_count);
}
+static void vsir_validate_descriptor_indices(struct validation_context *ctx,
+ const struct vkd3d_shader_register *reg, enum vkd3d_shader_descriptor_type type, const char *name)
+{
+ const struct vkd3d_shader_descriptor_info1 *descriptor;
+
+ if (reg->idx[0].rel_addr)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
+ "Non-NULL indirect address for the ID of a register of type \"%s\".", name);
+
+ if (!ctx->program->has_descriptor_info)
+ return;
+
+ if (!(descriptor = vkd3d_shader_find_descriptor(&ctx->program->descriptors, type, reg->idx[0].offset)))
+ {
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
+ "No matching descriptor found for register %s%u.", name, reg->idx[0].offset);
+ return;
+ }
+
+ if (!reg->idx[1].rel_addr && (reg->idx[1].offset < descriptor->register_index
+ || reg->idx[1].offset - descriptor->register_index >= descriptor->count))
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
+ "Register index %u doesn't belong to the range [%u, %u] for register %s%u.",
+ reg->idx[1].offset, descriptor->register_index,
+ descriptor->register_index + descriptor->count - 1, name, reg->idx[0].offset);
+}
+
+static void vsir_validate_constbuffer_register(struct validation_context *ctx,
+ const struct vkd3d_shader_register *reg)
+{
+ if (reg->precision != VKD3D_SHADER_REGISTER_PRECISION_DEFAULT)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_PRECISION,
+ "Invalid precision %#x for a CONSTBUFFER register.", reg->precision);
+
+ if (reg->dimension != VSIR_DIMENSION_VEC4)
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_DIMENSION,
+ "Invalid dimension %#x for a CONSTBUFFER register.", reg->dimension);
+
+ if (reg->idx_count != 3)
+ {
+ validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX_COUNT,
+ "Invalid index count %u for a CONSTBUFFER register.", reg->idx_count);
+ return;
+ }
+
+ vsir_validate_descriptor_indices(ctx, reg, VKD3D_SHADER_DESCRIPTOR_TYPE_CBV, "cb");
+}
+
static void vsir_validate_sampler_register(struct validation_context *ctx,
const struct vkd3d_shader_register *reg)
{
@@ -7714,9 +7869,7 @@ static void vsir_validate_sampler_register(struct validation_context *ctx,
return;
}
- if (reg->idx[0].rel_addr)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
- "Non-NULL relative address for the descriptor index of a SAMPLER register.");
+ vsir_validate_descriptor_indices(ctx, reg, VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER, "s");
}
static void vsir_validate_resource_register(struct validation_context *ctx,
@@ -7741,9 +7894,7 @@ static void vsir_validate_resource_register(struct validation_context *ctx,
return;
}
- if (reg->idx[0].rel_addr)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
- "Non-NULL relative address for the descriptor index of a RESOURCE register.");
+ vsir_validate_descriptor_indices(ctx, reg, VKD3D_SHADER_DESCRIPTOR_TYPE_SRV, "t");
}
static void vsir_validate_uav_register(struct validation_context *ctx,
@@ -7773,9 +7924,7 @@ static void vsir_validate_uav_register(struct validation_context *ctx,
return;
}
- if (reg->idx[0].rel_addr)
- validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
- "Non-NULL relative address for the descriptor index of a UAV register.");
+ vsir_validate_descriptor_indices(ctx, reg, VKD3D_SHADER_DESCRIPTOR_TYPE_UAV, "u");
}
static void vsir_validate_ssa_register(struct validation_context *ctx,
@@ -7928,6 +8077,10 @@ static void vsir_validate_register(struct validation_context *ctx,
vsir_validate_register_without_indices(ctx, reg);
break;
+ case VKD3DSPR_CONSTBUFFER:
+ vsir_validate_constbuffer_register(ctx, reg);
+ break;
+
case VKD3DSPR_PRIMID:
vsir_validate_register_without_indices(ctx, reg);
break;
@@ -8115,6 +8268,8 @@ static void vsir_validate_dst_param(struct validation_context *ctx,
case VKD3DSPR_IMMCONST:
case VKD3DSPR_IMMCONST64:
+ case VKD3DSPR_CONSTBUFFER:
+ case VKD3DSPR_IMMCONSTBUFFER:
case VKD3DSPR_SAMPLER:
case VKD3DSPR_RESOURCE:
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
@@ -8505,10 +8660,13 @@ static void vsir_validate_signature_element(struct validation_context *ctx,
{
case VKD3D_SHADER_COMPONENT_INT:
case VKD3D_SHADER_COMPONENT_UINT:
+ case VKD3D_SHADER_COMPONENT_INT16:
+ case VKD3D_SHADER_COMPONENT_UINT16:
integer_type = true;
break;
case VKD3D_SHADER_COMPONENT_FLOAT:
+ case VKD3D_SHADER_COMPONENT_FLOAT16:
break;
default:
@@ -9776,6 +9934,9 @@ enum vkd3d_result vsir_program_transform(struct vsir_program *program, uint64_t
if (program->shader_version.major <= 2)
vsir_transform(&ctx, vsir_program_ensure_diffuse);
+ if (program->shader_version.major < 4)
+ vsir_transform(&ctx, vsir_program_normalize_addr);
+
if (program->shader_version.type != VKD3D_SHADER_TYPE_PIXEL)
vsir_transform(&ctx, vsir_program_remap_output_signature);
diff --git a/libs/vkd3d/libs/vkd3d-shader/msl.c b/libs/vkd3d/libs/vkd3d-shader/msl.c
index e783128e236..a5d952cd525 100644
--- a/libs/vkd3d/libs/vkd3d-shader/msl.c
+++ b/libs/vkd3d/libs/vkd3d-shader/msl.c
@@ -44,7 +44,6 @@ struct msl_generator
bool write_depth;
const struct vkd3d_shader_interface_info *interface_info;
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info;
};
static void VKD3D_PRINTF_FUNC(3, 4) msl_compiler_error(struct msl_generator *gen,
@@ -821,7 +820,7 @@ static void msl_generate_cbv_declaration(struct msl_generator *gen,
static void msl_generate_descriptor_struct_declarations(struct msl_generator *gen)
{
- const struct vkd3d_shader_scan_descriptor_info1 *info = gen->descriptor_info;
+ const struct vkd3d_shader_scan_descriptor_info1 *info = &gen->program->descriptors;
const struct vkd3d_shader_descriptor_info1 *descriptor;
struct vkd3d_string_buffer *buffer = gen->buffer;
unsigned int i;
@@ -1171,7 +1170,7 @@ static void msl_generate_entrypoint(struct msl_generator *gen)
vkd3d_string_buffer_printf(gen->buffer, "vkd3d_%s_out shader_entry(\n", gen->prefix);
- if (gen->descriptor_info->descriptor_count)
+ if (gen->program->descriptors.descriptor_count)
{
msl_print_indent(gen->buffer, 2);
/* TODO: Configurable argument buffer binding location. */
@@ -1195,7 +1194,7 @@ static void msl_generate_entrypoint(struct msl_generator *gen)
vkd3d_string_buffer_printf(gen->buffer, " %s_main(%s_in, %s_out", gen->prefix, gen->prefix, gen->prefix);
if (gen->write_depth)
vkd3d_string_buffer_printf(gen->buffer, ", shader_out_depth");
- if (gen->descriptor_info->descriptor_count)
+ if (gen->program->descriptors.descriptor_count)
vkd3d_string_buffer_printf(gen->buffer, ", descriptors");
vkd3d_string_buffer_printf(gen->buffer, ");\n");
@@ -1234,7 +1233,7 @@ static int msl_generator_generate(struct msl_generator *gen, struct vkd3d_shader
gen->prefix);
if (gen->write_depth)
vkd3d_string_buffer_printf(gen->buffer, ", thread float& o_depth");
- if (gen->descriptor_info->descriptor_count)
+ if (gen->program->descriptors.descriptor_count)
vkd3d_string_buffer_printf(gen->buffer, ", constant vkd3d_%s_descriptors& descriptors", gen->prefix);
vkd3d_string_buffer_printf(gen->buffer, ")\n{\n");
@@ -1276,7 +1275,6 @@ static void msl_generator_cleanup(struct msl_generator *gen)
static int msl_generator_init(struct msl_generator *gen, struct vsir_program *program,
const struct vkd3d_shader_compile_info *compile_info,
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info,
struct vkd3d_shader_message_context *message_context)
{
enum vkd3d_shader_type type = program->shader_version.type;
@@ -1297,13 +1295,11 @@ static int msl_generator_init(struct msl_generator *gen, struct vsir_program *pr
return VKD3D_ERROR_INVALID_SHADER;
}
gen->interface_info = vkd3d_find_struct(compile_info->next, INTERFACE_INFO);
- gen->descriptor_info = descriptor_info;
return VKD3D_OK;
}
int msl_compile(struct vsir_program *program, uint64_t config_flags,
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info,
const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_code *out,
struct vkd3d_shader_message_context *message_context)
{
@@ -1314,8 +1310,9 @@ int msl_compile(struct vsir_program *program, uint64_t config_flags,
return ret;
VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_SM6);
+ VKD3D_ASSERT(program->has_descriptor_info);
- if ((ret = msl_generator_init(&generator, program, compile_info, descriptor_info, message_context)) < 0)
+ if ((ret = msl_generator_init(&generator, program, compile_info, message_context)) < 0)
return ret;
ret = msl_generator_generate(&generator, out);
msl_generator_cleanup(&generator);
diff --git a/libs/vkd3d/libs/vkd3d-shader/preproc.l b/libs/vkd3d/libs/vkd3d-shader/preproc.l
index 4a8d0fddae1..d167415c356 100644
--- a/libs/vkd3d/libs/vkd3d-shader/preproc.l
+++ b/libs/vkd3d/libs/vkd3d-shader/preproc.l
@@ -20,6 +20,7 @@
%{
+#include "preproc.h"
#include "preproc.tab.h"
#undef ERROR /* defined in wingdi.h */
diff --git a/libs/vkd3d/libs/vkd3d-shader/preproc.y b/libs/vkd3d/libs/vkd3d-shader/preproc.y
index c6be17bd230..95987831faa 100644
--- a/libs/vkd3d/libs/vkd3d-shader/preproc.y
+++ b/libs/vkd3d/libs/vkd3d-shader/preproc.y
@@ -178,6 +178,16 @@ static int default_open_include(const char *filename, bool local,
if (S_ISREG(st.st_mode))
size = st.st_size;
+ if (!size)
+ {
+ fclose(f);
+
+ out->code = NULL;
+ out->size = 0;
+
+ return VKD3D_OK;
+ }
+
if (!(data = vkd3d_malloc(size)))
{
fclose(f);
diff --git a/libs/vkd3d/libs/vkd3d-shader/spirv.c b/libs/vkd3d/libs/vkd3d-shader/spirv.c
index db7ebab742d..91a6686eb0d 100644
--- a/libs/vkd3d/libs/vkd3d-shader/spirv.c
+++ b/libs/vkd3d/libs/vkd3d-shader/spirv.c
@@ -18,6 +18,7 @@
*/
#include "vkd3d_shader_private.h"
+#include "spirv_grammar.h"
#include "wine/rbtree.h"
#include <stdarg.h>
@@ -60,6 +61,8 @@
#define VKD3D_SPIRV_INSTRUCTION_OP_SHIFT 0u
#define VKD3D_SPIRV_INSTRUCTION_OP_MASK (0xffffu << VKD3D_SPIRV_INSTRUCTION_OP_SHIFT)
+#define VKD3D_SPIRV_INDENT 15
+
#ifdef HAVE_SPIRV_TOOLS
# include "spirv-tools/libspirv.h"
@@ -211,6 +214,10 @@ struct spirv_colours
{
const char *reset;
const char *comment;
+ const char *literal;
+ const char *enumerant;
+ const char *opcode;
+ const char *id;
};
struct spirv_parser
@@ -240,6 +247,16 @@ static void VKD3D_PRINTF_FUNC(3, 4) spirv_parser_error(struct spirv_parser *pars
parser->failed = true;
}
+static void VKD3D_PRINTF_FUNC(3, 4) spirv_parser_warning(struct spirv_parser *parser,
+ enum vkd3d_shader_error error, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vkd3d_shader_vwarning(parser->message_context, &parser->location, error, format, args);
+ va_end(args);
+}
+
static uint32_t spirv_parser_read_u32(struct spirv_parser *parser)
{
if (parser->pos >= parser->size)
@@ -290,6 +307,91 @@ static void spirv_parser_print_generator(struct spirv_parser *parser, uint32_t m
spirv_parser_print_comment(parser, "Generator: Unknown (%#x); %u", id, version);
}
+static void spirv_parser_print_immediate_word(struct spirv_parser *parser,
+ struct vkd3d_string_buffer *buffer, const char *prefix, uint32_t w, const char *suffix)
+{
+ vkd3d_string_buffer_printf(buffer, "%s!%s0x%08x%s%s",
+ prefix, parser->colours.literal, w, parser->colours.reset, suffix);
+}
+
+static void spirv_parser_print_id(struct spirv_parser *parser,
+ struct vkd3d_string_buffer *buffer, const char *prefix, uint32_t id, const char *suffix)
+{
+ vkd3d_string_buffer_printf(buffer, "%s%%%s%u%s%s",
+ prefix, parser->colours.id, id, parser->colours.reset, suffix);
+}
+
+static void spirv_parser_print_uint_literal(struct spirv_parser *parser,
+ struct vkd3d_string_buffer *buffer, const char *prefix, uint32_t i, const char *suffix)
+{
+ vkd3d_string_buffer_printf(buffer, "%s%s%u%s%s",
+ prefix, parser->colours.literal, i, parser->colours.reset, suffix);
+}
+
+static void spirv_parser_print_enumerant(struct spirv_parser *parser,
+ struct vkd3d_string_buffer *buffer, const char *prefix, const char *name, const char *suffix)
+{
+ vkd3d_string_buffer_printf(buffer, "%s%s%s%s%s",
+ prefix, parser->colours.enumerant, name, parser->colours.reset, suffix);
+}
+
+static void spirv_parser_print_opcode(struct spirv_parser *parser,
+ struct vkd3d_string_buffer *buffer, const char *name)
+{
+ vkd3d_string_buffer_printf(buffer, "%s%s%s", parser->colours.opcode, name, parser->colours.reset);
+}
+
+static void spirv_parser_print_instruction_offset(struct spirv_parser *parser,
+ struct vkd3d_string_buffer *buffer, const char *prefix, size_t offset, const char *suffix)
+{
+ vkd3d_string_buffer_printf(parser->text, "%s%s; 0x%08zx%s%s", prefix,
+ parser->colours.comment, offset * sizeof(uint32_t), parser->colours.reset, suffix);
+}
+
+static void spirv_parser_print_string_literal(struct spirv_parser *parser, struct vkd3d_string_buffer *buffer,
+ const char *prefix, const char *s, size_t len, const char *suffix)
+{
+ vkd3d_string_buffer_printf(buffer, "%s\"%s", prefix, parser->colours.literal);
+ vkd3d_string_buffer_print_string_escaped(buffer, s, len);
+ vkd3d_string_buffer_printf(buffer, "%s\"%s", parser->colours.reset, suffix);
+}
+
+static const struct spirv_parser_enumerant *spirv_parser_get_enumerant(
+ const struct spirv_parser_operand_type_info *info, uint32_t value)
+{
+ const struct spirv_parser_enumerant *e;
+ size_t i;
+
+ for (i = 0; i < info->enumerant_count; ++i)
+ {
+ if ((e = &info->enumerants[i])->value == value)
+ return e;
+ }
+
+ return NULL;
+}
+
+static const struct spirv_parser_operand_type_info *spirv_parser_get_operand_type_info(enum spirv_parser_operand_type t)
+{
+ if (t >= ARRAY_SIZE(spirv_parser_operand_type_info))
+ return NULL;
+ return &spirv_parser_operand_type_info[t];
+}
+
+static int spirv_parser_opcode_info_compare(const void *key, const void *element)
+{
+ const struct spirv_parser_opcode_info *e = element;
+ const uint16_t *op = key;
+
+ return vkd3d_u32_compare(*op, e->op);
+}
+
+static const struct spirv_parser_opcode_info *spirv_parser_get_opcode_info(uint16_t op)
+{
+ return bsearch(&op, spirv_parser_opcode_info, ARRAY_SIZE(spirv_parser_opcode_info),
+ sizeof(*spirv_parser_opcode_info), spirv_parser_opcode_info_compare);
+}
+
static enum vkd3d_result spirv_parser_read_header(struct spirv_parser *parser)
{
uint32_t magic, version, generator, bound, schema;
@@ -357,36 +459,260 @@ static enum vkd3d_result spirv_parser_read_header(struct spirv_parser *parser)
return VKD3D_OK;
}
+static bool spirv_parser_parse_string_literal(struct spirv_parser *parser,
+ struct vkd3d_string_buffer *buffer, size_t end)
+{
+ const char *s = (const char *)&parser->code[parser->pos];
+ size_t len, max_len;
+
+ max_len = (end - parser->pos) * sizeof(uint32_t);
+ len = strnlen(s, max_len);
+ if (len == max_len)
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Insufficient words remaining while parsing string literal.");
+ return false;
+ }
+
+ spirv_parser_print_string_literal(parser, buffer, " ", s, len, "");
+ parser->pos += (len / sizeof(uint32_t)) + 1;
+
+ return true;
+}
+
+static uint32_t lowest_set(uint32_t v)
+{
+ return v & -v;
+}
+
+static bool spirv_parser_parse_operand(struct spirv_parser *parser, struct vkd3d_string_buffer *buffer,
+ const char *opcode_name, enum spirv_parser_operand_type type, size_t end, uint32_t *result_id)
+{
+ const struct spirv_parser_operand_type_info *info;
+ const struct spirv_parser_enumerant *e;
+ uint32_t word, tmp, v, i, j;
+
+ if (parser->pos >= end)
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Insufficient words remaining while parsing operands for instruction \"%s\".", opcode_name);
+ return false;
+ }
+
+ if (!(info = spirv_parser_get_operand_type_info(type)))
+ {
+ ERR("Invalid operand type %#x.\n", type);
+ return false;
+ }
+
+ if (info->category == SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM)
+ {
+ if (!(word = spirv_parser_read_u32(parser)))
+ {
+ if (!(e = spirv_parser_get_enumerant(info, word)))
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Unhandled enumeration value %#x.", word);
+ return false;
+ }
+ spirv_parser_print_enumerant(parser, buffer, " ", e->name, "");
+
+ for (j = 0; j < e->parameter_count; ++j)
+ {
+ if (!spirv_parser_parse_operand(parser, buffer, opcode_name, e->parameters[j], end, result_id))
+ return false;
+ }
+
+ return true;
+ }
+
+ for (i = 0, tmp = word; tmp; ++i, tmp ^= v)
+ {
+ v = lowest_set(tmp);
+ if (!(e = spirv_parser_get_enumerant(info, v)))
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Unhandled enumeration value %#x.", v);
+ return false;
+ }
+ spirv_parser_print_enumerant(parser, buffer, i ? " | " : " ", e->name, "");
+ }
+
+ for (i = 0, tmp = word; tmp; ++i, tmp ^= v)
+ {
+ v = lowest_set(tmp);
+ if (!(e = spirv_parser_get_enumerant(info, v)))
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Unhandled enumeration value %#x.", v);
+ return false;
+ }
+
+ for (j = 0; j < e->parameter_count; ++j)
+ {
+ if (!spirv_parser_parse_operand(parser, buffer, opcode_name, e->parameters[j], end, result_id))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ if (info->category == SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM)
+ {
+ word = spirv_parser_read_u32(parser);
+ if (!(e = spirv_parser_get_enumerant(info, word)))
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Unhandled \"%s\" enumeration value %#x.", info->name, word);
+ return false;
+ }
+ spirv_parser_print_enumerant(parser, buffer, " ", e->name, "");
+
+ for (i = 0; i < e->parameter_count; ++i)
+ {
+ if (!spirv_parser_parse_operand(parser, buffer, opcode_name, e->parameters[i], end, result_id))
+ return false;
+ }
+
+ return true;
+ }
+
+ switch (type)
+ {
+ case SPIRV_PARSER_OPERAND_TYPE_ID_REF:
+ case SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE:
+ spirv_parser_print_id(parser, buffer, " ", spirv_parser_read_u32(parser), "");
+ return true;
+
+ case SPIRV_PARSER_OPERAND_TYPE_ID_RESULT:
+ if (*result_id)
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Instruction has multiple results.");
+ return false;
+ }
+ *result_id = spirv_parser_read_u32(parser);
+ return true;
+
+ case SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER:
+ spirv_parser_print_uint_literal(parser, buffer, " ", spirv_parser_read_u32(parser), "");
+ return true;
+
+ case SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING:
+ return spirv_parser_parse_string_literal(parser, buffer, end);
+
+ default:
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Unhandled operand type \"%s\".", info->name);
+ return false;
+ }
+}
+
+static void spirv_parser_parse_raw_instruction(struct spirv_parser *parser, uint16_t count)
+{
+ size_t pos = parser->pos;
+ size_t i;
+
+ if (parser->formatting & VKD3D_SHADER_COMPILE_OPTION_FORMATTING_INDENT)
+ vkd3d_string_buffer_printf(parser->text, "%*s", VKD3D_SPIRV_INDENT, "");
+ for (i = 0; i < count; ++i)
+ {
+ spirv_parser_print_immediate_word(parser, parser->text, i ? " " : "", spirv_parser_read_u32(parser), "");
+ }
+ if (parser->formatting & VKD3D_SHADER_COMPILE_OPTION_FORMATTING_OFFSETS)
+ spirv_parser_print_instruction_offset(parser, parser->text, " ", pos, "");
+ vkd3d_string_buffer_printf(parser->text, "\n");
+}
+
static enum vkd3d_result spirv_parser_parse_instruction(struct spirv_parser *parser)
{
- struct vkd3d_string_buffer *buffer;
- uint16_t op, count;
- unsigned int i;
- uint32_t word;
+ const struct spirv_parser_instruction_operand *operand;
+ struct vkd3d_string_buffer *operands, *result_name;
+ const struct spirv_parser_opcode_info *info;
+ uint32_t result_id, word;
+ uint16_t op, count, rem;
+ size_t end, pos, i;
+ pos = parser->pos;
word = spirv_parser_read_u32(parser);
count = (word & VKD3D_SPIRV_INSTRUCTION_WORD_COUNT_MASK) >> VKD3D_SPIRV_INSTRUCTION_WORD_COUNT_SHIFT;
op = (word & VKD3D_SPIRV_INSTRUCTION_OP_MASK) >> VKD3D_SPIRV_INSTRUCTION_OP_SHIFT;
- if (!count)
+ if (!count || count > parser->size - pos)
{
spirv_parser_error(parser, VKD3D_SHADER_ERROR_SPV_INVALID_SHADER,
"Invalid word count %u.", count);
return VKD3D_ERROR_INVALID_SHADER;
}
- --count;
- buffer = vkd3d_string_buffer_get(&parser->string_buffers);
- for (i = 0; i < count; ++i)
+ if (!(info = spirv_parser_get_opcode_info(op)))
{
- word = spirv_parser_read_u32(parser);
- vkd3d_string_buffer_printf(buffer, " 0x%08x", word);
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "Unrecognised instruction %#x.", op);
+ goto raw;
+ }
+
+ operands = vkd3d_string_buffer_get(&parser->string_buffers);
+
+ result_id = 0;
+ for (i = 0, end = pos + count; i < info->operand_count; ++i)
+ {
+ operand = &info->operands[i];
+
+ do
+ {
+ if (parser->pos >= end && (operand->quantifier == '?' || operand->quantifier == '*'))
+ break;
+
+ if (!spirv_parser_parse_operand(parser, operands, info->name, operand->type, end, &result_id))
+ {
+ vkd3d_string_buffer_release(&parser->string_buffers, operands);
+ goto raw;
+ }
+ } while (operand->quantifier == '*' && parser->pos < end);
+ }
+
+ if ((rem = end - parser->pos))
+ {
+ spirv_parser_warning(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
+ "%u word(s) remaining after parsing all operands for instruction \"%s\"", rem, info->name);
+ for (i = 0; i < rem; ++i)
+ {
+ spirv_parser_print_immediate_word(parser, operands, " ", spirv_parser_read_u32(parser), "");
+ }
+ }
+
+ if (result_id)
+ {
+ int max_indent = 0;
+
+ if (parser->formatting & VKD3D_SHADER_COMPILE_OPTION_FORMATTING_INDENT)
+ max_indent = VKD3D_SPIRV_INDENT - 4;
+ result_name = vkd3d_string_buffer_get(&parser->string_buffers);
+ vkd3d_string_buffer_printf(result_name, "%u", result_id);
+ vkd3d_string_buffer_printf(parser->text, "%*s%%%s%s%s = ",
+ result_name->content_size > max_indent ? 0 : max_indent - (int)result_name->content_size, "",
+ parser->colours.id, result_name->buffer, parser->colours.reset);
+ vkd3d_string_buffer_release(&parser->string_buffers, result_name);
}
- spirv_parser_print_comment(parser, "<unrecognised instruction %#x>%s", op, buffer->buffer);
- vkd3d_string_buffer_release(&parser->string_buffers, buffer);
+ else if (parser->formatting & VKD3D_SHADER_COMPILE_OPTION_FORMATTING_INDENT)
+ {
+ vkd3d_string_buffer_printf(parser->text, "%*s", VKD3D_SPIRV_INDENT, "");
+ }
+ spirv_parser_print_opcode(parser, parser->text, info->name);
+ vkd3d_string_buffer_printf(parser->text, "%s", operands->buffer);
+ if (parser->formatting & VKD3D_SHADER_COMPILE_OPTION_FORMATTING_OFFSETS)
+ spirv_parser_print_instruction_offset(parser, parser->text, " ", pos, "");
+ vkd3d_string_buffer_printf(parser->text, "\n");
+
+ vkd3d_string_buffer_release(&parser->string_buffers, operands);
- spirv_parser_error(parser, VKD3D_SHADER_ERROR_SPV_NOT_IMPLEMENTED,
- "Unrecognised instruction %#x.", op);
+ return VKD3D_OK;
+
+raw:
+ parser->pos = pos;
+ spirv_parser_parse_raw_instruction(parser, count);
return VKD3D_OK;
}
@@ -441,11 +767,19 @@ static enum vkd3d_result spirv_parser_init(struct spirv_parser *parser, const st
{
.reset = "",
.comment = "",
+ .literal = "",
+ .enumerant = "",
+ .opcode = "",
+ .id = "",
};
static const struct spirv_colours colours =
{
.reset = "\x1b[m",
.comment = "\x1b[36m",
+ .literal = "\x1b[95m",
+ .enumerant = "\x1b[93m",
+ .opcode = "\x1b[96;1m",
+ .id = "\x1b[96m",
};
memset(parser, 0, sizeof(*parser));
@@ -829,16 +1163,16 @@ static unsigned int vkd3d_spirv_string_word_count(const char *str)
static void vkd3d_spirv_build_string(struct vkd3d_spirv_stream *stream,
const char *str, unsigned int word_count)
{
- unsigned int word_idx, i;
- const char *ptr = str;
+ uint32_t *ptr;
- for (word_idx = 0; word_idx < word_count; ++word_idx)
- {
- uint32_t word = 0;
- for (i = 0; i < sizeof(uint32_t) && *ptr; ++i)
- word |= (uint32_t)*ptr++ << (8 * i);
- vkd3d_spirv_build_word(stream, word);
- }
+ if (!vkd3d_array_reserve((void **)&stream->words, &stream->capacity,
+ stream->word_count + word_count, sizeof(*stream->words)))
+ return;
+
+ ptr = &stream->words[stream->word_count];
+ ptr[word_count - 1] = 0;
+ memcpy(ptr, str, strlen(str));
+ stream->word_count += word_count;
}
typedef uint32_t (*vkd3d_spirv_build_pfn)(struct vkd3d_spirv_builder *builder);
@@ -2757,9 +3091,6 @@ struct spirv_compiler
} *spirv_parameter_info;
bool prolog_emitted;
- struct shader_signature input_signature;
- struct shader_signature output_signature;
- struct shader_signature patch_constant_signature;
const struct vkd3d_shader_transform_feedback_info *xfb_info;
struct vkd3d_shader_output_info
{
@@ -2774,7 +3105,6 @@ struct spirv_compiler
uint32_t binding_idx;
- const struct vkd3d_shader_scan_descriptor_info1 *scan_descriptor_info;
unsigned int input_control_point_count;
unsigned int output_control_point_count;
@@ -2852,10 +3182,6 @@ static void spirv_compiler_destroy(struct spirv_compiler *compiler)
vkd3d_string_buffer_cache_cleanup(&compiler->string_buffers);
- shader_signature_cleanup(&compiler->input_signature);
- shader_signature_cleanup(&compiler->output_signature);
- shader_signature_cleanup(&compiler->patch_constant_signature);
-
vkd3d_free(compiler->ssa_register_info);
vkd3d_free(compiler->block_label_ids);
@@ -2864,7 +3190,6 @@ static void spirv_compiler_destroy(struct spirv_compiler *compiler)
static struct spirv_compiler *spirv_compiler_create(const struct vsir_program *program,
const struct vkd3d_shader_compile_info *compile_info,
- const struct vkd3d_shader_scan_descriptor_info1 *scan_descriptor_info,
struct vkd3d_shader_message_context *message_context, uint64_t config_flags)
{
const struct vkd3d_shader_interface_info *shader_interface;
@@ -2880,6 +3205,7 @@ static struct spirv_compiler *spirv_compiler_create(const struct vsir_program *p
compiler->message_context = message_context;
compiler->location.source_name = compile_info->source_name;
compiler->config_flags = config_flags;
+ compiler->program = program;
if ((target_info = vkd3d_find_struct(compile_info->next, SPIRV_TARGET_INFO)))
{
@@ -3006,8 +3332,6 @@ static struct spirv_compiler *spirv_compiler_create(const struct vsir_program *p
else if (compiler->shader_type != VKD3D_SHADER_TYPE_GEOMETRY)
compiler->emit_point_size = compiler->xfb_info && compiler->xfb_info->element_count;
- compiler->scan_descriptor_info = scan_descriptor_info;
-
compiler->phase = VKD3DSIH_INVALID;
vkd3d_string_buffer_cache_init(&compiler->string_buffers);
@@ -3375,7 +3699,8 @@ static uint32_t spirv_compiler_get_constant(struct spirv_compiler *compiler,
"Vectors of bool type are not supported.");
return vkd3d_spirv_get_op_undef(builder, type_id);
default:
- FIXME("Unhandled component_type %#x.\n", component_type);
+ spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_INVALID_TYPE,
+ "Unhandled component_type %#x.", component_type);
return vkd3d_spirv_get_op_undef(builder, type_id);
}
@@ -5471,7 +5796,7 @@ static uint32_t spirv_compiler_emit_input(struct spirv_compiler *compiler,
unsigned int array_sizes[2];
shader_signature = reg_type == VKD3DSPR_PATCHCONST
- ? &compiler->patch_constant_signature : &compiler->input_signature;
+ ? &compiler->program->patch_constant_signature : &compiler->program->input_signature;
signature_element = &shader_signature->elements[element_idx];
sysval = signature_element->sysval_semantic;
@@ -5549,7 +5874,7 @@ static uint32_t spirv_compiler_emit_input(struct spirv_compiler *compiler,
if (reg_type == VKD3DSPR_PATCHCONST)
{
vkd3d_spirv_build_op_decorate(builder, input_id, SpvDecorationPatch, NULL, 0);
- location += shader_signature_next_location(&compiler->input_signature);
+ location += shader_signature_next_location(&compiler->program->input_signature);
}
vkd3d_spirv_build_op_decorate1(builder, input_id, SpvDecorationLocation, location);
if (component_idx)
@@ -5683,7 +6008,7 @@ static void calculate_clip_or_cull_distance_mask(const struct signature_element
/* Emits arrayed SPIR-V built-in variables. */
static void spirv_compiler_emit_shader_signature_outputs(struct spirv_compiler *compiler)
{
- const struct shader_signature *output_signature = &compiler->output_signature;
+ const struct shader_signature *output_signature = &compiler->program->output_signature;
uint32_t clip_distance_mask = 0, clip_distance_id = 0;
uint32_t cull_distance_mask = 0, cull_distance_id = 0;
const struct vkd3d_spirv_builtin *builtin;
@@ -5793,7 +6118,8 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
is_patch_constant = (reg_type == VKD3DSPR_PATCHCONST);
- shader_signature = is_patch_constant ? &compiler->patch_constant_signature : &compiler->output_signature;
+ shader_signature = is_patch_constant ? &compiler->program->patch_constant_signature
+ : &compiler->program->output_signature;
signature_element = &shader_signature->elements[element_idx];
sysval = signature_element->sysval_semantic;
@@ -5867,7 +6193,7 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
unsigned int location = signature_element->target_location;
if (is_patch_constant)
- location += shader_signature_next_location(&compiler->output_signature);
+ location += shader_signature_next_location(&compiler->program->output_signature);
else if (compiler->shader_type == VKD3D_SHADER_TYPE_PIXEL
&& signature_element->sysval_semantic == VKD3D_SHADER_SV_TARGET)
location = signature_element->semantic_index;
@@ -6057,7 +6383,8 @@ static void spirv_compiler_emit_shader_epilogue_function(struct spirv_compiler *
is_patch_constant = is_in_fork_or_join_phase(compiler);
- signature = is_patch_constant ? &compiler->patch_constant_signature : &compiler->output_signature;
+ signature = is_patch_constant ? &compiler->program->patch_constant_signature
+ : &compiler->program->output_signature;
function_id = compiler->epilogue_function_id;
@@ -6401,7 +6728,7 @@ static const struct vkd3d_shader_descriptor_info1 *spirv_compiler_get_descriptor
struct spirv_compiler *compiler, enum vkd3d_shader_descriptor_type type,
const struct vkd3d_shader_register_range *range)
{
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info = compiler->scan_descriptor_info;
+ const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info = &compiler->program->descriptors;
unsigned int register_last = (range->last == ~0u) ? range->first : range->last;
const struct vkd3d_shader_descriptor_info1 *d;
unsigned int i;
@@ -6904,6 +7231,13 @@ static void spirv_compiler_emit_workgroup_memory(struct spirv_compiler *compiler
const SpvStorageClass storage_class = SpvStorageClassWorkgroup;
struct vkd3d_symbol reg_symbol;
+ if (zero_init && !(compiler->features & VKD3D_SHADER_COMPILE_OPTION_FEATURE_ZERO_INITIALIZE_WORKGROUP_MEMORY))
+ {
+ WARN("Unsupported zero-initialized workgroup memory.\n");
+ spirv_compiler_error(compiler, VKD3D_SHADER_ERROR_SPV_UNSUPPORTED_FEATURE,
+ "The target environment does not support zero-initialized workgroup memory.");
+ }
+
/* Alignment is supported only in the Kernel execution model. */
if (alignment)
TRACE("Ignoring alignment %u.\n", alignment);
@@ -10772,20 +11106,20 @@ static void spirv_compiler_emit_io_declarations(struct spirv_compiler *compiler)
{
struct vkd3d_shader_dst_param dst;
- for (unsigned int i = 0; i < compiler->input_signature.element_count; ++i)
+ for (unsigned int i = 0; i < compiler->program->input_signature.element_count; ++i)
spirv_compiler_emit_input(compiler, VKD3DSPR_INPUT, i);
- for (unsigned int i = 0; i < compiler->output_signature.element_count; ++i)
+ for (unsigned int i = 0; i < compiler->program->output_signature.element_count; ++i)
{
/* PS outputs other than TARGET have dedicated registers and therefore
* go through spirv_compiler_emit_dcl_output() for now. */
if (compiler->shader_type == VKD3D_SHADER_TYPE_PIXEL
- && compiler->output_signature.elements[i].sysval_semantic != VKD3D_SHADER_SV_TARGET)
+ && compiler->program->output_signature.elements[i].sysval_semantic != VKD3D_SHADER_SV_TARGET)
continue;
spirv_compiler_emit_output(compiler, VKD3DSPR_OUTPUT, i);
}
- for (unsigned int i = 0; i < compiler->patch_constant_signature.element_count; ++i)
+ for (unsigned int i = 0; i < compiler->program->patch_constant_signature.element_count; ++i)
{
if (compiler->shader_type == VKD3D_SHADER_TYPE_HULL)
spirv_compiler_emit_output(compiler, VKD3DSPR_PATCHCONST, i);
@@ -10821,11 +11155,12 @@ static void spirv_compiler_emit_io_declarations(struct spirv_compiler *compiler)
static void spirv_compiler_emit_descriptor_declarations(struct spirv_compiler *compiler)
{
+ const struct vkd3d_shader_scan_descriptor_info1 *descriptors = &compiler->program->descriptors;
unsigned int i;
- for (i = 0; i < compiler->scan_descriptor_info->descriptor_count; ++i)
+ for (i = 0; i < descriptors->descriptor_count; ++i)
{
- const struct vkd3d_shader_descriptor_info1 *descriptor = &compiler->scan_descriptor_info->descriptors[i];
+ const struct vkd3d_shader_descriptor_info1 *descriptor = &descriptors->descriptors[i];
struct vkd3d_shader_register_range range;
range.first = descriptor->register_index;
@@ -10856,23 +11191,18 @@ static void spirv_compiler_emit_descriptor_declarations(struct spirv_compiler *c
}
}
-static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler, struct vsir_program *program,
+static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler,
const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_code *spirv)
{
const struct vkd3d_shader_spirv_target_info *info = compiler->spirv_target_info;
const struct vkd3d_shader_spirv_domain_shader_target_info *ds_info;
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
+ const struct vsir_program *program = compiler->program;
struct vkd3d_shader_instruction_array instructions;
enum vkd3d_shader_spirv_environment environment;
enum vkd3d_result result = VKD3D_OK;
unsigned int i, max_element_count;
- if ((result = vsir_program_transform(program, compiler->config_flags,
- compile_info, compiler->message_context)) < 0)
- return result;
-
- VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_SM6);
-
max_element_count = max(program->output_signature.element_count, program->patch_constant_signature.element_count);
if (!(compiler->output_info = vkd3d_calloc(max_element_count, sizeof(*compiler->output_info))))
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -10919,17 +11249,8 @@ static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler, struct
if (program->block_count && !spirv_compiler_init_blocks(compiler, program->block_count))
return VKD3D_ERROR_OUT_OF_MEMORY;
- compiler->program = program;
-
instructions = program->instructions;
- memset(&program->instructions, 0, sizeof(program->instructions));
-
- compiler->input_signature = program->input_signature;
- compiler->output_signature = program->output_signature;
- compiler->patch_constant_signature = program->patch_constant_signature;
- memset(&program->input_signature, 0, sizeof(program->input_signature));
- memset(&program->output_signature, 0, sizeof(program->output_signature));
- memset(&program->patch_constant_signature, 0, sizeof(program->patch_constant_signature));
+
compiler->use_vocp = program->use_vocp;
compiler->block_names = program->block_names;
compiler->block_name_count = program->block_name_count;
@@ -10949,8 +11270,6 @@ static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler, struct
result = spirv_compiler_handle_instruction(compiler, &instructions.elements[i]);
}
- shader_instruction_array_destroy(&instructions);
-
if (result < 0)
return result;
@@ -11032,21 +11351,26 @@ static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler, struct
}
int spirv_compile(struct vsir_program *program, uint64_t config_flags,
- const struct vkd3d_shader_scan_descriptor_info1 *scan_descriptor_info,
const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context)
{
struct spirv_compiler *spirv_compiler;
int ret;
+ if ((ret = vsir_program_transform(program, config_flags, compile_info, message_context)) < 0)
+ return ret;
+
+ VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_SM6);
+ VKD3D_ASSERT(program->has_descriptor_info);
+
if (!(spirv_compiler = spirv_compiler_create(program, compile_info,
- scan_descriptor_info, message_context, config_flags)))
+ message_context, config_flags)))
{
ERR("Failed to create SPIR-V compiler.\n");
return VKD3D_ERROR;
}
- ret = spirv_compiler_generate_spirv(spirv_compiler, program, compile_info, out);
+ ret = spirv_compiler_generate_spirv(spirv_compiler, compile_info, out);
spirv_compiler_destroy(spirv_compiler);
return ret;
diff --git a/libs/vkd3d/libs/vkd3d-shader/tpf.c b/libs/vkd3d/libs/vkd3d-shader/tpf.c
index 82302aac666..23dab35a288 100644
--- a/libs/vkd3d/libs/vkd3d-shader/tpf.c
+++ b/libs/vkd3d/libs/vkd3d-shader/tpf.c
@@ -714,6 +714,22 @@ input_primitive_type_table[] =
[VKD3D_SM4_INPUT_PT_TRIANGLEADJ] = {6, VKD3D_PT_TRIANGLELIST_ADJ},
};
+static const enum vkd3d_sm4_input_primitive_type sm4_input_primitive_type_table[] =
+{
+ [VKD3D_PT_POINTLIST] = VKD3D_SM4_INPUT_PT_POINT,
+ [VKD3D_PT_LINELIST] = VKD3D_SM4_INPUT_PT_LINE,
+ [VKD3D_PT_TRIANGLELIST] = VKD3D_SM4_INPUT_PT_TRIANGLE,
+ [VKD3D_PT_LINELIST_ADJ] = VKD3D_SM4_INPUT_PT_LINEADJ,
+ [VKD3D_PT_TRIANGLELIST_ADJ] = VKD3D_SM4_INPUT_PT_TRIANGLEADJ,
+};
+
+static const enum vkd3d_sm4_output_primitive_type sm4_output_primitive_type_table[] =
+{
+ [VKD3D_PT_POINTLIST] = VKD3D_SM4_OUTPUT_PT_POINTLIST,
+ [VKD3D_PT_LINESTRIP] = VKD3D_SM4_OUTPUT_PT_LINESTRIP,
+ [VKD3D_PT_TRIANGLESTRIP] = VKD3D_SM4_OUTPUT_PT_TRIANGLESTRIP,
+};
+
static const enum vkd3d_shader_resource_type resource_type_table[] =
{
/* 0 */ VKD3D_SHADER_RESOURCE_NONE,
@@ -1051,7 +1067,8 @@ static void shader_sm4_read_dcl_index_range(struct vkd3d_shader_instruction *ins
register_idx, register_count, write_mask, e->sysval_semantic);
return;
}
- if ((io_masks[register_idx + i] & write_mask) != write_mask)
+ if ((io_masks[register_idx + i] & write_mask) != write_mask
+ && (io_masks[register_idx + i] & write_mask) != 0)
{
WARN("No matching declaration for index range base %u, count %u, mask %#x.\n",
register_idx, register_count, write_mask);
@@ -1076,6 +1093,8 @@ static void shader_sm4_read_dcl_output_topology(struct vkd3d_shader_instruction
if (ins->declaration.primitive_type.type == VKD3D_PT_UNDEFINED)
FIXME("Unhandled output primitive type %#x.\n", primitive_type);
+
+ priv->p.program->output_topology = ins->declaration.primitive_type.type;
}
static void shader_sm4_read_dcl_input_primitive(struct vkd3d_shader_instruction *ins, uint32_t opcode,
@@ -1103,6 +1122,8 @@ static void shader_sm4_read_dcl_input_primitive(struct vkd3d_shader_instruction
if (ins->declaration.primitive_type.type == VKD3D_PT_UNDEFINED)
FIXME("Unhandled input primitive type %#x.\n", primitive_type);
+
+ program->input_primitive = ins->declaration.primitive_type.type;
}
static void shader_sm4_read_declaration_count(struct vkd3d_shader_instruction *ins, uint32_t opcode,
@@ -1113,6 +1134,8 @@ static void shader_sm4_read_declaration_count(struct vkd3d_shader_instruction *i
ins->declaration.count = *tokens;
if (opcode == VKD3D_SM4_OP_DCL_TEMPS)
program->temp_count = max(program->temp_count, *tokens);
+ else if (opcode == VKD3D_SM4_OP_DCL_VERTICES_OUT)
+ program->vertices_out_count = *tokens;
}
static void shader_sm4_read_declaration_dst(struct vkd3d_shader_instruction *ins, uint32_t opcode,
@@ -1720,7 +1743,7 @@ static void init_sm4_lookup_tables(struct vkd3d_sm4_lookup_tables *lookup)
{VKD3D_SM5_RT_LOCAL_THREAD_ID, VKD3DSPR_LOCALTHREADID, VKD3D_SM4_SWIZZLE_VEC4},
{VKD3D_SM5_RT_COVERAGE, VKD3DSPR_COVERAGE, VKD3D_SM4_SWIZZLE_VEC4},
{VKD3D_SM5_RT_LOCAL_THREAD_INDEX, VKD3DSPR_LOCALTHREADINDEX,VKD3D_SM4_SWIZZLE_VEC4},
- {VKD3D_SM5_RT_GS_INSTANCE_ID, VKD3DSPR_GSINSTID, VKD3D_SM4_SWIZZLE_VEC4},
+ {VKD3D_SM5_RT_GS_INSTANCE_ID, VKD3DSPR_GSINSTID, VKD3D_SM4_SWIZZLE_SCALAR},
{VKD3D_SM5_RT_DEPTHOUT_GREATER_EQUAL, VKD3DSPR_DEPTHOUTGE, VKD3D_SM4_SWIZZLE_VEC4},
{VKD3D_SM5_RT_DEPTHOUT_LESS_EQUAL, VKD3DSPR_DEPTHOUTLE, VKD3D_SM4_SWIZZLE_VEC4},
{VKD3D_SM5_RT_OUTPUT_STENCIL_REF, VKD3DSPR_OUTSTENCILREF, VKD3D_SM4_SWIZZLE_VEC4},
@@ -2990,6 +3013,7 @@ bool sm4_register_from_semantic_name(const struct vkd3d_shader_version *version,
{"sv_primitiveid", false, VKD3D_SHADER_TYPE_DOMAIN, VKD3DSPR_PRIMID, false},
{"sv_primitiveid", false, VKD3D_SHADER_TYPE_GEOMETRY, VKD3DSPR_PRIMID, false},
+ {"sv_gsinstanceid", false, VKD3D_SHADER_TYPE_GEOMETRY, VKD3DSPR_GSINSTID, false},
{"sv_outputcontrolpointid", false, VKD3D_SHADER_TYPE_HULL, VKD3DSPR_OUTPOINTID, false},
{"sv_primitiveid", false, VKD3D_SHADER_TYPE_HULL, VKD3DSPR_PRIMID, false},
@@ -3070,7 +3094,8 @@ static bool get_insidetessfactor_sysval_semantic(enum vkd3d_shader_sysval_semant
bool sm4_sysval_semantic_from_semantic_name(enum vkd3d_shader_sysval_semantic *sysval_semantic,
const struct vkd3d_shader_version *version, bool semantic_compat_mapping, enum vkd3d_tessellator_domain domain,
- const char *semantic_name, unsigned int semantic_idx, bool output, bool is_patch_constant_func, bool is_patch)
+ const char *semantic_name, unsigned int semantic_idx, bool output,
+ bool is_patch_constant_func, bool is_primitive)
{
unsigned int i;
@@ -3094,9 +3119,8 @@ bool sm4_sysval_semantic_from_semantic_name(enum vkd3d_shader_sysval_semantic *s
{"sv_position", true, VKD3D_SHADER_TYPE_DOMAIN, VKD3D_SHADER_SV_POSITION},
- {"position", false, VKD3D_SHADER_TYPE_GEOMETRY, VKD3D_SHADER_SV_POSITION},
- {"sv_position", false, VKD3D_SHADER_TYPE_GEOMETRY, VKD3D_SHADER_SV_POSITION},
{"sv_primitiveid", false, VKD3D_SHADER_TYPE_GEOMETRY, VKD3D_SHADER_SV_PRIMITIVE_ID},
+ {"sv_gsinstanceid", false, VKD3D_SHADER_TYPE_GEOMETRY, ~0u},
{"position", true, VKD3D_SHADER_TYPE_GEOMETRY, VKD3D_SHADER_SV_POSITION},
{"sv_position", true, VKD3D_SHADER_TYPE_GEOMETRY, VKD3D_SHADER_SV_POSITION},
@@ -3133,7 +3157,7 @@ bool sm4_sysval_semantic_from_semantic_name(enum vkd3d_shader_sysval_semantic *s
};
bool has_sv_prefix = !ascii_strncasecmp(semantic_name, "sv_", 3);
- if (is_patch)
+ if (is_primitive)
{
VKD3D_ASSERT(!output);
@@ -3197,6 +3221,8 @@ bool sm4_sysval_semantic_from_semantic_name(enum vkd3d_shader_sysval_semantic *s
if (has_sv_prefix)
return false;
+ if (!output && version->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ return false;
*sysval_semantic = VKD3D_SHADER_SV_NONE;
return true;
@@ -3228,6 +3254,7 @@ static int signature_element_pointer_compare(const void *x, const void *y)
static void tpf_write_signature(struct tpf_compiler *tpf, const struct shader_signature *signature, uint32_t tag)
{
+ bool has_minimum_precision = tpf->program->global_flags & VKD3DSGF_ENABLE_MINIMUM_PRECISION;
bool output = tag == TAG_OSGN || (tag == TAG_PCSG
&& tpf->program->shader_version.type == VKD3D_SHADER_TYPE_HULL);
const struct signature_element **sorted_elements;
@@ -3256,12 +3283,16 @@ static void tpf_write_signature(struct tpf_compiler *tpf, const struct shader_si
if (sysval >= VKD3D_SHADER_SV_TARGET)
sysval = VKD3D_SHADER_SV_NONE;
+ if (has_minimum_precision)
+ put_u32(&buffer, 0); /* FIXME: stream index */
put_u32(&buffer, 0); /* name */
put_u32(&buffer, element->semantic_index);
put_u32(&buffer, sysval);
put_u32(&buffer, element->component_type);
put_u32(&buffer, element->register_index);
put_u32(&buffer, vkd3d_make_u16(element->mask, used_mask));
+ if (has_minimum_precision)
+ put_u32(&buffer, element->min_precision);
}
for (i = 0; i < signature->element_count; ++i)
@@ -3270,9 +3301,21 @@ static void tpf_write_signature(struct tpf_compiler *tpf, const struct shader_si
size_t string_offset;
string_offset = put_string(&buffer, element->semantic_name);
- set_u32(&buffer, (2 + i * 6) * sizeof(uint32_t), string_offset);
+ if (has_minimum_precision)
+ set_u32(&buffer, (2 + i * 8 + 1) * sizeof(uint32_t), string_offset);
+ else
+ set_u32(&buffer, (2 + i * 6) * sizeof(uint32_t), string_offset);
}
+ if (has_minimum_precision)
+ {
+ if (tag == TAG_ISGN)
+ tag = TAG_ISG1;
+ else if (tag == TAG_OSGN || tag == TAG_OSG5)
+ tag = TAG_OSG1;
+ else if (tag == TAG_PCSG)
+ tag = TAG_PSG1;
+ }
add_section(tpf, tag, &buffer);
vkd3d_free(sorted_elements);
}
@@ -3444,12 +3487,16 @@ static void sm4_write_register_index(const struct tpf_compiler *tpf, const struc
unsigned int j)
{
unsigned int addressing = sm4_get_index_addressing_from_reg(reg, j);
+ const struct vkd3d_shader_register_index *idx = &reg->idx[j];
struct vkd3d_bytecode_buffer *buffer = tpf->buffer;
unsigned int k;
+ if (!addressing || (addressing & VKD3D_SM4_ADDRESSING_OFFSET))
+ put_u32(buffer, idx->offset);
+
if (addressing & VKD3D_SM4_ADDRESSING_RELATIVE)
{
- const struct vkd3d_shader_src_param *idx_src = reg->idx[j].rel_addr;
+ const struct vkd3d_shader_src_param *idx_src = idx->rel_addr;
uint32_t idx_src_token;
VKD3D_ASSERT(idx_src);
@@ -3464,10 +3511,6 @@ static void sm4_write_register_index(const struct tpf_compiler *tpf, const struc
VKD3D_ASSERT(!idx_src->reg.idx[k].rel_addr);
}
}
- else
- {
- put_u32(tpf->buffer, reg->idx[j].offset);
- }
}
static void sm4_write_dst_register(const struct tpf_compiler *tpf, const struct vkd3d_shader_dst_param *dst)
@@ -3912,6 +3955,57 @@ static void tpf_write_dcl_tessellator_output_primitive(const struct tpf_compiler
write_sm4_instruction(tpf, &instr);
}
+static void tpf_write_dcl_input_primitive(const struct tpf_compiler *tpf, enum vkd3d_primitive_type input_primitive,
+ unsigned int patch_vertex_count)
+{
+ enum vkd3d_sm4_input_primitive_type sm4_input_primitive;
+ struct sm4_instruction instr =
+ {
+ .opcode = VKD3D_SM4_OP_DCL_INPUT_PRIMITIVE,
+ };
+
+ if (input_primitive == VKD3D_PT_PATCH)
+ {
+ VKD3D_ASSERT(patch_vertex_count >= 1 && patch_vertex_count <= 32);
+ sm4_input_primitive = VKD3D_SM5_INPUT_PT_PATCH1 + patch_vertex_count - 1;
+ }
+ else
+ {
+ VKD3D_ASSERT(input_primitive < ARRAY_SIZE(sm4_input_primitive_type_table));
+ sm4_input_primitive = sm4_input_primitive_type_table[input_primitive];
+ }
+
+ instr.extra_bits = sm4_input_primitive << VKD3D_SM4_PRIMITIVE_TYPE_SHIFT;
+
+ write_sm4_instruction(tpf, &instr);
+}
+
+static void tpf_write_dcl_output_topology(const struct tpf_compiler *tpf, enum vkd3d_primitive_type output_topology)
+{
+ struct sm4_instruction instr =
+ {
+ .opcode = VKD3D_SM4_OP_DCL_OUTPUT_TOPOLOGY,
+ };
+
+ VKD3D_ASSERT(output_topology < ARRAY_SIZE(sm4_output_primitive_type_table));
+ instr.extra_bits = sm4_output_primitive_type_table[output_topology] << VKD3D_SM4_PRIMITIVE_TYPE_SHIFT;
+
+ write_sm4_instruction(tpf, &instr);
+}
+
+static void tpf_write_dcl_vertices_out(const struct tpf_compiler *tpf, unsigned int count)
+{
+ struct sm4_instruction instr =
+ {
+ .opcode = VKD3D_SM4_OP_DCL_VERTICES_OUT,
+
+ .idx = {count},
+ .idx_count = 1,
+ };
+
+ write_sm4_instruction(tpf, &instr);
+}
+
static void tpf_simple_instruction(struct tpf_compiler *tpf, const struct vkd3d_shader_instruction *ins)
{
struct sm4_instruction_modifier *modifier;
@@ -4215,6 +4309,13 @@ static void tpf_write_shdr(struct tpf_compiler *tpf)
tpf_write_dcl_input_control_point_count(tpf, program->input_control_point_count);
tpf_write_dcl_tessellator_domain(tpf, program->tess_domain);
}
+ else if (version->type == VKD3D_SHADER_TYPE_GEOMETRY)
+ {
+ tpf_write_dcl_input_primitive(tpf, program->input_primitive, program->input_control_point_count);
+ if (program->output_topology != VKD3D_PT_UNDEFINED)
+ tpf_write_dcl_output_topology(tpf, program->output_topology);
+ tpf_write_dcl_vertices_out(tpf, program->vertices_out_count);
+ }
tpf_write_program(tpf, program);
@@ -4233,6 +4334,9 @@ static void tpf_write_sfi0(struct tpf_compiler *tpf)
if (tpf->program->features.rovs)
*flags |= DXBC_SFI0_REQUIRES_ROVS;
+ if (tpf->program->global_flags & VKD3DSGF_ENABLE_MINIMUM_PRECISION)
+ *flags |= DXBC_SFI0_REQUIRES_MINIMUM_PRECISION;
+
/* FIXME: We also emit code that should require UAVS_AT_EVERY_STAGE,
* STENCIL_REF, and TYPED_UAV_LOAD_ADDITIONAL_FORMATS. */
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
index 021691bb3a1..9191429c439 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
@@ -23,6 +23,8 @@
#include <stdio.h>
#include <math.h>
+/* VKD3D_DEBUG_ENV_NAME("VKD3D_SHADER_DEBUG"); */
+
static inline int char_to_int(char c)
{
if ('0' <= c && c <= '9')
@@ -161,6 +163,60 @@ int vkd3d_string_buffer_print_f64(struct vkd3d_string_buffer *buffer, double d)
return ret;
}
+static char get_escape_char(char c)
+{
+ switch (c)
+ {
+ case '"':
+ case '\\':
+ return c;
+ case '\t':
+ return 't';
+ case '\n':
+ return 'n';
+ case '\v':
+ return 'v';
+ case '\f':
+ return 'f';
+ case '\r':
+ return 'r';
+ default:
+ return 0;
+ }
+}
+
+int vkd3d_string_buffer_print_string_escaped(struct vkd3d_string_buffer *buffer, const char *s, size_t len)
+{
+ size_t content_size, start, i;
+ int ret;
+ char c;
+
+ content_size = buffer->content_size;
+ for (i = 0, start = 0; i < len; ++i)
+ {
+ if ((c = get_escape_char(s[i])))
+ {
+ if ((ret = vkd3d_string_buffer_printf(buffer, "%.*s\\%c", (int)(i - start), &s[start], c)) < 0)
+ goto fail;
+ start = i + 1;
+ }
+ else if (!isprint(s[i]))
+ {
+ if ((ret = vkd3d_string_buffer_printf(buffer, "%.*s\\%03o",
+ (int)(i - start), &s[start], (uint8_t)s[i])) < 0)
+ goto fail;
+ start = i + 1;
+ }
+ }
+ if ((ret = vkd3d_string_buffer_printf(buffer, "%.*s", (int)(len - start), &s[start])) < 0)
+ goto fail;
+ return ret;
+
+fail:
+ buffer->content_size = content_size;
+ return ret;
+}
+
void vkd3d_string_buffer_trace_(const struct vkd3d_string_buffer *buffer, const char *function)
{
vkd3d_shader_trace_text_(buffer->buffer, buffer->content_size, function);
@@ -454,8 +510,15 @@ struct shader_dump_data
const char *target_suffix;
};
+enum shader_dump_type
+{
+ SHADER_DUMP_TYPE_SOURCE,
+ SHADER_DUMP_TYPE_PREPROC,
+ SHADER_DUMP_TYPE_TARGET,
+};
+
static void vkd3d_shader_dump_shader(const struct shader_dump_data *dump_data,
- const void *data, size_t size, bool source)
+ const void *data, size_t size, enum shader_dump_type type)
{
static const char hexadecimal_digits[] = "0123456789abcdef";
const uint8_t *checksum = dump_data->checksum;
@@ -480,8 +543,10 @@ static void vkd3d_shader_dump_shader(const struct shader_dump_data *dump_data,
if (dump_data->profile)
pos += snprintf(filename + pos, ARRAY_SIZE(filename) - pos, "-%s", dump_data->profile);
- if (source)
+ if (type == SHADER_DUMP_TYPE_SOURCE)
pos += snprintf(filename + pos, ARRAY_SIZE(filename) - pos, "-source.%s", dump_data->source_suffix);
+ else if (type == SHADER_DUMP_TYPE_PREPROC)
+ pos += snprintf(filename + pos, ARRAY_SIZE(filename) - pos, "-preproc.%s", dump_data->source_suffix);
else
pos += snprintf(filename + pos, ARRAY_SIZE(filename) - pos, "-target.%s", dump_data->target_suffix);
@@ -737,12 +802,20 @@ void vkd3d_shader_free_messages(char *messages)
static bool vkd3d_shader_signature_from_shader_signature(struct vkd3d_shader_signature *signature,
const struct shader_signature *src)
{
- unsigned int i;
+ struct vkd3d_shader_signature_element *d;
+ const struct signature_element *e;
+ size_t count, i, j;
+
+ for (i = 0, count = 0; i < src->element_count; ++i)
+ {
+ e = &src->elements[i];
+ count += e->register_count;
+ }
- signature->element_count = src->element_count;
+ signature->element_count = count;
if (!src->elements)
{
- VKD3D_ASSERT(!signature->element_count);
+ VKD3D_ASSERT(!count);
signature->elements = NULL;
return true;
}
@@ -750,30 +823,25 @@ static bool vkd3d_shader_signature_from_shader_signature(struct vkd3d_shader_sig
if (!(signature->elements = vkd3d_calloc(signature->element_count, sizeof(*signature->elements))))
return false;
- for (i = 0; i < signature->element_count; ++i)
+ for (i = 0, d = signature->elements; i < src->element_count; ++i)
{
- struct vkd3d_shader_signature_element *d = &signature->elements[i];
- struct signature_element *e = &src->elements[i];
-
- if (!(d->semantic_name = vkd3d_strdup(e->semantic_name)))
+ for (j = 0, e = &src->elements[i]; j < e->register_count; ++j)
{
- for (unsigned int j = 0; j < i; ++j)
+ if (!(d->semantic_name = vkd3d_strdup(e->semantic_name)))
{
- vkd3d_free((void *)signature->elements[j].semantic_name);
+ vkd3d_shader_free_shader_signature(signature);
+ return false;
}
- vkd3d_free(signature->elements);
- return false;
+ d->semantic_index = e->semantic_index + j;
+ d->stream_index = e->stream_index;
+ d->sysval_semantic = e->sysval_semantic;
+ d->component_type = e->component_type;
+ d->register_index = e->register_index + j;
+ d->mask = e->mask;
+ d->used_mask = e->used_mask;
+ d->min_precision = e->min_precision;
+ ++d;
}
- d->semantic_index = e->semantic_index;
- d->stream_index = e->stream_index;
- d->sysval_semantic = e->sysval_semantic;
- d->component_type = e->component_type;
- d->register_index = e->register_index;
- if (e->register_count > 1)
- FIXME("Arrayed elements are not supported yet.\n");
- d->mask = e->mask;
- d->used_mask = e->used_mask;
- d->min_precision = e->min_precision;
}
return true;
@@ -1059,7 +1127,7 @@ static void vkd3d_shader_scan_combined_sampler_declaration(
&semantic->resource.range, semantic->resource_type, VKD3D_SHADER_RESOURCE_DATA_FLOAT);
}
-static const struct vkd3d_shader_descriptor_info1 *find_descriptor(
+const struct vkd3d_shader_descriptor_info1 *vkd3d_shader_find_descriptor(
const struct vkd3d_shader_scan_descriptor_info1 *info,
enum vkd3d_shader_descriptor_type type, unsigned int register_id)
{
@@ -1113,11 +1181,11 @@ static void vkd3d_shader_scan_combined_sampler_usage(struct vkd3d_shader_scan_co
if (dynamic_resource || dynamic_sampler)
return;
- if ((d = find_descriptor(context->scan_descriptor_info,
+ if ((d = vkd3d_shader_find_descriptor(context->scan_descriptor_info,
VKD3D_SHADER_DESCRIPTOR_TYPE_SRV, resource->idx[0].offset)))
resource_space = d->register_space;
- if (sampler && (d = find_descriptor(context->scan_descriptor_info,
+ if (sampler && (d = vkd3d_shader_find_descriptor(context->scan_descriptor_info,
VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER, sampler->idx[0].offset)))
sampler_space = d->register_space;
}
@@ -1501,7 +1569,7 @@ static enum vkd3d_result convert_descriptor_info(struct vkd3d_shader_scan_descri
return VKD3D_OK;
}
-static void vkd3d_shader_free_scan_descriptor_info1(struct vkd3d_shader_scan_descriptor_info1 *scan_descriptor_info)
+void vkd3d_shader_free_scan_descriptor_info1(struct vkd3d_shader_scan_descriptor_info1 *scan_descriptor_info)
{
TRACE("scan_descriptor_info %p.\n", scan_descriptor_info);
@@ -1509,12 +1577,10 @@ static void vkd3d_shader_free_scan_descriptor_info1(struct vkd3d_shader_scan_des
}
static int vsir_program_scan(struct vsir_program *program, const struct vkd3d_shader_compile_info *compile_info,
- struct vkd3d_shader_message_context *message_context,
- struct vkd3d_shader_scan_descriptor_info1 *descriptor_info1)
+ struct vkd3d_shader_message_context *message_context, bool add_descriptor_info)
{
struct vkd3d_shader_scan_combined_resource_sampler_info *combined_sampler_info;
struct vkd3d_shader_scan_hull_shader_tessellation_info *tessellation_info;
- struct vkd3d_shader_scan_descriptor_info1 local_descriptor_info1 = {0};
struct vkd3d_shader_scan_descriptor_info *descriptor_info;
struct vkd3d_shader_scan_signature_info *signature_info;
struct vkd3d_shader_instruction *instruction;
@@ -1523,29 +1589,25 @@ static int vsir_program_scan(struct vsir_program *program, const struct vkd3d_sh
unsigned int i;
descriptor_info = vkd3d_find_struct(compile_info->next, SCAN_DESCRIPTOR_INFO);
- if (descriptor_info1)
- {
- descriptor_info1->descriptors = NULL;
- descriptor_info1->descriptor_count = 0;
- }
- else if (descriptor_info)
- {
- descriptor_info1 = &local_descriptor_info1;
- }
+ if (descriptor_info)
+ add_descriptor_info = true;
+
signature_info = vkd3d_find_struct(compile_info->next, SCAN_SIGNATURE_INFO);
if ((combined_sampler_info = vkd3d_find_struct(compile_info->next, SCAN_COMBINED_RESOURCE_SAMPLER_INFO)))
{
combined_sampler_info->combined_samplers = NULL;
combined_sampler_info->combined_sampler_count = 0;
- if (!descriptor_info1)
- descriptor_info1 = &local_descriptor_info1;
+ add_descriptor_info = true;
}
tessellation_info = vkd3d_find_struct(compile_info->next, SCAN_HULL_SHADER_TESSELLATION_INFO);
vkd3d_shader_scan_context_init(&context, &program->shader_version, compile_info,
- descriptor_info1, combined_sampler_info, message_context);
+ add_descriptor_info ? &program->descriptors : NULL, combined_sampler_info, message_context);
+
+ if (add_descriptor_info)
+ program->has_descriptor_info = true;
if (TRACE_ON())
vsir_program_trace(program);
@@ -1585,7 +1647,7 @@ static int vsir_program_scan(struct vsir_program *program, const struct vkd3d_sh
}
if (!ret && descriptor_info)
- ret = convert_descriptor_info(descriptor_info, descriptor_info1);
+ ret = convert_descriptor_info(descriptor_info, &program->descriptors);
if (!ret && tessellation_info)
{
@@ -1599,15 +1661,10 @@ static int vsir_program_scan(struct vsir_program *program, const struct vkd3d_sh
vkd3d_shader_free_scan_combined_resource_sampler_info(combined_sampler_info);
if (descriptor_info)
vkd3d_shader_free_scan_descriptor_info(descriptor_info);
- if (descriptor_info1)
- vkd3d_shader_free_scan_descriptor_info1(descriptor_info1);
if (signature_info)
vkd3d_shader_free_scan_signature_info(signature_info);
}
- else
- {
- vkd3d_shader_free_scan_descriptor_info1(&local_descriptor_info1);
- }
+
vkd3d_shader_scan_context_cleanup(&context);
return ret;
}
@@ -1631,7 +1688,7 @@ int vkd3d_shader_scan(const struct vkd3d_shader_compile_info *compile_info, char
vkd3d_shader_message_context_init(&message_context, compile_info->log_level);
fill_shader_dump_data(compile_info, &dump_data);
- vkd3d_shader_dump_shader(&dump_data, compile_info->source.code, compile_info->source.size, true);
+ vkd3d_shader_dump_shader(&dump_data, compile_info->source.code, compile_info->source.size, SHADER_DUMP_TYPE_SOURCE);
if (compile_info->source_type == VKD3D_SHADER_SOURCE_HLSL)
{
@@ -1645,7 +1702,7 @@ int vkd3d_shader_scan(const struct vkd3d_shader_compile_info *compile_info, char
if (!(ret = vsir_parse(compile_info, config_flags, &message_context, &program)))
{
- ret = vsir_program_scan(&program, compile_info, &message_context, NULL);
+ ret = vsir_program_scan(&program, compile_info, &message_context, false);
vsir_program_cleanup(&program);
}
}
@@ -1662,7 +1719,6 @@ int vsir_program_compile(struct vsir_program *program, uint64_t config_flags,
struct vkd3d_shader_message_context *message_context)
{
struct vkd3d_shader_scan_combined_resource_sampler_info combined_sampler_info;
- struct vkd3d_shader_scan_descriptor_info1 scan_descriptor_info;
struct vkd3d_shader_compile_info scan_info;
int ret;
@@ -1678,28 +1734,24 @@ int vsir_program_compile(struct vsir_program *program, uint64_t config_flags,
combined_sampler_info.type = VKD3D_SHADER_STRUCTURE_TYPE_SCAN_COMBINED_RESOURCE_SAMPLER_INFO;
combined_sampler_info.next = scan_info.next;
scan_info.next = &combined_sampler_info;
- if ((ret = vsir_program_scan(program, &scan_info, message_context, &scan_descriptor_info)) < 0)
+ if ((ret = vsir_program_scan(program, &scan_info, message_context, true)) < 0)
return ret;
- ret = glsl_compile(program, config_flags, &scan_descriptor_info,
+ ret = glsl_compile(program, config_flags,
&combined_sampler_info, compile_info, out, message_context);
vkd3d_shader_free_scan_combined_resource_sampler_info(&combined_sampler_info);
- vkd3d_shader_free_scan_descriptor_info1(&scan_descriptor_info);
break;
case VKD3D_SHADER_TARGET_SPIRV_BINARY:
case VKD3D_SHADER_TARGET_SPIRV_TEXT:
- if ((ret = vsir_program_scan(program, &scan_info, message_context, &scan_descriptor_info)) < 0)
+ if ((ret = vsir_program_scan(program, &scan_info, message_context, true)) < 0)
return ret;
- ret = spirv_compile(program, config_flags, &scan_descriptor_info,
- compile_info, out, message_context);
- vkd3d_shader_free_scan_descriptor_info1(&scan_descriptor_info);
+ ret = spirv_compile(program, config_flags, compile_info, out, message_context);
break;
case VKD3D_SHADER_TARGET_MSL:
- if ((ret = vsir_program_scan(program, &scan_info, message_context, &scan_descriptor_info)) < 0)
+ if ((ret = vsir_program_scan(program, &scan_info, message_context, true)) < 0)
return ret;
- ret = msl_compile(program, config_flags, &scan_descriptor_info, compile_info, out, message_context);
- vkd3d_shader_free_scan_descriptor_info1(&scan_descriptor_info);
+ ret = msl_compile(program, config_flags, compile_info, out, message_context);
break;
default:
@@ -1711,7 +1763,8 @@ int vsir_program_compile(struct vsir_program *program, uint64_t config_flags,
}
static int compile_hlsl(const struct vkd3d_shader_compile_info *compile_info,
- struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context)
+ const struct shader_dump_data *dump_data, struct vkd3d_shader_code *out,
+ struct vkd3d_shader_message_context *message_context)
{
struct vkd3d_shader_code preprocessed;
int ret;
@@ -1719,6 +1772,8 @@ static int compile_hlsl(const struct vkd3d_shader_compile_info *compile_info,
if ((ret = preproc_lexer_parse(compile_info, &preprocessed, message_context)))
return ret;
+ vkd3d_shader_dump_shader(dump_data, preprocessed.code, preprocessed.size, SHADER_DUMP_TYPE_PREPROC);
+
ret = hlsl_compile_shader(&preprocessed, compile_info, out, message_context);
vkd3d_shader_free_shader_code(&preprocessed);
@@ -1745,11 +1800,11 @@ int vkd3d_shader_compile(const struct vkd3d_shader_compile_info *compile_info,
vkd3d_shader_message_context_init(&message_context, compile_info->log_level);
fill_shader_dump_data(compile_info, &dump_data);
- vkd3d_shader_dump_shader(&dump_data, compile_info->source.code, compile_info->source.size, true);
+ vkd3d_shader_dump_shader(&dump_data, compile_info->source.code, compile_info->source.size, SHADER_DUMP_TYPE_SOURCE);
if (compile_info->source_type == VKD3D_SHADER_SOURCE_HLSL)
{
- ret = compile_hlsl(compile_info, out, &message_context);
+ ret = compile_hlsl(compile_info, &dump_data, out, &message_context);
}
else if (compile_info->source_type == VKD3D_SHADER_SOURCE_FX)
{
@@ -1768,7 +1823,7 @@ int vkd3d_shader_compile(const struct vkd3d_shader_compile_info *compile_info,
}
if (ret >= 0)
- vkd3d_shader_dump_shader(&dump_data, out->code, out->size, false);
+ vkd3d_shader_dump_shader(&dump_data, out->code, out->size, SHADER_DUMP_TYPE_TARGET);
vkd3d_shader_message_context_trace_messages(&message_context);
if (!vkd3d_shader_message_context_copy_messages(&message_context, messages))
@@ -1961,9 +2016,7 @@ const enum vkd3d_shader_source_type *vkd3d_shader_get_supported_source_types(uns
VKD3D_SHADER_SOURCE_DXBC_TPF,
VKD3D_SHADER_SOURCE_HLSL,
VKD3D_SHADER_SOURCE_D3D_BYTECODE,
-#ifdef VKD3D_SHADER_UNSUPPORTED_DXIL
VKD3D_SHADER_SOURCE_DXBC_DXIL,
-#endif
VKD3D_SHADER_SOURCE_FX,
};
@@ -1996,6 +2049,9 @@ const enum vkd3d_shader_target_type *vkd3d_shader_get_supported_target_types(
VKD3D_SHADER_TARGET_SPIRV_BINARY,
#if defined(HAVE_SPIRV_TOOLS) || defined(VKD3D_SHADER_UNSUPPORTED_SPIRV_PARSER)
VKD3D_SHADER_TARGET_SPIRV_TEXT,
+#endif
+#ifdef VKD3D_SHADER_UNSUPPORTED_GLSL
+ VKD3D_SHADER_TARGET_GLSL,
#endif
VKD3D_SHADER_TARGET_D3D_ASM,
VKD3D_SHADER_TARGET_D3D_BYTECODE,
@@ -2012,7 +2068,6 @@ const enum vkd3d_shader_target_type *vkd3d_shader_get_supported_target_types(
VKD3D_SHADER_TARGET_D3D_ASM,
};
-#ifdef VKD3D_SHADER_UNSUPPORTED_DXIL
static const enum vkd3d_shader_target_type dxbc_dxil_types[] =
{
VKD3D_SHADER_TARGET_SPIRV_BINARY,
@@ -2021,7 +2076,6 @@ const enum vkd3d_shader_target_type *vkd3d_shader_get_supported_target_types(
# endif
VKD3D_SHADER_TARGET_D3D_ASM,
};
-#endif
static const enum vkd3d_shader_target_type fx_types[] =
{
@@ -2044,11 +2098,9 @@ const enum vkd3d_shader_target_type *vkd3d_shader_get_supported_target_types(
*count = ARRAY_SIZE(d3dbc_types);
return d3dbc_types;
-#ifdef VKD3D_SHADER_UNSUPPORTED_DXIL
case VKD3D_SHADER_SOURCE_DXBC_DXIL:
*count = ARRAY_SIZE(dxbc_dxil_types);
return dxbc_dxil_types;
-#endif
case VKD3D_SHADER_SOURCE_FX:
*count = ARRAY_SIZE(fx_types);
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
index 03643acff3c..bf794d5e936 100644
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
@@ -59,7 +59,7 @@
#define VKD3D_VEC4_SIZE 4
#define VKD3D_DVEC2_SIZE 2
-#define VKD3D_SHADER_COMPONENT_TYPE_COUNT (VKD3D_SHADER_COMPONENT_UINT64 + 1)
+#define VKD3D_SHADER_COMPONENT_TYPE_COUNT (VKD3D_SHADER_COMPONENT_INT16 + 1)
#define VKD3D_SHADER_MINIMUM_PRECISION_COUNT (VKD3D_SHADER_MINIMUM_PRECISION_UINT_16 + 1)
#define VKD3D_MAX_STREAM_COUNT 4
@@ -74,6 +74,8 @@ enum vkd3d_shader_error
VKD3D_SHADER_ERROR_DXBC_INVALID_CHUNK_SIZE = 6,
VKD3D_SHADER_ERROR_DXBC_OUT_OF_MEMORY = 7,
VKD3D_SHADER_ERROR_DXBC_INVALID_SIGNATURE = 8,
+ VKD3D_SHADER_ERROR_DXBC_INVALID_STRING_REFERENCE = 9,
+ VKD3D_SHADER_ERROR_DXBC_INVALID_COMPONENT_TYPE = 10,
VKD3D_SHADER_ERROR_TPF_MISMATCHED_CF = 1000,
VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_RANGE = 1001,
@@ -167,6 +169,9 @@ enum vkd3d_shader_error
VKD3D_SHADER_ERROR_HLSL_MISPLACED_SAMPLER_STATE = 5039,
VKD3D_SHADER_ERROR_HLSL_AMBIGUOUS_CALL = 5040,
VKD3D_SHADER_ERROR_HLSL_DUPLICATE_PATCH = 5041,
+ VKD3D_SHADER_ERROR_HLSL_INVALID_MAX_VERTEX_COUNT = 5042,
+ VKD3D_SHADER_ERROR_HLSL_MISSING_PRIMITIVE_TYPE = 5043,
+ VKD3D_SHADER_ERROR_HLSL_MISPLACED_STREAM_OUTPUT = 5044,
VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION = 5300,
VKD3D_SHADER_WARNING_HLSL_DIVISION_BY_ZERO = 5301,
@@ -261,6 +266,8 @@ enum vkd3d_shader_error
VKD3D_SHADER_ERROR_FX_NOT_IMPLEMENTED = 11000,
VKD3D_SHADER_ERROR_FX_INVALID_VERSION = 11001,
VKD3D_SHADER_ERROR_FX_INVALID_DATA = 11002,
+ VKD3D_SHADER_ERROR_FX_INVALID_SIZE = 11003,
+ VKD3D_SHADER_ERROR_FX_OUT_OF_MEMORY = 11004,
};
enum vkd3d_shader_opcode
@@ -1415,6 +1422,33 @@ enum vsir_normalisation_level
VSIR_NORMALISED_SM6,
};
+struct vkd3d_shader_descriptor_info1
+{
+ enum vkd3d_shader_descriptor_type type;
+ unsigned int register_space;
+ unsigned int register_index;
+ unsigned int register_id;
+ enum vkd3d_shader_resource_type resource_type;
+ enum vkd3d_shader_resource_data_type resource_data_type;
+ unsigned int flags;
+ unsigned int sample_count;
+ unsigned int buffer_size;
+ unsigned int structure_stride;
+ unsigned int count;
+ uint32_t uav_flags;
+};
+
+struct vkd3d_shader_scan_descriptor_info1
+{
+ struct vkd3d_shader_descriptor_info1 *descriptors;
+ unsigned int descriptor_count;
+};
+
+const struct vkd3d_shader_descriptor_info1 *vkd3d_shader_find_descriptor(
+ const struct vkd3d_shader_scan_descriptor_info1 *info,
+ enum vkd3d_shader_descriptor_type type, unsigned int register_id);
+void vkd3d_shader_free_scan_descriptor_info1(struct vkd3d_shader_scan_descriptor_info1 *scan_descriptor_info);
+
struct vsir_program
{
struct vkd3d_shader_version shader_version;
@@ -1424,6 +1458,9 @@ struct vsir_program
struct shader_signature output_signature;
struct shader_signature patch_constant_signature;
+ struct vkd3d_shader_scan_descriptor_info1 descriptors;
+ bool has_descriptor_info;
+
unsigned int parameter_count;
const struct vkd3d_shader_parameter1 *parameters;
bool free_parameters;
@@ -1445,6 +1482,9 @@ struct vsir_program
enum vkd3d_tessellator_domain tess_domain;
enum vkd3d_shader_tessellator_partitioning tess_partitioning;
enum vkd3d_shader_tessellator_output_primitive tess_output_primitive;
+ enum vkd3d_primitive_type input_primitive, output_topology;
+ unsigned int vertices_out_count;
+
uint32_t io_dcls[VKD3D_BITMAP_SIZE(VKD3DSPR_COUNT)];
struct vsir_features features;
@@ -1501,28 +1541,6 @@ void vkd3d_shader_parser_init(struct vkd3d_shader_parser *parser, struct vsir_pr
void vkd3d_shader_parser_warning(struct vkd3d_shader_parser *parser,
enum vkd3d_shader_error error, const char *format, ...) VKD3D_PRINTF_FUNC(3, 4);
-struct vkd3d_shader_descriptor_info1
-{
- enum vkd3d_shader_descriptor_type type;
- unsigned int register_space;
- unsigned int register_index;
- unsigned int register_id;
- enum vkd3d_shader_resource_type resource_type;
- enum vkd3d_shader_resource_data_type resource_data_type;
- unsigned int flags;
- unsigned int sample_count;
- unsigned int buffer_size;
- unsigned int structure_stride;
- unsigned int count;
- uint32_t uav_flags;
-};
-
-struct vkd3d_shader_scan_descriptor_info1
-{
- struct vkd3d_shader_descriptor_info1 *descriptors;
- unsigned int descriptor_count;
-};
-
void vsir_program_trace(const struct vsir_program *program);
const char *shader_get_type_prefix(enum vkd3d_shader_type type);
@@ -1558,6 +1576,7 @@ void vkd3d_string_buffer_clear(struct vkd3d_string_buffer *buffer);
void vkd3d_string_buffer_truncate(struct vkd3d_string_buffer *buffer, size_t size);
int vkd3d_string_buffer_print_f32(struct vkd3d_string_buffer *buffer, float f);
int vkd3d_string_buffer_print_f64(struct vkd3d_string_buffer *buffer, double d);
+int vkd3d_string_buffer_print_string_escaped(struct vkd3d_string_buffer *buffer, const char *s, size_t len);
int vkd3d_string_buffer_printf(struct vkd3d_string_buffer *buffer, const char *format, ...) VKD3D_PRINTF_FUNC(2, 3);
void vkd3d_string_buffer_release(struct vkd3d_string_buffer_cache *list, struct vkd3d_string_buffer *buffer);
#define vkd3d_string_buffer_trace(buffer) \
@@ -1642,7 +1661,8 @@ bool sm4_register_from_semantic_name(const struct vkd3d_shader_version *version,
bool shader_sm4_is_scalar_register(const struct vkd3d_shader_register *reg);
bool sm4_sysval_semantic_from_semantic_name(enum vkd3d_shader_sysval_semantic *sysval_semantic,
const struct vkd3d_shader_version *version, bool semantic_compat_mapping, enum vkd3d_tessellator_domain domain,
- const char *semantic_name, unsigned int semantic_idx, bool output, bool is_patch_constant_func, bool is_patch);
+ const char *semantic_name, unsigned int semantic_idx, bool output,
+ bool is_patch_constant_func, bool is_primitive);
int d3dbc_parse(const struct vkd3d_shader_compile_info *compile_info, uint64_t config_flags,
struct vkd3d_shader_message_context *message_context, struct vsir_program *program);
@@ -1665,7 +1685,6 @@ int d3dbc_compile(struct vsir_program *program, uint64_t config_flags,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context);
int glsl_compile(struct vsir_program *program, uint64_t config_flags,
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info,
const struct vkd3d_shader_scan_combined_resource_sampler_info *combined_sampler_info,
const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context);
@@ -1673,12 +1692,10 @@ int glsl_compile(struct vsir_program *program, uint64_t config_flags,
#define SPIRV_MAX_SRC_COUNT 6
int spirv_compile(struct vsir_program *program, uint64_t config_flags,
- const struct vkd3d_shader_scan_descriptor_info1 *scan_descriptor_info,
const struct vkd3d_shader_compile_info *compile_info,
struct vkd3d_shader_code *out, struct vkd3d_shader_message_context *message_context);
int msl_compile(struct vsir_program *program, uint64_t config_flags,
- const struct vkd3d_shader_scan_descriptor_info1 *descriptor_info,
const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_code *out,
struct vkd3d_shader_message_context *message_context);
diff --git a/libs/vkd3d/libs/vkd3d-utils/vkd3d_utils_main.c b/libs/vkd3d/libs/vkd3d-utils/vkd3d_utils_main.c
index 62dd5f69f77..d59a133c3d4 100644
--- a/libs/vkd3d/libs/vkd3d-utils/vkd3d_utils_main.c
+++ b/libs/vkd3d/libs/vkd3d-utils/vkd3d_utils_main.c
@@ -19,6 +19,8 @@
#include "vkd3d_utils_private.h"
#undef D3D12CreateDevice
+/* VKD3D_DEBUG_ENV_NAME("VKD3D_DEBUG"); */
+
static const char *debug_d3d_blob_part(D3D_BLOB_PART part)
{
switch (part)
diff --git a/libs/vkd3d/libs/vkd3d/command.c b/libs/vkd3d/libs/vkd3d/command.c
index ce0c3b9128f..1ff58f97565 100644
--- a/libs/vkd3d/libs/vkd3d/command.c
+++ b/libs/vkd3d/libs/vkd3d/command.c
@@ -1499,7 +1499,7 @@ static VkDescriptorPool d3d12_command_allocator_allocate_descriptor_pool(
const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs;
struct VkDescriptorPoolCreateInfo pool_desc;
VkDevice vk_device = device->vk_device;
- VkDescriptorPoolSize vk_pool_sizes[2];
+ VkDescriptorPoolSize vk_pool_sizes[4];
unsigned int pool_size, pool_limit;
VkDescriptorPool vk_pool;
VkResult vr;
@@ -1530,21 +1530,43 @@ static VkDescriptorPool d3d12_command_allocator_allocate_descriptor_pool(
}
descriptor_count = pool_size;
- vk_pool_sizes[0].type = vk_descriptor_type_from_vkd3d_descriptor_type(descriptor_type, true);
- vk_pool_sizes[0].descriptorCount = descriptor_count;
-
- vk_pool_sizes[1].type = vk_descriptor_type_from_vkd3d_descriptor_type(descriptor_type, false);
- vk_pool_sizes[1].descriptorCount = descriptor_count;
-
pool_desc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_desc.pNext = NULL;
pool_desc.flags = 0;
pool_desc.maxSets = 512;
- pool_desc.poolSizeCount = 1;
- if (vk_pool_sizes[1].type != vk_pool_sizes[0].type)
- ++pool_desc.poolSizeCount;
pool_desc.pPoolSizes = vk_pool_sizes;
+ if (allocator->device->use_vk_heaps)
+ {
+ /* SRV root descriptors. */
+ vk_pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
+ vk_pool_sizes[0].descriptorCount = descriptor_count;
+
+ /* UAV root descriptors and UAV counters. */
+ vk_pool_sizes[1].type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
+ vk_pool_sizes[1].descriptorCount = descriptor_count;
+
+ /* CBV root descriptors. */
+ vk_pool_sizes[2].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ vk_pool_sizes[2].descriptorCount = descriptor_count;
+
+ /* Static samplers. */
+ vk_pool_sizes[3].type = VK_DESCRIPTOR_TYPE_SAMPLER;
+ vk_pool_sizes[3].descriptorCount = descriptor_count;
+
+ pool_desc.poolSizeCount = 4;
+ }
+ else
+ {
+ vk_pool_sizes[0].type = vk_descriptor_type_from_vkd3d_descriptor_type(descriptor_type, true);
+ vk_pool_sizes[0].descriptorCount = descriptor_count;
+
+ vk_pool_sizes[1].type = vk_descriptor_type_from_vkd3d_descriptor_type(descriptor_type, false);
+ vk_pool_sizes[1].descriptorCount = descriptor_count;
+
+ pool_desc.poolSizeCount = 1 + (vk_pool_sizes[0].type != vk_pool_sizes[1].type);
+ }
+
if ((vr = VK_CALL(vkCreateDescriptorPool(vk_device, &pool_desc, NULL, &vk_pool))) < 0)
{
ERR("Failed to create descriptor pool, vr %d.\n", vr);
@@ -1578,6 +1600,10 @@ static VkDescriptorSet d3d12_command_allocator_allocate_descriptor_set(struct d3
VkDescriptorSet vk_descriptor_set;
VkResult vr;
+ /* With Vulkan heaps we use just one descriptor pool. */
+ if (device->use_vk_heaps)
+ descriptor_type = 0;
+
if (!allocator->vk_descriptor_pools[descriptor_type])
allocator->vk_descriptor_pools[descriptor_type] = d3d12_command_allocator_allocate_descriptor_pool(allocator,
descriptor_type, descriptor_count, unbounded);
@@ -2222,7 +2248,7 @@ static bool vk_barrier_parameters_from_d3d12_resource_state(unsigned int state,
if (!stencil_state || (stencil_state & D3D12_RESOURCE_STATE_DEPTH_WRITE))
*image_layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
else
- *image_layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL;
+ *image_layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR;
}
return true;
@@ -2256,7 +2282,7 @@ static bool vk_barrier_parameters_from_d3d12_resource_state(unsigned int state,
{
if (stencil_state & D3D12_RESOURCE_STATE_DEPTH_WRITE)
{
- *image_layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
+ *image_layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR;
*access_mask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
}
else
diff --git a/libs/vkd3d/libs/vkd3d/device.c b/libs/vkd3d/libs/vkd3d/device.c
index b51e2963efa..15affcee9cb 100644
--- a/libs/vkd3d/libs/vkd3d/device.c
+++ b/libs/vkd3d/libs/vkd3d/device.c
@@ -73,6 +73,7 @@ static const struct vkd3d_optional_extension_info optional_instance_extensions[]
static const char * const required_device_extensions[] =
{
VK_KHR_MAINTENANCE1_EXTENSION_NAME,
+ VK_KHR_MAINTENANCE2_EXTENSION_NAME,
VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
};
@@ -91,12 +92,12 @@ static const struct vkd3d_optional_extension_info optional_device_extensions[] =
VK_EXTENSION(KHR_DRAW_INDIRECT_COUNT, KHR_draw_indirect_count),
VK_EXTENSION(KHR_GET_MEMORY_REQUIREMENTS_2, KHR_get_memory_requirements2),
VK_EXTENSION(KHR_IMAGE_FORMAT_LIST, KHR_image_format_list),
- VK_EXTENSION(KHR_MAINTENANCE2, KHR_maintenance2),
VK_EXTENSION(KHR_MAINTENANCE3, KHR_maintenance3),
VK_EXTENSION(KHR_PORTABILITY_SUBSET, KHR_portability_subset),
VK_EXTENSION(KHR_PUSH_DESCRIPTOR, KHR_push_descriptor),
VK_EXTENSION(KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE, KHR_sampler_mirror_clamp_to_edge),
VK_EXTENSION(KHR_TIMELINE_SEMAPHORE, KHR_timeline_semaphore),
+ VK_EXTENSION(KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY, KHR_zero_initialize_workgroup_memory),
/* EXT extensions */
VK_EXTENSION(EXT_4444_FORMATS, EXT_4444_formats),
VK_EXTENSION(EXT_CALIBRATED_TIMESTAMPS, EXT_calibrated_timestamps),
@@ -520,7 +521,26 @@ static VkBool32 VKAPI_PTR vkd3d_debug_report_callback(VkDebugReportFlagsEXT flag
VkDebugReportObjectTypeEXT object_type, uint64_t object, size_t location,
int32_t message_code, const char *layer_prefix, const char *message, void *user_data)
{
- FIXME("%s\n", debugstr_a(message));
+ while (*message)
+ {
+ const char *end = strchr(message, '\n');
+ size_t len;
+
+ if (end)
+ len = end - message;
+ else
+ len = strlen(message);
+
+ len = min(len, 256);
+
+ FIXME("%s\n", debugstr_an(message, len));
+
+ message += len;
+
+ if (*message == '\n')
+ ++message;
+ }
+
return VK_FALSE;
}
@@ -835,6 +855,7 @@ struct vkd3d_physical_device_info
VkPhysicalDeviceTimelineSemaphoreFeaturesKHR timeline_semaphore_features;
VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT mutable_features;
VkPhysicalDevice4444FormatsFeaturesEXT formats4444_features;
+ VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR zero_initialize_workgroup_memory_features;
VkPhysicalDeviceFeatures2 features2;
};
@@ -870,6 +891,8 @@ static void vkd3d_chain_physical_device_info_structures(struct vkd3d_physical_de
vk_prepend_struct(&info->features2, &info->mutable_features);
if (vulkan_info->EXT_4444_formats)
vk_prepend_struct(&info->features2, &info->formats4444_features);
+ if (vulkan_info->KHR_zero_initialize_workgroup_memory)
+ vk_prepend_struct(&info->features2, &info->zero_initialize_workgroup_memory_features);
info->properties2.pNext = NULL;
@@ -908,6 +931,7 @@ static void vkd3d_physical_device_info_init(struct vkd3d_physical_device_info *i
info->timeline_semaphore_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR;
info->mutable_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT;
info->formats4444_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT;
+ info->zero_initialize_workgroup_memory_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR;
info->properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
info->maintenance3_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES;
@@ -1418,6 +1442,9 @@ static void vkd3d_init_feature_level(struct vkd3d_vulkan_info *vk_info,
else if (!vk_info->vertex_attrib_zero_divisor)
WARN("Vertex attribute instance rate zero divisor is not supported.\n");
+ if (!vk_info->KHR_zero_initialize_workgroup_memory)
+ WARN("Shader zero initialize workgroup memory is not supported.\n");
+
#undef CHECK_MIN_REQUIREMENT
#undef CHECK_MAX_REQUIREMENT
#undef CHECK_FEATURE
@@ -1834,6 +1861,8 @@ static HRESULT vkd3d_init_device_caps(struct d3d12_device *device,
vulkan_info->EXT_mutable_descriptor_type = false;
if (!physical_device_info->timeline_semaphore_features.timelineSemaphore)
vulkan_info->KHR_timeline_semaphore = false;
+ if (!physical_device_info->zero_initialize_workgroup_memory_features.shaderZeroInitializeWorkgroupMemory)
+ vulkan_info->KHR_zero_initialize_workgroup_memory = false;
physical_device_info->formats4444_features.formatA4B4G4R4 = VK_FALSE;
@@ -3610,11 +3639,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_device_CheckFeatureSupport(ID3D12Device9
TRACE("Request shader model %#x.\n", data->HighestShaderModel);
-#ifdef VKD3D_SHADER_UNSUPPORTED_DXIL
data->HighestShaderModel = min(data->HighestShaderModel, D3D_SHADER_MODEL_6_0);
-#else
- data->HighestShaderModel = min(data->HighestShaderModel, D3D_SHADER_MODEL_5_1);
-#endif
TRACE("Shader model %#x.\n", data->HighestShaderModel);
return S_OK;
diff --git a/libs/vkd3d/libs/vkd3d/resource.c b/libs/vkd3d/libs/vkd3d/resource.c
index eab97715944..cb184986f2a 100644
--- a/libs/vkd3d/libs/vkd3d/resource.c
+++ b/libs/vkd3d/libs/vkd3d/resource.c
@@ -3094,7 +3094,7 @@ bool vkd3d_create_texture_view(struct d3d12_device *device, uint32_t magic, VkIm
if (vk_image)
{
view_desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
- view_desc.pNext = NULL;
+ view_desc.pNext = &usage_desc;
view_desc.flags = 0;
view_desc.image = vk_image;
view_desc.viewType = desc->view_type;
@@ -3107,13 +3107,11 @@ bool vkd3d_create_texture_view(struct d3d12_device *device, uint32_t magic, VkIm
view_desc.subresourceRange.levelCount = desc->miplevel_count;
view_desc.subresourceRange.baseArrayLayer = desc->layer_idx;
view_desc.subresourceRange.layerCount = desc->layer_count;
- if (device->vk_info.KHR_maintenance2)
- {
- usage_desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
- usage_desc.pNext = NULL;
- usage_desc.usage = desc->usage;
- view_desc.pNext = &usage_desc;
- }
+
+ usage_desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
+ usage_desc.pNext = NULL;
+ usage_desc.usage = desc->usage;
+
if ((vr = VK_CALL(vkCreateImageView(device->vk_device, &view_desc, NULL, &vk_view))) < 0)
{
WARN("Failed to create Vulkan image view, vr %d.\n", vr);
diff --git a/libs/vkd3d/libs/vkd3d/state.c b/libs/vkd3d/libs/vkd3d/state.c
index aa08dc985bd..a1f09422305 100644
--- a/libs/vkd3d/libs/vkd3d/state.c
+++ b/libs/vkd3d/libs/vkd3d/state.c
@@ -754,8 +754,11 @@ struct vkd3d_descriptor_set_context
unsigned int uav_counter_index;
unsigned int push_constant_index;
- struct vk_binding_array *push_descriptor_set;
+ struct vk_binding_array *root_descriptor_set;
+ struct vk_binding_array *static_samplers_descriptor_set;
bool push_descriptor;
+ bool static_samplers;
+ bool use_vk_heaps;
};
static void descriptor_set_context_cleanup(struct vkd3d_descriptor_set_context *context)
@@ -806,13 +809,59 @@ static struct vk_binding_array *d3d12_root_signature_vk_binding_array_for_type(
{
struct vk_binding_array *array, **current;
+ /* There are a few different ways we can reach this point:
+ * * If we are using virtual heaps we want to allocate descriptors to sets
+ * depending on their descriptor type, in order to minimize waste when
+ * recycling descriptor pools.
+ * + With the exception of root descriptors when we are using push
+ * descriptors: the push descriptors must be in a separate set, so we
+ * keep one specifically for them.
+ * * If we are using Vulkan heaps then all the root table descriptors don't
+ * even reach here, because they are managed by the D3D12 descriptor
+ * heap. Thus we only have to deal with root descriptors and static
+ * samplers.
+ * + If we're using push descriptors then again we have to dedicate a set
+ * for them, so static samplers will and up in their own set too.
+ * + If we're not using push descriptors then we can use the same set and
+ * save one. In this case we don't care too much about minimizing
+ * wasted descriptors, because few descriptors can end up here anyway.
+ */
+
if (context->push_descriptor)
{
- if (!context->push_descriptor_set)
- context->push_descriptor_set = d3d12_root_signature_append_vk_binding_array(root_signature,
- descriptor_type, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, context);
+ /* The descriptor type is irrelevant here, it will never be used. */
+ if (!context->root_descriptor_set)
+ context->root_descriptor_set = d3d12_root_signature_append_vk_binding_array(root_signature,
+ 0, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, context);
+
+ return context->root_descriptor_set;
+ }
+
+ if (context->use_vk_heaps)
+ {
+ if (context->static_samplers)
+ {
+ if (!context->static_samplers_descriptor_set)
+ {
+ if (!context->push_descriptor && context->root_descriptor_set)
+ context->static_samplers_descriptor_set = context->root_descriptor_set;
+ else
+ /* The descriptor type is irrelevant here, it will never be used. */
+ context->static_samplers_descriptor_set = d3d12_root_signature_append_vk_binding_array(
+ root_signature, 0, 0, context);
+ }
+
+ return context->static_samplers_descriptor_set;
+ }
+ else
+ {
+ /* The descriptor type is irrelevant here, it will never be used. */
+ if (!context->root_descriptor_set)
+ context->root_descriptor_set = d3d12_root_signature_append_vk_binding_array(
+ root_signature, 0, 0, context);
- return context->push_descriptor_set;
+ return context->root_descriptor_set;
+ }
}
current = context->current_binding_array;
@@ -1638,17 +1687,22 @@ static HRESULT d3d12_root_signature_init(struct d3d12_root_signature *root_signa
sizeof(*root_signature->static_samplers))))
goto fail;
+ context.use_vk_heaps = use_vk_heaps;
context.push_descriptor = vk_info->KHR_push_descriptor;
if (FAILED(hr = d3d12_root_signature_init_root_descriptors(root_signature, desc, &context)))
goto fail;
- root_signature->main_set = !!context.push_descriptor_set;
+ root_signature->main_set = context.root_descriptor_set && context.push_descriptor;
context.push_descriptor = false;
if (FAILED(hr = d3d12_root_signature_init_push_constants(root_signature, desc,
root_signature->push_constant_ranges, &root_signature->push_constant_range_count)))
goto fail;
+
+ context.static_samplers = true;
if (FAILED(hr = d3d12_root_signature_init_static_samplers(root_signature, device, desc, &context)))
goto fail;
+ context.static_samplers = false;
+
context.push_constant_index = 0;
if (FAILED(hr = d3d12_root_signature_init_root_descriptor_tables(root_signature, desc, &info, &context)))
goto fail;
@@ -2316,6 +2370,8 @@ static unsigned int feature_flags_compile_option(const struct d3d12_device *devi
flags |= VKD3D_SHADER_COMPILE_OPTION_FEATURE_FLOAT64;
if (device->feature_options1.WaveOps)
flags |= VKD3D_SHADER_COMPILE_OPTION_FEATURE_WAVE_OPS;
+ if (device->vk_info.KHR_zero_initialize_workgroup_memory)
+ flags |= VKD3D_SHADER_COMPILE_OPTION_FEATURE_ZERO_INITIALIZE_WORKGROUP_MEMORY;
return flags;
}
@@ -3146,13 +3202,13 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
struct vkd3d_shader_spirv_target_info *stage_target_info;
uint32_t aligned_offsets[D3D12_VS_INPUT_REGISTER_COUNT];
struct vkd3d_shader_descriptor_offset_info offset_info;
+ struct vkd3d_shader_scan_signature_info signature_info;
struct vkd3d_shader_parameter ps_shader_parameters[1];
struct vkd3d_shader_transform_feedback_info xfb_info;
struct vkd3d_shader_spirv_target_info ps_target_info;
struct vkd3d_shader_interface_info shader_interface;
struct vkd3d_shader_spirv_target_info target_info;
- const struct d3d12_root_signature *root_signature;
- struct vkd3d_shader_signature input_signature;
+ struct d3d12_root_signature *root_signature;
bool have_attachment, is_dsv_format_unknown;
VkShaderStageFlagBits xfb_stage = 0;
VkSampleCountFlagBits sample_count;
@@ -3163,7 +3219,6 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
size_t rt_count;
uint32_t mask;
HRESULT hr;
- int ret;
static const DWORD default_ps_code[] =
{
@@ -3196,7 +3251,8 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
memset(&state->uav_counters, 0, sizeof(state->uav_counters));
graphics->stage_count = 0;
- memset(&input_signature, 0, sizeof(input_signature));
+ memset(&signature_info, 0, sizeof(signature_info));
+ signature_info.type = VKD3D_SHADER_STRUCTURE_TYPE_SCAN_SIGNATURE_INFO;
for (i = desc->rtv_formats.NumRenderTargets; i < ARRAY_SIZE(desc->rtv_formats.RTFormats); ++i)
{
@@ -3207,10 +3263,25 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
}
}
+ state->implicit_root_signature = NULL;
if (!(root_signature = unsafe_impl_from_ID3D12RootSignature(desc->root_signature)))
{
- WARN("Root signature is NULL.\n");
- return E_INVALIDARG;
+ TRACE("Root signature is NULL, looking for an embedded signature in the vertex shader.\n");
+ if (FAILED(hr = d3d12_root_signature_create(device,
+ desc->vs.pShaderBytecode, desc->vs.BytecodeLength, &root_signature))
+ && FAILED(hr = d3d12_root_signature_create(device,
+ desc->ps.pShaderBytecode, desc->ps.BytecodeLength, &root_signature))
+ && FAILED(hr = d3d12_root_signature_create(device,
+ desc->ds.pShaderBytecode, desc->ds.BytecodeLength, &root_signature))
+ && FAILED(hr = d3d12_root_signature_create(device,
+ desc->hs.pShaderBytecode, desc->hs.BytecodeLength, &root_signature))
+ && FAILED(hr = d3d12_root_signature_create(device,
+ desc->gs.pShaderBytecode, desc->gs.BytecodeLength, &root_signature)))
+ {
+ WARN("Failed to find an embedded root signature, hr %s.\n", debugstr_hresult(hr));
+ goto fail;
+ }
+ state->implicit_root_signature = &root_signature->ID3D12RootSignature_iface;
}
sample_count = vk_samples_from_dxgi_sample_desc(&desc->sample_desc);
@@ -3425,7 +3496,6 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
for (i = 0; i < ARRAY_SIZE(shader_stages); ++i)
{
const D3D12_SHADER_BYTECODE *b = (const void *)((uintptr_t)desc + shader_stages[i].offset);
- const struct vkd3d_shader_code dxbc = {b->pShaderBytecode, b->BytecodeLength};
if (!b->pShaderBytecode)
continue;
@@ -3439,14 +3509,6 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
stage_target_info = &target_info;
switch (shader_stages[i].stage)
{
- case VK_SHADER_STAGE_VERTEX_BIT:
- if ((ret = vkd3d_shader_parse_input_signature(&dxbc, &input_signature, NULL)) < 0)
- {
- hr = hresult_from_vkd3d_result(ret);
- goto fail;
- }
- break;
-
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
if (desc->primitive_topology_type != D3D12_PRIMITIVE_TOPOLOGY_TYPE_PATCH)
@@ -3457,6 +3519,7 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
}
break;
+ case VK_SHADER_STAGE_VERTEX_BIT:
case VK_SHADER_STAGE_GEOMETRY_BIT:
break;
@@ -3478,11 +3541,14 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
ps_target_info.next = NULL;
target_info.next = NULL;
offset_info.next = NULL;
+ signature_info.next = NULL;
if (shader_stages[i].stage == xfb_stage)
vkd3d_prepend_struct(&shader_interface, &xfb_info);
vkd3d_prepend_struct(&shader_interface, stage_target_info);
if (root_signature->descriptor_offsets)
vkd3d_prepend_struct(&shader_interface, &offset_info);
+ if (shader_stages[i].stage == VK_SHADER_STAGE_VERTEX_BIT)
+ vkd3d_prepend_struct(&shader_interface, &signature_info);
if (FAILED(hr = create_shader_stage(device, &graphics->stages[graphics->stage_count],
shader_stages[i].stage, b, &shader_interface)))
@@ -3533,7 +3599,7 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
goto fail;
}
- if (!(signature_element = vkd3d_shader_find_signature_element(&input_signature,
+ if (!(signature_element = vkd3d_shader_find_signature_element(&signature_info.input,
e->SemanticName, e->SemanticIndex, 0)))
{
WARN("Unused input element %u.\n", i);
@@ -3660,19 +3726,21 @@ static HRESULT d3d12_pipeline_state_init_graphics(struct d3d12_pipeline_state *s
if (FAILED(hr = vkd3d_private_store_init(&state->private_store)))
goto fail;
- vkd3d_shader_free_shader_signature(&input_signature);
+ vkd3d_shader_free_scan_signature_info(&signature_info);
state->vk_bind_point = VK_PIPELINE_BIND_POINT_GRAPHICS;
- state->implicit_root_signature = NULL;
d3d12_device_add_ref(state->device = device);
return S_OK;
fail:
+ if (state->implicit_root_signature)
+ ID3D12RootSignature_Release(state->implicit_root_signature);
+
for (i = 0; i < graphics->stage_count; ++i)
{
VK_CALL(vkDestroyShaderModule(device->vk_device, state->u.graphics.stages[i].module, NULL));
}
- vkd3d_shader_free_shader_signature(&input_signature);
+ vkd3d_shader_free_scan_signature_info(&signature_info);
d3d12_pipeline_uav_counter_state_cleanup(&state->uav_counters, device);
diff --git a/libs/vkd3d/libs/vkd3d/vkd3d_private.h b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
index fd1fbb1679a..e0e44248053 100644
--- a/libs/vkd3d/libs/vkd3d/vkd3d_private.h
+++ b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
@@ -127,12 +127,12 @@ struct vkd3d_vulkan_info
bool KHR_draw_indirect_count;
bool KHR_get_memory_requirements2;
bool KHR_image_format_list;
- bool KHR_maintenance2;
bool KHR_maintenance3;
bool KHR_portability_subset;
bool KHR_push_descriptor;
bool KHR_sampler_mirror_clamp_to_edge;
bool KHR_timeline_semaphore;
+ bool KHR_zero_initialize_workgroup_memory;
/* EXT device extensions */
bool EXT_4444_formats;
bool EXT_calibrated_timestamps;
--
2.47.2